From 9aab3f2be041b09f42375d3bf173d1a8795a1ee9 Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Fri, 15 Mar 2019 11:01:45 -0700 Subject: Add memory utilization analytics to mallctl The analytics tool is put under experimental.utilization namespace in mallctl. Input is one pointer or an array of pointers and the output is a list of memory utilization statistics. --- include/jemalloc/internal/extent_externs.h | 6 + include/jemalloc/internal/extent_structs.h | 21 +++ include/jemalloc/internal/extent_types.h | 3 + src/ctl.c | 236 ++++++++++++++++++++++++++++- src/extent.c | 69 +++++++++ test/unit/mallctl.c | 196 +++++++++++++++++++++++- 6 files changed, 526 insertions(+), 5 deletions(-) diff --git a/include/jemalloc/internal/extent_externs.h b/include/jemalloc/internal/extent_externs.h index 8680251..5d53aad 100644 --- a/include/jemalloc/internal/extent_externs.h +++ b/include/jemalloc/internal/extent_externs.h @@ -74,4 +74,10 @@ bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, bool extent_boot(void); +void extent_util_stats_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size); +void extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size, + size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr); + #endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */ diff --git a/include/jemalloc/internal/extent_structs.h b/include/jemalloc/internal/extent_structs.h index ceb1897..ad6710e 100644 --- a/include/jemalloc/internal/extent_structs.h +++ b/include/jemalloc/internal/extent_structs.h @@ -228,4 +228,25 @@ struct extents_s { bool delay_coalesce; }; +/* + * The following two structs are for experimental purposes. See + * experimental_utilization_query_ctl and + * experimental_utilization_batch_query_ctl in src/ctl.c. 
+ */ + +struct extent_util_stats_s { + size_t nfree; + size_t nregs; + size_t size; +}; + +struct extent_util_stats_verbose_s { + void *slabcur_addr; + size_t nfree; + size_t nregs; + size_t size; + size_t bin_nfree; + size_t bin_nregs; +}; + #endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */ diff --git a/include/jemalloc/internal/extent_types.h b/include/jemalloc/internal/extent_types.h index acbcf27..865f8a1 100644 --- a/include/jemalloc/internal/extent_types.h +++ b/include/jemalloc/internal/extent_types.h @@ -4,6 +4,9 @@ typedef struct extent_s extent_t; typedef struct extents_s extents_t; +typedef struct extent_util_stats_s extent_util_stats_t; +typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t; + #define EXTENT_HOOKS_INITIALIZER NULL /* diff --git a/src/ctl.c b/src/ctl.c index 09310a9..dd7e467 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -216,6 +216,8 @@ CTL_PROTO(stats_mapped) CTL_PROTO(stats_retained) CTL_PROTO(experimental_hooks_install) CTL_PROTO(experimental_hooks_remove) +CTL_PROTO(experimental_utilization_query) +CTL_PROTO(experimental_utilization_batch_query) #define MUTEX_STATS_CTL_PROTO_GEN(n) \ CTL_PROTO(stats_##n##_num_ops) \ @@ -574,11 +576,17 @@ static const ctl_named_node_t stats_node[] = { static const ctl_named_node_t hooks_node[] = { {NAME("install"), CTL(experimental_hooks_install)}, - {NAME("remove"), CTL(experimental_hooks_remove)}, + {NAME("remove"), CTL(experimental_hooks_remove)} }; + +static const ctl_named_node_t utilization_node[] = { + {NAME("query"), CTL(experimental_utilization_query)}, + {NAME("batch_query"), CTL(experimental_utilization_batch_query)} }; static const ctl_named_node_t experimental_node[] = { - {NAME("hooks"), CHILD(named, hooks)} + {NAME("hooks"), CHILD(named, hooks)}, + {NAME("utilization"), CHILD(named, utilization)} }; static const ctl_named_node_t root_node[] = { @@ -2714,7 +2722,7 @@ static int prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - + const char *filename = NULL; if (!config_prof) { @@ -2726,7 +2734,7 @@ prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, if (prof_log_start(tsd_tsdn(tsd), filename)) { ret = EFAULT; - goto label_return; + goto label_return; } ret = 0; @@ -3083,3 +3091,223 @@ experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, label_return: return ret; } + +/* + * Output six memory utilization entries for an input pointer, the first one of + * type (void *) and the remaining five of type size_t, describing the following + * (in the same order): + * + * (a) memory address of the extent a potential reallocation would go into, + * == the five fields below describe the extent the pointer resides in == + * (b) number of free regions in the extent, + * (c) number of regions in the extent, + * (d) size of the extent in terms of bytes, + * (e) total number of free regions in the bin the extent belongs to, and + * (f) total number of regions in the bin the extent belongs to. + * + * Note that "(e)" and "(f)" are only available when stats are enabled; + * otherwise both are set to zero. + * + * This API is mainly intended for small class allocations, where extents are + * used as slabs. + * + * In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)" + * will be zero. The other three fields will be properly set, though the values + * are trivial: "(b)" will be 0, "(c)" will be 1, and "(d)" will be the usable + * size.
+ * + * The input pointer and size are respectively passed in by newp and newlen, + * and the output fields and size are respectively oldp and *oldlenp. + * + * It can be beneficial to define the following macros to make it easier to + * access the output: + * + * #define SLABCUR_READ(out) (*(void **)out) + * #define COUNTS(out) ((size_t *)((void **)out + 1)) + * #define NFREE_READ(out) COUNTS(out)[0] + * #define NREGS_READ(out) COUNTS(out)[1] + * #define SIZE_READ(out) COUNTS(out)[2] + * #define BIN_NFREE_READ(out) COUNTS(out)[3] + * #define BIN_NREGS_READ(out) COUNTS(out)[4] + * + * and then write e.g. NFREE_READ(oldp) to fetch the output. See the unit test + * test_utilization_query in test/unit/mallctl.c for an example. + * + * For a typical defragmentation workflow making use of this API for + * understanding the fragmentation level, please refer to the comment for + * experimental_utilization_batch_query_ctl. + * + * It's up to the application how to determine the significance of + * fragmentation relying on the outputs returned. Possible choices are: + * + * (a) if extent utilization ratio is below certain threshold, + * (b) if extent memory consumption is above certain threshold, + * (c) if extent utilization ratio is significantly below bin utilization ratio, + * (d) if input pointer deviates a lot from potential reallocation address, or + * (e) some selection/combination of the above. + * + * The caller needs to make sure that the input/output arguments are valid, + * in particular, that the size of the output is correct, i.e.: + * + * *oldlenp = sizeof(void *) + sizeof(size_t) * 5 + * + * Otherwise, the function immediately returns EINVAL without touching anything. + * + * In the rare case where there's no associated extent found for the input + * pointer, the function zeros out all output fields and return. Please refer + * to the comment for experimental_utilization_batch_query_ctl to understand the + * motivation from C++. + */ +static int +experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + assert(sizeof(extent_util_stats_verbose_t) + == sizeof(void *) + sizeof(size_t) * 5); + + if (oldp == NULL || oldlenp == NULL + || *oldlenp != sizeof(extent_util_stats_verbose_t) + || newp == NULL) { + ret = EINVAL; + goto label_return; + } + + void *ptr = NULL; + WRITE(ptr, void *); + extent_util_stats_verbose_t *util_stats + = (extent_util_stats_verbose_t *)oldp; + extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr, + &util_stats->nfree, &util_stats->nregs, &util_stats->size, + &util_stats->bin_nfree, &util_stats->bin_nregs, + &util_stats->slabcur_addr); + ret = 0; + +label_return: + return ret; +} + +/* + * Given an input array of pointers, output three memory utilization entries of + * type size_t for each input pointer about the extent it resides in: + * + * (a) number of free regions in the extent, + * (b) number of regions in the extent, and + * (c) size of the extent in terms of bytes. + * + * This API is mainly intended for small class allocations, where extents are + * used as slab. In case of large class allocations, the outputs are trivial: + * "(a)" will be 0, "(b)" will be 1, and "(c)" will be the usable size. + * + * Note that multiple input pointers may reside on a same extent so the output + * fields may contain duplicates. 
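(Illustrative sketch, not part of the patch: one way an application might call the two endpoints documented in these comments, assuming a prefix-less jemalloc build that exposes mallctl/mallocx/dallocx via <jemalloc/jemalloc.h>; the exact batch output layout is spelled out in the remainder of the comment below.)

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Field order mirrors the documented output: one (void *) then five size_t. */
typedef struct {
	void *slabcur_addr;
	size_t nfree, nregs, size, bin_nfree, bin_nregs;
} util_query_out_t;

static void
utilization_sketch(void) {
	void *p = mallocx(1, 0);
	if (p == NULL) {
		return;
	}

	/* Single-pointer query: *oldlenp must be sizeof(void *) + 5 * sizeof(size_t). */
	util_query_out_t v;
	size_t v_sz = sizeof(void *) + sizeof(size_t) * 5;
	if (mallctl("experimental.utilization.query", &v, &v_sz, &p,
	    sizeof(p)) == 0) {
		printf("extent: %zu free of %zu regions, %zu bytes\n",
		    v.nfree, v.nregs, v.size);
	}

	/* Batch query: three size_t of output per input pointer. */
	void *ptrs[2] = {p, p};
	size_t out[2 * 3];
	size_t out_sz = sizeof(out);
	if (mallctl("experimental.utilization.batch_query", out, &out_sz,
	    ptrs, sizeof(ptrs)) == 0) {
		printf("ptr 0: nfree=%zu nregs=%zu size=%zu\n",
		    out[0], out[1], out[2]);
	}

	dallocx(p, 0);
}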
+ * + * The format of the input/output looks like: + * + * input[0]: 1st_pointer_to_query | output[0]: 1st_extent_n_free_regions + * | output[1]: 1st_extent_n_regions + * | output[2]: 1st_extent_size + * input[1]: 2nd_pointer_to_query | output[3]: 2nd_extent_n_free_regions + * | output[4]: 2nd_extent_n_regions + * | output[5]: 2nd_extent_size + * ... | ... + * + * The input array and size are respectively passed in by newp and newlen, and + * the output array and size are respectively oldp and *oldlenp. + * + * It can be beneficial to define the following macros to make it easier to + * access the output: + * + * #define NFREE_READ(out, i) out[(i) * 3] + * #define NREGS_READ(out, i) out[(i) * 3 + 1] + * #define SIZE_READ(out, i) out[(i) * 3 + 2] + * + * and then write e.g. NFREE_READ(oldp, i) to fetch the output. See the unit + * test test_utilization_batch in test/unit/mallctl.c for a concrete example. + * + * A typical workflow would be composed of the following steps: + * + * (1) flush tcache: mallctl("thread.tcache.flush", ...) + * (2) initialize input array of pointers to query fragmentation + * (3) allocate output array to hold utilization statistics + * (4) query utilization: mallctl("experimental.utilization.batch_query", ...) + * (5) (optional) decide if it's worthwhile to defragment; otherwise stop here + * (6) disable tcache: mallctl("thread.tcache.enabled", ...) + * (7) defragment allocations with significant fragmentation, e.g.: + * for each allocation { + * if it's fragmented { + * malloc(...); + * memcpy(...); + * free(...); + * } + * } + * (8) enable tcache: mallctl("thread.tcache.enabled", ...) + * + * The application can determine the significance of fragmentation itself, + * relying on the statistics returned, both at the overall level i.e. step "(5)" + * and at the individual allocation level i.e. within step "(7)". Possible + * choices are: + * + * (a) whether the memory utilization ratio is below a certain threshold, + * (b) whether memory consumption is above a certain threshold, or + * (c) some combination of the two. + * + * The caller needs to make sure that the input/output arrays are valid and + * that their sizes are correct and consistent with each other, meaning: + * + * (a) newlen = n_pointers * sizeof(const void *) + * (b) *oldlenp = n_pointers * sizeof(size_t) * 3 + * (c) n_pointers > 0 + * + * Otherwise, the function immediately returns EINVAL without touching anything. + * + * In the rare case where there's no associated extent found for some pointers, + * rather than immediately terminating the computation and raising an error, + * the function simply zeros out the corresponding output fields and continues + * the computation until all input pointers are handled. The motivations of + * such a design are as follows: + * + * (a) The function always either processes nothing or processes everything, and + * never leaves the output half touched and half untouched. + * + * (b) It facilitates usage needs especially common in C++. A wide variety of + * C++ objects are instantiated with multiple dynamic memory allocations. For + * example, std::string and std::vector typically use at least two allocations, + * one for the metadata and one for the actual content. Other types may use + * even more allocations. When inquiring about utilization statistics, the + * caller often wants to examine all such allocations, especially the internal + * one(s), rather than just the topmost one.
The issue comes when some + * implementations do certain optimizations to reduce/aggregate some internal + * allocations, e.g. putting short strings directly into the metadata, and such + * decisions are not known to the caller. Therefore, we permit pointers to + * memory usages that may not be returned by previous malloc calls, and we + * provide the caller a convenient way to identify such cases. + */ +static int +experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3); + + const size_t len = newlen / sizeof(const void *); + if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0 + || newlen != len * sizeof(const void *) + || *oldlenp != len * sizeof(extent_util_stats_t)) { + ret = EINVAL; + goto label_return; + } + + void **ptrs = (void **)newp; + extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp; + size_t i; + for (i = 0; i < len; ++i) { + extent_util_stats_get(tsd_tsdn(tsd), ptrs[i], + &util_stats[i].nfree, &util_stats[i].nregs, + &util_stats[i].size); + } + ret = 0; + +label_return: + return ret; +} diff --git a/src/extent.c b/src/extent.c index 62086c7..814f0a3 100644 --- a/src/extent.c +++ b/src/extent.c @@ -2280,3 +2280,72 @@ extent_boot(void) { return false; } + +void +extent_util_stats_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size) { + assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL); + + const extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(extent == NULL)) { + *nfree = *nregs = *size = 0; + return; + } + + *size = extent_size_get(extent); + if (!extent_slab_get(extent)) { + *nfree = 0; + *nregs = 1; + } else { + *nfree = extent_nfree_get(extent); + *nregs = bin_infos[extent_szind_get(extent)].nregs; + assert(*nfree <= *nregs); + assert(*nfree * extent_usize_get(extent) <= *size); + } +} + +void +extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size, + size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) { + assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL + && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL); + + const extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(extent == NULL)) { + *nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0; + *slabcur_addr = NULL; + return; + } + + *size = extent_size_get(extent); + if (!extent_slab_get(extent)) { + *nfree = *bin_nfree = *bin_nregs = 0; + *nregs = 1; + *slabcur_addr = NULL; + return; + } + + *nfree = extent_nfree_get(extent); + const szind_t szind = extent_szind_get(extent); + *nregs = bin_infos[szind].nregs; + assert(*nfree <= *nregs); + assert(*nfree * extent_usize_get(extent) <= *size); + + const arena_t *arena = extent_arena_get(extent); + assert(arena != NULL); + const unsigned binshard = extent_binshard_get(extent); + bin_t *bin = &arena->bins[szind].bin_shards[binshard]; + + malloc_mutex_lock(tsdn, &bin->lock); + if (config_stats) { + *bin_nregs = *nregs * bin->stats.curslabs; + assert(*bin_nregs >= bin->stats.curregs); + *bin_nfree = *bin_nregs - bin->stats.curregs; + } else { + *bin_nfree = *bin_nregs = 0; + } + *slabcur_addr = extent_addr_get(bin->slabcur); + assert(*slabcur_addr != NULL); + malloc_mutex_unlock(tsdn, &bin->lock); +} diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c index 498f9e0..ef00a3d 100644 --- a/test/unit/mallctl.c +++ b/test/unit/mallctl.c @@ -853,6 
+853,198 @@ TEST_BEGIN(test_hooks_exhaustion) { } TEST_END +#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do { \ + assert_d_eq(mallctl("experimental.utilization." node, \ + a, b, c, d), EINVAL, "Should fail when " why_inval); \ + assert_zu_eq(out_sz, out_sz_ref, \ + "Output size touched when given invalid arguments"); \ + assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \ + "Output content touched when given invalid arguments"); \ +} while (0) + +#define TEST_UTIL_VALID(node) do { \ + assert_d_eq(mallctl("experimental.utilization." node, \ + out, &out_sz, in, in_sz), 0, \ + "Should return 0 on correct arguments"); \ + assert_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \ + assert_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \ + "Output content should be changed"); \ +} while (0) + +TEST_BEGIN(test_utilization_query) { + void *p = mallocx(1, 0); + void **in = &p; + size_t in_sz = sizeof(const void *); + size_t out_sz = sizeof(void *) + sizeof(size_t) * 5; + void *out = mallocx(out_sz, 0); + void *out_ref = mallocx(out_sz, 0); + size_t out_sz_ref = out_sz; + + assert_ptr_not_null(p, "test pointer allocation failed"); + assert_ptr_not_null(out, "test output allocation failed"); + assert_ptr_not_null(out_ref, "test reference output allocation failed"); + +#define SLABCUR_READ(out) (*(void **)out) +#define COUNTS(out) ((size_t *)((void **)out + 1)) +#define NFREE_READ(out) COUNTS(out)[0] +#define NREGS_READ(out) COUNTS(out)[1] +#define SIZE_READ(out) COUNTS(out)[2] +#define BIN_NFREE_READ(out) COUNTS(out)[3] +#define BIN_NREGS_READ(out) COUNTS(out)[4] + + SLABCUR_READ(out) = NULL; + NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1; + BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1; + memcpy(out_ref, out, out_sz); + + /* Test invalid argument(s) errors */ +#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \ + TEST_UTIL_EINVAL("query", a, b, c, d, why_inval) + + TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL"); + TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL"); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz, "newp is NULL"); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0, "newlen is zero"); + in_sz -= 1; + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, "invalid newlen"); + in_sz += 1; + out_sz_ref = out_sz -= 2 * sizeof(size_t); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, "invalid *oldlenp"); + out_sz_ref = out_sz += 2 * sizeof(size_t); + +#undef TEST_UTIL_QUERY_EINVAL + + /* Examine output for valid call */ + TEST_UTIL_VALID("query"); + assert_zu_le(NFREE_READ(out), NREGS_READ(out), + "Extent free count exceeded region count"); + assert_zu_le(NREGS_READ(out), SIZE_READ(out), + "Extent region count exceeded size"); + assert_zu_ne(NREGS_READ(out), 0, + "Extent region count must be positive"); + assert_zu_ne(SIZE_READ(out), 0, "Extent size must be positive"); + if (config_stats) { + assert_zu_le(BIN_NFREE_READ(out), BIN_NREGS_READ(out), + "Bin free count exceeded region count"); + assert_zu_ne(BIN_NREGS_READ(out), 0, + "Bin region count must be positive"); + assert_zu_le(NFREE_READ(out), BIN_NFREE_READ(out), + "Extent free count exceeded bin free count"); + assert_zu_le(NREGS_READ(out), BIN_NREGS_READ(out), + "Extent region count exceeded bin region count"); + assert_zu_eq(BIN_NREGS_READ(out) % NREGS_READ(out), 0, + "Bin region count isn't a multiple of extent region count"); + assert_zu_le(NREGS_READ(out) - NFREE_READ(out), + BIN_NREGS_READ(out) - BIN_NFREE_READ(out), + "Extent utilized count exceeded bin utilized count"); + } else { + 
assert_zu_eq(BIN_NFREE_READ(out), 0, + "Bin free count should be zero when stats are disabled"); + assert_zu_eq(BIN_NREGS_READ(out), 0, + "Bin region count should be zero when stats are disabled"); + } + assert_ptr_not_null(SLABCUR_READ(out), "Current slab is null"); + assert_true(NFREE_READ(out) == 0 || SLABCUR_READ(out) <= p, + "Allocation should follow first fit principle"); + +#undef BIN_NREGS_READ +#undef BIN_NFREE_READ +#undef SIZE_READ +#undef NREGS_READ +#undef NFREE_READ +#undef COUNTS +#undef SLABCUR_READ + + free(out_ref); + free(out); + free(p); +} +TEST_END + +TEST_BEGIN(test_utilization_batch_query) { + void *p = mallocx(1, 0); + void *q = mallocx(1, 0); + void *in[] = {p, q}; + size_t in_sz = sizeof(const void *) * 2; + size_t out[] = {-1, -1, -1, -1, -1, -1}; + size_t out_sz = sizeof(size_t) * 6; + size_t out_ref[] = {-1, -1, -1, -1, -1, -1}; + size_t out_sz_ref = out_sz; + + assert_ptr_not_null(p, "test pointer allocation failed"); + assert_ptr_not_null(q, "test pointer allocation failed"); + + /* Test invalid argument(s) errors */ +#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \ + TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval) + + TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL"); + TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL"); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz, "newp is NULL"); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0, "newlen is zero"); + in_sz -= 1; + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "newlen is not an exact multiple"); + in_sz += 1; + out_sz_ref = out_sz -= 2 * sizeof(size_t); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "*oldlenp is not an exact multiple"); + out_sz_ref = out_sz += 2 * sizeof(size_t); + in_sz -= sizeof(const void *); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "*oldlenp and newlen do not match"); + in_sz += sizeof(const void *); + +#undef TEST_UTIL_BATCH_EINVAL + + /* Examine output for valid calls */ +#define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query") +#define TEST_EQUAL_REF(i, message) \ + assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message) + +#define NFREE_READ(out, i) out[(i) * 3] +#define NREGS_READ(out, i) out[(i) * 3 + 1] +#define SIZE_READ(out, i) out[(i) * 3 + 2] + + out_sz_ref = out_sz /= 2; + in_sz /= 2; + TEST_UTIL_BATCH_VALID; + assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0), + "Extent free count exceeded region count"); + assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0), + "Extent region count exceeded size"); + assert_zu_ne(NREGS_READ(out, 0), 0, + "Extent region count must be positive"); + assert_zu_ne(SIZE_READ(out, 0), 0, "Extent size must be positive"); + TEST_EQUAL_REF(1, "Should not overwrite content beyond what's needed"); + in_sz *= 2; + out_sz_ref = out_sz *= 2; + + memcpy(out_ref, out, 3 * sizeof(size_t)); + TEST_UTIL_BATCH_VALID; + TEST_EQUAL_REF(0, "Statistics should be stable across calls"); + assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1), + "Extent free count exceeded region count"); + assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1), + "Extent region count should be same for same region size"); + assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1), + "Extent size should be same for same region size"); + +#undef SIZE_READ +#undef NREGS_READ +#undef NFREE_READ + +#undef TEST_EQUAL_REF +#undef TEST_UTIL_BATCH_VALID + + free(q); + free(p); +} +TEST_END + +#undef TEST_UTIL_VALID +#undef TEST_UTIL_EINVAL + int main(void) { return test( @@ -883,5 +1075,7 @@ main(void) { 
test_arenas_lookup, test_stats_arenas, test_hooks, - test_hooks_exhaustion); + test_hooks_exhaustion, + test_utilization_query, + test_utilization_batch_query); } -- cgit v0.12 From 93084cdc8960935d0acc93424dddd3a79a86e2da Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Thu, 28 Mar 2019 20:42:40 -0700 Subject: Ensure page alignment on extent_alloc. This was discovered and suggested by @jasone in #1468. When custom extent hooks are in use, we should ensure page alignment on the extent alloc path, instead of relying on the user hooks to do so. --- src/extent.c | 7 ++++--- src/extent_dss.c | 2 +- src/extent_mmap.c | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/extent.c b/src/extent.c index 814f0a3..66cbf05 100644 --- a/src/extent.c +++ b/src/extent.c @@ -1256,7 +1256,7 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, assert(arena != NULL); return extent_alloc_default_impl(tsdn, arena, new_addr, size, - alignment, zero, commit); + ALIGNMENT_CEILING(alignment, PAGE), zero, commit); } static void @@ -1493,14 +1493,15 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, return NULL; } void *addr; + size_t palignment = ALIGNMENT_CEILING(alignment, PAGE); if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. */ addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, - alignment, zero, commit); + palignment, zero, commit); } else { extent_hook_pre_reentrancy(tsdn, arena); addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, - esize, alignment, zero, commit, arena_ind_get(arena)); + esize, palignment, zero, commit, arena_ind_get(arena)); extent_hook_post_reentrancy(tsdn); } if (addr == NULL) { diff --git a/src/extent_dss.c b/src/extent_dss.c index 6c56cf6..69a7bee 100644 --- a/src/extent_dss.c +++ b/src/extent_dss.c @@ -113,7 +113,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, cassert(have_dss); assert(size > 0); - assert(alignment > 0); + assert(alignment == ALIGNMENT_CEILING(alignment, PAGE)); /* * sbrk() uses a signed increment argument, so take care not to diff --git a/src/extent_mmap.c b/src/extent_mmap.c index 8d607dc..17fd1c8 100644 --- a/src/extent_mmap.c +++ b/src/extent_mmap.c @@ -21,8 +21,8 @@ bool opt_retain = void * extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { - void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment, - PAGE), commit); + assert(alignment == ALIGNMENT_CEILING(alignment, PAGE)); + void *ret = pages_map(new_addr, size, alignment, commit); if (ret == NULL) { return NULL; } -- cgit v0.12 From c2a3a7cd3f3cbc177d677101be85a31a39c26bd0 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Wed, 3 Apr 2019 16:19:00 -0700 Subject: Fix test/unit/prof_log Compiler optimizations may produce more traces than expected. Instead, verify only the lower bound. --- test/unit/prof_log.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/test/unit/prof_log.c b/test/unit/prof_log.c index 6a3464b..92fbd7c 100644 --- a/test/unit/prof_log.c +++ b/test/unit/prof_log.c @@ -125,12 +125,14 @@ TEST_BEGIN(test_prof_log_many_traces) { assert_rep(); } /* - * There should be 8 total backtraces: two for malloc/free in f1(), - * two for malloc/free in f2(), two for malloc/free in f3(), and then - * two for malloc/free in f1()'s call to f3().
+ * There should be 8 total backtraces: two for malloc/free in f1(), two + * for malloc/free in f2(), two for malloc/free in f3(), and then two + * for malloc/free in f1()'s call to f3(). However compiler + * optimizations such as loop unrolling might generate more call sites. + * So >= 8 traces are expected. */ - assert_zu_eq(prof_log_bt_count(), 8, - "Wrong number of backtraces given sample workload"); + assert_zu_ge(prof_log_bt_count(), 8, + "Expect at least 8 backtraces given sample workload"); assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure when stopping logging"); } -- cgit v0.12 From d3d7a8ef09b6fa79109e8930aaba7a677f8b24ac Mon Sep 17 00:00:00 2001 From: mgrice Date: Fri, 8 Mar 2019 11:50:30 -0800 Subject: remove compare and branch in fast path for c++ operator delete[] Summary: sdallocx is checking a flag that will never be set (at least in the provided C++ destructor implementation). This branch will probably only rarely be mispredicted however it removes two instructions in sdallocx and one at the callsite (to zero out flags). --- bin/jeprof.in | 1 + include/jemalloc/internal/jemalloc_internal_externs.h | 1 + src/jemalloc.c | 14 +++++++++++++- src/jemalloc_cpp.cpp | 4 ++-- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/bin/jeprof.in b/bin/jeprof.in index 588c6b4..16a76c6 100644 --- a/bin/jeprof.in +++ b/bin/jeprof.in @@ -2909,6 +2909,7 @@ sub RemoveUninterestingFrames { '@JEMALLOC_PREFIX@xallocx', '@JEMALLOC_PREFIX@dallocx', '@JEMALLOC_PREFIX@sdallocx', + '@JEMALLOC_PREFIX@sdallocx_noflags', 'tc_calloc', 'tc_cfree', 'tc_malloc', diff --git a/include/jemalloc/internal/jemalloc_internal_externs.h b/include/jemalloc/internal/jemalloc_internal_externs.h index b784362..cdbc33a 100644 --- a/include/jemalloc/internal/jemalloc_internal_externs.h +++ b/include/jemalloc/internal/jemalloc_internal_externs.h @@ -51,5 +51,6 @@ void jemalloc_prefork(void); void jemalloc_postfork_parent(void); void jemalloc_postfork_child(void); bool malloc_initialized(void); +void je_sdallocx_noflags(void *ptr, size_t size); #endif /* JEMALLOC_INTERNAL_EXTERNS_H */ diff --git a/src/jemalloc.c b/src/jemalloc.c index c8afa9c..7bc7b95 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -2732,7 +2732,7 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) { tcache_t *tcache = tsd_tcachep_get(tsd); alloc_ctx_t alloc_ctx; - /* + /* * If !config_cache_oblivious, we can check PAGE alignment to * detect sampled objects. Otherwise addresses are * randomized, and we have to look it up in the rtree anyway. 
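(Illustrative sketch, not part of the patch: the call pattern this fast path serves. C++ sized deallocation boils down to sdallocx with flags always equal to zero, which is what the je_sdallocx_noflags entry point added in the next hunk hard-codes; the snippet assumes the prefix-less public API from <jemalloc/jemalloc.h>.)

#include <jemalloc/jemalloc.h>

static void
sized_dealloc_sketch(void) {
	size_t sz = 128;
	void *p = mallocx(sz, 0);
	if (p == NULL) {
		return;
	}
	/*
	 * The caller already knows the size, so the allocator can skip the
	 * size lookup; on the C++ sized operator delete path the flags
	 * argument is always 0, making the flags check redundant.
	 */
	sdallocx(p, sz, 0);
}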
@@ -3522,6 +3522,18 @@ je_sdallocx(void *ptr, size_t size, int flags) { LOG("core.sdallocx.exit", ""); } +void JEMALLOC_NOTHROW +je_sdallocx_noflags(void *ptr, size_t size) { + LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr, + size); + + if (!free_fastpath(ptr, size, true)) { + sdallocx_default(ptr, size, 0); + } + + LOG("core.sdallocx.exit", ""); +} + JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_nallocx(size_t size, int flags) { diff --git a/src/jemalloc_cpp.cpp b/src/jemalloc_cpp.cpp index f0cedda..da0441a 100644 --- a/src/jemalloc_cpp.cpp +++ b/src/jemalloc_cpp.cpp @@ -128,14 +128,14 @@ operator delete(void *ptr, std::size_t size) noexcept { if (unlikely(ptr == nullptr)) { return; } - je_sdallocx(ptr, size, /*flags=*/0); + je_sdallocx_noflags(ptr, size); } void operator delete[](void *ptr, std::size_t size) noexcept { if (unlikely(ptr == nullptr)) { return; } - je_sdallocx(ptr, size, /*flags=*/0); + je_sdallocx_noflags(ptr, size); } #endif // __cpp_sized_deallocation -- cgit v0.12 From 7ee3897740aabdccb2381b7b6ab68fff0aac3ec4 Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Tue, 9 Apr 2019 11:01:26 -0700 Subject: Separate tests for extent utilization API As title. --- Makefile.in | 1 + src/ctl.c | 4 +- test/unit/extent_util.c | 190 ++++++++++++++++++++++++++++++++++++++++++++++ test/unit/mallctl.c | 196 +----------------------------------------------- 4 files changed, 194 insertions(+), 197 deletions(-) create mode 100644 test/unit/extent_util.c diff --git a/Makefile.in b/Makefile.in index 0777f6a..3a09442 100644 --- a/Makefile.in +++ b/Makefile.in @@ -178,6 +178,7 @@ TESTS_UNIT := \ $(srcroot)test/unit/div.c \ $(srcroot)test/unit/emitter.c \ $(srcroot)test/unit/extent_quantize.c \ + $(srcroot)test/unit/extent_util.c \ $(srcroot)test/unit/fork.c \ $(srcroot)test/unit/hash.c \ $(srcroot)test/unit/hook.c \ diff --git a/src/ctl.c b/src/ctl.c index dd7e467..193d2b0 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -3131,7 +3131,7 @@ label_return: * #define BIN_NREGS_READ(out) COUNTS(out)[4] * * and then write e.g. NFREE_READ(oldp) to fetch the output. See the unit test - * test_utilization_query in test/unit/mallctl.c for an example. + * test_query in test/unit/extent_util.c for an example. * * For a typical defragmentation workflow making use of this API for * understanding the fragmentation level, please refer to the comment for @@ -3223,7 +3223,7 @@ label_return: * #define SIZE_READ(out, i) out[(i) * 3 + 2] * * and then write e.g. NFREE_READ(oldp, i) to fetch the output. See the unit - * test test_utilization_batch in test/unit/mallctl.c for a concrete example. + * test test_batch in test/unit/extent_util.c for a concrete example. * * A typical workflow would be composed of the following steps: * diff --git a/test/unit/extent_util.c b/test/unit/extent_util.c new file mode 100644 index 0000000..6995325 --- /dev/null +++ b/test/unit/extent_util.c @@ -0,0 +1,190 @@ +#include "test/jemalloc_test.h" + +#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do { \ + assert_d_eq(mallctl("experimental.utilization." 
node, \ + a, b, c, d), EINVAL, "Should fail when " why_inval); \ + assert_zu_eq(out_sz, out_sz_ref, \ + "Output size touched when given invalid arguments"); \ + assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \ + "Output content touched when given invalid arguments"); \ +} while (0) + +#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \ + TEST_UTIL_EINVAL("query", a, b, c, d, why_inval) +#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \ + TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval) + +#define TEST_UTIL_VALID(node) do { \ + assert_d_eq(mallctl("experimental.utilization." node, \ + out, &out_sz, in, in_sz), 0, \ + "Should return 0 on correct arguments"); \ + assert_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \ + assert_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \ + "Output content should be changed"); \ +} while (0) + +#define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query") + +TEST_BEGIN(test_query) { + void *p = mallocx(1, 0); + void **in = &p; + size_t in_sz = sizeof(const void *); + size_t out_sz = sizeof(void *) + sizeof(size_t) * 5; + void *out = mallocx(out_sz, 0); + void *out_ref = mallocx(out_sz, 0); + size_t out_sz_ref = out_sz; + + assert_ptr_not_null(p, "test pointer allocation failed"); + assert_ptr_not_null(out, "test output allocation failed"); + assert_ptr_not_null(out_ref, "test reference output allocation failed"); + +#define SLABCUR_READ(out) (*(void **)out) +#define COUNTS(out) ((size_t *)((void **)out + 1)) +#define NFREE_READ(out) COUNTS(out)[0] +#define NREGS_READ(out) COUNTS(out)[1] +#define SIZE_READ(out) COUNTS(out)[2] +#define BIN_NFREE_READ(out) COUNTS(out)[3] +#define BIN_NREGS_READ(out) COUNTS(out)[4] + + SLABCUR_READ(out) = NULL; + NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1; + BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1; + memcpy(out_ref, out, out_sz); + + /* Test invalid argument(s) errors */ + TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL"); + TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL"); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz, "newp is NULL"); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0, "newlen is zero"); + in_sz -= 1; + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, "invalid newlen"); + in_sz += 1; + out_sz_ref = out_sz -= 2 * sizeof(size_t); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, "invalid *oldlenp"); + out_sz_ref = out_sz += 2 * sizeof(size_t); + + /* Examine output for valid call */ + TEST_UTIL_VALID("query"); + assert_zu_le(NFREE_READ(out), NREGS_READ(out), + "Extent free count exceeded region count"); + assert_zu_le(NREGS_READ(out), SIZE_READ(out), + "Extent region count exceeded size"); + assert_zu_ne(NREGS_READ(out), 0, + "Extent region count must be positive"); + assert_zu_ne(SIZE_READ(out), 0, "Extent size must be positive"); + if (config_stats) { + assert_zu_le(BIN_NFREE_READ(out), BIN_NREGS_READ(out), + "Bin free count exceeded region count"); + assert_zu_ne(BIN_NREGS_READ(out), 0, + "Bin region count must be positive"); + assert_zu_le(NFREE_READ(out), BIN_NFREE_READ(out), + "Extent free count exceeded bin free count"); + assert_zu_le(NREGS_READ(out), BIN_NREGS_READ(out), + "Extent region count exceeded bin region count"); + assert_zu_eq(BIN_NREGS_READ(out) % NREGS_READ(out), 0, + "Bin region count isn't a multiple of extent region count"); + assert_zu_le(NREGS_READ(out) - NFREE_READ(out), + BIN_NREGS_READ(out) - BIN_NFREE_READ(out), + "Extent utilized count exceeded bin utilized count"); + } else { + assert_zu_eq(BIN_NFREE_READ(out), 0, + 
"Bin free count should be zero when stats are disabled"); + assert_zu_eq(BIN_NREGS_READ(out), 0, + "Bin region count should be zero when stats are disabled"); + } + assert_ptr_not_null(SLABCUR_READ(out), "Current slab is null"); + assert_true(NFREE_READ(out) == 0 || SLABCUR_READ(out) <= p, + "Allocation should follow first fit principle"); + +#undef BIN_NREGS_READ +#undef BIN_NFREE_READ +#undef SIZE_READ +#undef NREGS_READ +#undef NFREE_READ +#undef COUNTS +#undef SLABCUR_READ + + free(out_ref); + free(out); + free(p); +} +TEST_END + +TEST_BEGIN(test_batch) { + void *p = mallocx(1, 0); + void *q = mallocx(1, 0); + void *in[] = {p, q}; + size_t in_sz = sizeof(const void *) * 2; + size_t out[] = {-1, -1, -1, -1, -1, -1}; + size_t out_sz = sizeof(size_t) * 6; + size_t out_ref[] = {-1, -1, -1, -1, -1, -1}; + size_t out_sz_ref = out_sz; + + assert_ptr_not_null(p, "test pointer allocation failed"); + assert_ptr_not_null(q, "test pointer allocation failed"); + + /* Test invalid argument(s) errors */ + TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL"); + TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL"); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz, "newp is NULL"); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0, "newlen is zero"); + in_sz -= 1; + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "newlen is not an exact multiple"); + in_sz += 1; + out_sz_ref = out_sz -= 2 * sizeof(size_t); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "*oldlenp is not an exact multiple"); + out_sz_ref = out_sz += 2 * sizeof(size_t); + in_sz -= sizeof(const void *); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "*oldlenp and newlen do not match"); + in_sz += sizeof(const void *); + + /* Examine output for valid calls */ +#define TEST_EQUAL_REF(i, message) \ + assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message) + +#define NFREE_READ(out, i) out[(i) * 3] +#define NREGS_READ(out, i) out[(i) * 3 + 1] +#define SIZE_READ(out, i) out[(i) * 3 + 2] + + out_sz_ref = out_sz /= 2; + in_sz /= 2; + TEST_UTIL_BATCH_VALID; + assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0), + "Extent free count exceeded region count"); + assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0), + "Extent region count exceeded size"); + assert_zu_ne(NREGS_READ(out, 0), 0, + "Extent region count must be positive"); + assert_zu_ne(SIZE_READ(out, 0), 0, "Extent size must be positive"); + TEST_EQUAL_REF(1, "Should not overwrite content beyond what's needed"); + in_sz *= 2; + out_sz_ref = out_sz *= 2; + + memcpy(out_ref, out, 3 * sizeof(size_t)); + TEST_UTIL_BATCH_VALID; + TEST_EQUAL_REF(0, "Statistics should be stable across calls"); + assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1), + "Extent free count exceeded region count"); + assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1), + "Extent region count should be same for same region size"); + assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1), + "Extent size should be same for same region size"); + +#undef SIZE_READ +#undef NREGS_READ +#undef NFREE_READ + +#undef TEST_EQUAL_REF + + free(q); + free(p); +} +TEST_END + +int +main(void) { + return test(test_query, test_batch); +} diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c index ef00a3d..498f9e0 100644 --- a/test/unit/mallctl.c +++ b/test/unit/mallctl.c @@ -853,198 +853,6 @@ TEST_BEGIN(test_hooks_exhaustion) { } TEST_END -#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do { \ - assert_d_eq(mallctl("experimental.utilization." 
node, \ - a, b, c, d), EINVAL, "Should fail when " why_inval); \ - assert_zu_eq(out_sz, out_sz_ref, \ - "Output size touched when given invalid arguments"); \ - assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \ - "Output content touched when given invalid arguments"); \ -} while (0) - -#define TEST_UTIL_VALID(node) do { \ - assert_d_eq(mallctl("experimental.utilization." node, \ - out, &out_sz, in, in_sz), 0, \ - "Should return 0 on correct arguments"); \ - assert_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \ - assert_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \ - "Output content should be changed"); \ -} while (0) - -TEST_BEGIN(test_utilization_query) { - void *p = mallocx(1, 0); - void **in = &p; - size_t in_sz = sizeof(const void *); - size_t out_sz = sizeof(void *) + sizeof(size_t) * 5; - void *out = mallocx(out_sz, 0); - void *out_ref = mallocx(out_sz, 0); - size_t out_sz_ref = out_sz; - - assert_ptr_not_null(p, "test pointer allocation failed"); - assert_ptr_not_null(out, "test output allocation failed"); - assert_ptr_not_null(out_ref, "test reference output allocation failed"); - -#define SLABCUR_READ(out) (*(void **)out) -#define COUNTS(out) ((size_t *)((void **)out + 1)) -#define NFREE_READ(out) COUNTS(out)[0] -#define NREGS_READ(out) COUNTS(out)[1] -#define SIZE_READ(out) COUNTS(out)[2] -#define BIN_NFREE_READ(out) COUNTS(out)[3] -#define BIN_NREGS_READ(out) COUNTS(out)[4] - - SLABCUR_READ(out) = NULL; - NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1; - BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1; - memcpy(out_ref, out, out_sz); - - /* Test invalid argument(s) errors */ -#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \ - TEST_UTIL_EINVAL("query", a, b, c, d, why_inval) - - TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL"); - TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL"); - TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz, "newp is NULL"); - TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0, "newlen is zero"); - in_sz -= 1; - TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, "invalid newlen"); - in_sz += 1; - out_sz_ref = out_sz -= 2 * sizeof(size_t); - TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, "invalid *oldlenp"); - out_sz_ref = out_sz += 2 * sizeof(size_t); - -#undef TEST_UTIL_QUERY_EINVAL - - /* Examine output for valid call */ - TEST_UTIL_VALID("query"); - assert_zu_le(NFREE_READ(out), NREGS_READ(out), - "Extent free count exceeded region count"); - assert_zu_le(NREGS_READ(out), SIZE_READ(out), - "Extent region count exceeded size"); - assert_zu_ne(NREGS_READ(out), 0, - "Extent region count must be positive"); - assert_zu_ne(SIZE_READ(out), 0, "Extent size must be positive"); - if (config_stats) { - assert_zu_le(BIN_NFREE_READ(out), BIN_NREGS_READ(out), - "Bin free count exceeded region count"); - assert_zu_ne(BIN_NREGS_READ(out), 0, - "Bin region count must be positive"); - assert_zu_le(NFREE_READ(out), BIN_NFREE_READ(out), - "Extent free count exceeded bin free count"); - assert_zu_le(NREGS_READ(out), BIN_NREGS_READ(out), - "Extent region count exceeded bin region count"); - assert_zu_eq(BIN_NREGS_READ(out) % NREGS_READ(out), 0, - "Bin region count isn't a multiple of extent region count"); - assert_zu_le(NREGS_READ(out) - NFREE_READ(out), - BIN_NREGS_READ(out) - BIN_NFREE_READ(out), - "Extent utilized count exceeded bin utilized count"); - } else { - assert_zu_eq(BIN_NFREE_READ(out), 0, - "Bin free count should be zero when stats are disabled"); - assert_zu_eq(BIN_NREGS_READ(out), 0, - "Bin region count should be zero 
when stats are disabled"); - } - assert_ptr_not_null(SLABCUR_READ(out), "Current slab is null"); - assert_true(NFREE_READ(out) == 0 || SLABCUR_READ(out) <= p, - "Allocation should follow first fit principle"); - -#undef BIN_NREGS_READ -#undef BIN_NFREE_READ -#undef SIZE_READ -#undef NREGS_READ -#undef NFREE_READ -#undef COUNTS -#undef SLABCUR_READ - - free(out_ref); - free(out); - free(p); -} -TEST_END - -TEST_BEGIN(test_utilization_batch_query) { - void *p = mallocx(1, 0); - void *q = mallocx(1, 0); - void *in[] = {p, q}; - size_t in_sz = sizeof(const void *) * 2; - size_t out[] = {-1, -1, -1, -1, -1, -1}; - size_t out_sz = sizeof(size_t) * 6; - size_t out_ref[] = {-1, -1, -1, -1, -1, -1}; - size_t out_sz_ref = out_sz; - - assert_ptr_not_null(p, "test pointer allocation failed"); - assert_ptr_not_null(q, "test pointer allocation failed"); - - /* Test invalid argument(s) errors */ -#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \ - TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval) - - TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL"); - TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL"); - TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz, "newp is NULL"); - TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0, "newlen is zero"); - in_sz -= 1; - TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, - "newlen is not an exact multiple"); - in_sz += 1; - out_sz_ref = out_sz -= 2 * sizeof(size_t); - TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, - "*oldlenp is not an exact multiple"); - out_sz_ref = out_sz += 2 * sizeof(size_t); - in_sz -= sizeof(const void *); - TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, - "*oldlenp and newlen do not match"); - in_sz += sizeof(const void *); - -#undef TEST_UTIL_BATCH_EINVAL - - /* Examine output for valid calls */ -#define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query") -#define TEST_EQUAL_REF(i, message) \ - assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message) - -#define NFREE_READ(out, i) out[(i) * 3] -#define NREGS_READ(out, i) out[(i) * 3 + 1] -#define SIZE_READ(out, i) out[(i) * 3 + 2] - - out_sz_ref = out_sz /= 2; - in_sz /= 2; - TEST_UTIL_BATCH_VALID; - assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0), - "Extent free count exceeded region count"); - assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0), - "Extent region count exceeded size"); - assert_zu_ne(NREGS_READ(out, 0), 0, - "Extent region count must be positive"); - assert_zu_ne(SIZE_READ(out, 0), 0, "Extent size must be positive"); - TEST_EQUAL_REF(1, "Should not overwrite content beyond what's needed"); - in_sz *= 2; - out_sz_ref = out_sz *= 2; - - memcpy(out_ref, out, 3 * sizeof(size_t)); - TEST_UTIL_BATCH_VALID; - TEST_EQUAL_REF(0, "Statistics should be stable across calls"); - assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1), - "Extent free count exceeded region count"); - assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1), - "Extent region count should be same for same region size"); - assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1), - "Extent size should be same for same region size"); - -#undef SIZE_READ -#undef NREGS_READ -#undef NFREE_READ - -#undef TEST_EQUAL_REF -#undef TEST_UTIL_BATCH_VALID - - free(q); - free(p); -} -TEST_END - -#undef TEST_UTIL_VALID -#undef TEST_UTIL_EINVAL - int main(void) { return test( @@ -1075,7 +883,5 @@ main(void) { test_arenas_lookup, test_stats_arenas, test_hooks, - test_hooks_exhaustion, - test_utilization_query, - test_utilization_batch_query); + test_hooks_exhaustion); } -- cgit v0.12 From 
020b5dc7ac5138a347e5462508b2b5e4ecd6bc52 Mon Sep 17 00:00:00 2001 From: zoulasc Date: Fri, 15 Mar 2019 12:56:03 -0400 Subject: Convert the format generator function to an annotated format function, so that the generated formats can be checked by the compiler. --- include/jemalloc/internal/emitter.h | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/include/jemalloc/internal/emitter.h b/include/jemalloc/internal/emitter.h index 0a8bc2c..981dbe0 100644 --- a/include/jemalloc/internal/emitter.h +++ b/include/jemalloc/internal/emitter.h @@ -86,10 +86,11 @@ emitter_printf(emitter_t *emitter, const char *format, ...) { va_end(ap); } -static inline void +static inline const char * __attribute__((__format_arg__(3))) emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier, emitter_justify_t justify, int width) { size_t written; + fmt_specifier++; if (justify == emitter_justify_none) { written = malloc_snprintf(out_fmt, out_size, "%%%s", fmt_specifier); @@ -102,6 +103,7 @@ emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier, } /* Only happens in case of bad format string, which *we* choose. */ assert(written < out_size); + return out_fmt; } /* @@ -127,26 +129,27 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, char buf[BUF_SIZE]; #define EMIT_SIMPLE(type, format) \ - emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width); \ - emitter_printf(emitter, fmt, *(const type *)value); \ + emitter_printf(emitter, \ + emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \ + *(const type *)value); switch (value_type) { case emitter_type_bool: - emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width); - emitter_printf(emitter, fmt, *(const bool *)value ? - "true" : "false"); + emitter_printf(emitter, + emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), + *(const bool *)value ? "true" : "false"); break; case emitter_type_int: - EMIT_SIMPLE(int, "d") + EMIT_SIMPLE(int, "%d") break; case emitter_type_unsigned: - EMIT_SIMPLE(unsigned, "u") + EMIT_SIMPLE(unsigned, "%u") break; case emitter_type_ssize: - EMIT_SIMPLE(ssize_t, "zd") + EMIT_SIMPLE(ssize_t, "%zd") break; case emitter_type_size: - EMIT_SIMPLE(size_t, "zu") + EMIT_SIMPLE(size_t, "%zu") break; case emitter_type_string: str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"", @@ -156,17 +159,17 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, * anywhere near the fmt size. */ assert(str_written < BUF_SIZE); - emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width); - emitter_printf(emitter, fmt, buf); + emitter_printf(emitter, + emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf); break; case emitter_type_uint32: - EMIT_SIMPLE(uint32_t, FMTu32) + EMIT_SIMPLE(uint32_t, "%" FMTu32) break; case emitter_type_uint64: - EMIT_SIMPLE(uint64_t, FMTu64) + EMIT_SIMPLE(uint64_t, "%" FMTu64) break; case emitter_type_title: - EMIT_SIMPLE(char *const, "s"); + EMIT_SIMPLE(char *const, "%s"); break; default: unreachable(); -- cgit v0.12 From 14e4176758379875c4ef486d6c57327ed07edd86 Mon Sep 17 00:00:00 2001 From: zoulasc Date: Fri, 15 Mar 2019 12:59:56 -0400 Subject: Fix incorrect macro use. Compiling with warnings produces missing prototype warnings. 
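(Illustrative sketch, not part of either patch: how the __format_arg__ annotation applied to emitter_gen_fmt in the previous change keeps -Wformat checking intact when a helper returns a derived format string; this assumes GCC/Clang semantics for the attribute.)

#include <stdio.h>

/*
 * The attribute tells the compiler that the return value is a reworked
 * version of argument 1, so printf-style checking still applies at the
 * call site even though the format passes through a helper.
 */
static const char * __attribute__((__format_arg__(1)))
gen_fmt(const char *fmt) {
	/* A real helper would add width/justification; pass-through suffices here. */
	return fmt;
}

int
main(void) {
	int nregs = 512;
	/* -Wformat verifies "%d" against nregs through gen_fmt. */
	printf(gen_fmt("regions: %d\n"), nregs);
	return 0;
}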
--- include/jemalloc/internal/extent_externs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/jemalloc/internal/extent_externs.h b/include/jemalloc/internal/extent_externs.h index 5d53aad..8aba576 100644 --- a/include/jemalloc/internal/extent_externs.h +++ b/include/jemalloc/internal/extent_externs.h @@ -24,7 +24,7 @@ size_t extent_size_quantize_floor(size_t size); size_t extent_size_quantize_ceil(size_t size); #endif -rb_proto(, extent_avail_, extent_tree_t, extent_t) +ph_proto(, extent_avail_, extent_tree_t, extent_t) ph_proto(, extent_heap_, extent_heap_t, extent_t) bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, -- cgit v0.12 From 7f7935cf7805036d42fb510592ab8b40bcfb0690 Mon Sep 17 00:00:00 2001 From: zoulasc Date: Fri, 15 Mar 2019 20:19:16 -0400 Subject: Add an autoconf feature test for format_arg and a jemalloc-specific macro for it. --- configure.ac | 12 ++++++++++++ include/jemalloc/internal/emitter.h | 2 +- include/jemalloc/jemalloc_defs.h.in | 3 +++ include/jemalloc/jemalloc_macros.h.in | 6 ++++++ 4 files changed, 22 insertions(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 96f76d3..9cc2a6b 100644 --- a/configure.ac +++ b/configure.ac @@ -851,6 +851,18 @@ if test "x${je_cv_format_printf}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ]) fi +dnl Check for format_arg(...) attribute support. +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) +JE_COMPILABLE([format(printf, ...) attribute], [#include ], + [const char * __attribute__((__format_arg__(1))) foo(const char *format);], + [je_cv_format_arg]) +JE_CFLAGS_RESTORE() +if test "x${je_cv_format_arg}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_ARG], [ ]) +fi + dnl Support optional additions to rpath. AC_ARG_WITH([rpath], [AS_HELP_STRING([--with-rpath=], [Colon-separated rpath (ELF systems only)])], diff --git a/include/jemalloc/internal/emitter.h b/include/jemalloc/internal/emitter.h index 981dbe0..542bc79 100644 --- a/include/jemalloc/internal/emitter.h +++ b/include/jemalloc/internal/emitter.h @@ -86,7 +86,7 @@ emitter_printf(emitter_t *emitter, const char *format, ...) { va_end(ap); } -static inline const char * __attribute__((__format_arg__(3))) +static inline const char * JEMALLOC_FORMAT_ARG(3) emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier, emitter_justify_t justify, int width) { size_t written; diff --git a/include/jemalloc/jemalloc_defs.h.in b/include/jemalloc/jemalloc_defs.h.in index 6d89435..11c3918 100644 --- a/include/jemalloc/jemalloc_defs.h.in +++ b/include/jemalloc/jemalloc_defs.h.in @@ -4,6 +4,9 @@ /* Defined if alloc_size attribute is supported. */ #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +/* Defined if format_arg(...) attribute is supported. */ +#undef JEMALLOC_HAVE_ATTR_FORMAT_ARG + /* Defined if format(gnu_printf, ...) attribute is supported. 
*/ #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF diff --git a/include/jemalloc/jemalloc_macros.h.in b/include/jemalloc/jemalloc_macros.h.in index a00ce11..59e2955 100644 --- a/include/jemalloc/jemalloc_macros.h.in +++ b/include/jemalloc/jemalloc_macros.h.in @@ -69,6 +69,7 @@ # define JEMALLOC_EXPORT __declspec(dllimport) # endif # endif +# define JEMALLOC_FORMAT_ARG(i) # define JEMALLOC_FORMAT_PRINTF(s, i) # define JEMALLOC_NOINLINE __declspec(noinline) # ifdef __cplusplus @@ -96,6 +97,11 @@ # ifndef JEMALLOC_EXPORT # define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) # endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG +# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3)) +# else +# define JEMALLOC_FORMAT_ARG(i) +# endif # ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF # define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) # elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) -- cgit v0.12 From f4d24f05e1f270c43bc4129c0d18d673b8ac85b8 Mon Sep 17 00:00:00 2001 From: David Goldblatt Date: Tue, 19 Mar 2019 16:04:35 -0700 Subject: Move extra size checks behind a config flag. This will let us turn that flag into a generic "turn on runtime checks" flag that guards other functionality we have planned. --- configure.ac | 22 +++++++++++----------- .../jemalloc/internal/jemalloc_internal_defs.h.in | 4 ++-- include/jemalloc/internal/jemalloc_preamble.h.in | 19 +++++++++++++++++++ src/tcache.c | 17 ++++++++--------- 4 files changed, 40 insertions(+), 22 deletions(-) diff --git a/configure.ac b/configure.ac index 9cc2a6b..7a83a1a 100644 --- a/configure.ac +++ b/configure.ac @@ -1418,22 +1418,22 @@ if test "x$enable_readlinkat" = "x1" ; then fi AC_SUBST([enable_readlinkat]) -dnl Avoid the extra size checking by default -AC_ARG_ENABLE([extra-size-check], - [AS_HELP_STRING([--enable-extra-size-check], - [Perform additonal size related sanity checks])], -[if test "x$enable_extra_size_check" = "xno" ; then - enable_extra_size_check="0" +dnl Avoid extra safety checks by default +AC_ARG_ENABLE([opt-safety-checks], + [AS_HELP_STRING([--enable-opt-safety-checks], + [Perform certain low-overhead checks, even in opt mode])], +[if test "x$enable_opt_safety_checks" = "xno" ; then + enable_opt_safety_checks="0" else - enable_extra_size_check="1" + enable_opt_safety_checks="1" fi ], -[enable_extra_size_check="0"] +[enable_opt_safety_checks="0"] ) -if test "x$enable_extra_size_check" = "x1" ; then - AC_DEFINE([JEMALLOC_EXTRA_SIZE_CHECK], [ ]) +if test "x$enable_opt_safety_checks" = "x1" ; then + AC_DEFINE([JEMALLOC_OPT_SAFETY_CHECKS], [ ]) fi -AC_SUBST([enable_extra_size_check]) +AC_SUBST([enable_opt_safety_checks]) JE_COMPILABLE([a program using __builtin_unreachable], [ void foo (void) { diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in index 21b6514..c442a21 100644 --- a/include/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in @@ -360,7 +360,7 @@ */ #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE -/* Performs additional size-matching sanity checks when defined. */ -#undef JEMALLOC_EXTRA_SIZE_CHECK +/* Performs additional safety checks when defined. 
*/ +#undef JEMALLOC_OPT_SAFETY_CHECKS #endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/include/jemalloc/internal/jemalloc_preamble.h.in b/include/jemalloc/internal/jemalloc_preamble.h.in index 4bfdb32..9fd2a7f 100644 --- a/include/jemalloc/internal/jemalloc_preamble.h.in +++ b/include/jemalloc/internal/jemalloc_preamble.h.in @@ -161,6 +161,25 @@ static const bool config_log = false #endif ; +/* + * Are extra safety checks enabled; things like checking the size of sized + * deallocations, double-frees, etc. + */ +static const bool config_opt_safety_checks = +#if defined(JEMALLOC_EXTRA_SAFETY_CHECKS) + true +#elif defined(JEMALLOC_DEBUG) + /* + * This lets us only guard safety checks by one flag instead of two; fast + * checks can guard solely by config_opt_safety_checks and run in debug mode + * too. + */ + true +#else + false +#endif + ; + #if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU) /* Currently percpu_arena depends on sched_getcpu. */ #define JEMALLOC_PERCPU_ARENA diff --git a/src/tcache.c b/src/tcache.c index e7b970d..160b0b7 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -101,7 +101,6 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, } /* Enabled with --enable-extra-size-check. */ -#ifdef JEMALLOC_EXTRA_SIZE_CHECK static void tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind, size_t nflush, extent_t **extents){ @@ -129,7 +128,6 @@ tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind, abort(); } } -#endif void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, @@ -144,15 +142,16 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, unsigned nflush = tbin->ncached - rem; VARIABLE_ARRAY(extent_t *, item_extent, nflush); -#ifndef JEMALLOC_EXTRA_SIZE_CHECK /* Look up extent once per item. */ - for (unsigned i = 0 ; i < nflush; i++) { - item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); + if (config_opt_safety_checks) { + tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, + nflush, item_extent); + } else { + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), + *(tbin->avail - 1 - i)); + } } -#else - tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush, - item_extent); -#endif while (nflush > 0) { /* Lock the arena bin associated with the first object. */ extent_t *extent = item_extent[0]; -- cgit v0.12 From f95a88fcd92e8ead1a6c5c8b2ca8c401c6eba162 Mon Sep 17 00:00:00 2001 From: David Goldblatt Date: Fri, 22 Mar 2019 17:13:45 -0700 Subject: Safety checks: Expose config value via mallctl and stats. 
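(Illustrative sketch, not part of the patch: once this change lands, a consumer can read the newly exposed build flag through the usual mallctl path; assumes the prefix-less public API from <jemalloc/jemalloc.h>.)

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
report_safety_checks(void) {
	bool checks;
	size_t sz = sizeof(checks);
	/* config.opt_safety_checks is a read-only boolean reflecting the build setting. */
	if (mallctl("config.opt_safety_checks", &checks, &sz, NULL, 0) == 0) {
		printf("opt-safety-checks built in: %s\n",
		    checks ? "true" : "false");
	}
}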
--- src/ctl.c | 3 +++ src/stats.c | 1 + 2 files changed, 4 insertions(+) diff --git a/src/ctl.c b/src/ctl.c index 193d2b0..c113bf2 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -72,6 +72,7 @@ CTL_PROTO(config_debug) CTL_PROTO(config_fill) CTL_PROTO(config_lazy_lock) CTL_PROTO(config_malloc_conf) +CTL_PROTO(config_opt_safety_checks) CTL_PROTO(config_prof) CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libunwind) @@ -286,6 +287,7 @@ static const ctl_named_node_t config_node[] = { {NAME("fill"), CTL(config_fill)}, {NAME("lazy_lock"), CTL(config_lazy_lock)}, {NAME("malloc_conf"), CTL(config_malloc_conf)}, + {NAME("opt_safety_checks"), CTL(config_opt_safety_checks)}, {NAME("prof"), CTL(config_prof)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, @@ -1706,6 +1708,7 @@ CTL_RO_CONFIG_GEN(config_debug, bool) CTL_RO_CONFIG_GEN(config_fill, bool) CTL_RO_CONFIG_GEN(config_lazy_lock, bool) CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) +CTL_RO_CONFIG_GEN(config_opt_safety_checks, bool) CTL_RO_CONFIG_GEN(config_prof, bool) CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) diff --git a/src/stats.c b/src/stats.c index 4c427e0..2be9a7e 100644 --- a/src/stats.c +++ b/src/stats.c @@ -976,6 +976,7 @@ stats_general_print(emitter_t *emitter) { emitter_kv(emitter, "malloc_conf", "config.malloc_conf", emitter_type_string, &config_malloc_conf); + CONFIG_WRITE_BOOL(opt_safety_checks); CONFIG_WRITE_BOOL(prof); CONFIG_WRITE_BOOL(prof_libgcc); CONFIG_WRITE_BOOL(prof_libunwind); -- cgit v0.12 From b92c9a1a81f3f68da87afe5887d8450fef0700d3 Mon Sep 17 00:00:00 2001 From: David Goldblatt Date: Wed, 20 Mar 2019 13:06:53 -0700 Subject: Safety checks: Indirect through a function. This will let us share code on failure pathways. --- Makefile.in | 1 + include/jemalloc/internal/safety_check.h | 6 ++++++ src/safety_check.c | 11 +++++++++++ src/tcache.c | 3 ++- 4 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 include/jemalloc/internal/safety_check.h create mode 100644 src/safety_check.c diff --git a/Makefile.in b/Makefile.in index 3a09442..8b4a98f 100644 --- a/Makefile.in +++ b/Makefile.in @@ -117,6 +117,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/prng.c \ $(srcroot)src/prof.c \ $(srcroot)src/rtree.c \ + $(srcroot)src/safety_check.c \ $(srcroot)src/stats.c \ $(srcroot)src/sc.c \ $(srcroot)src/sz.c \ diff --git a/include/jemalloc/internal/safety_check.h b/include/jemalloc/internal/safety_check.h new file mode 100644 index 0000000..52157d1 --- /dev/null +++ b/include/jemalloc/internal/safety_check.h @@ -0,0 +1,6 @@ +#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H +#define JEMALLOC_INTERNAL_SAFETY_CHECK_H + +void safety_check_fail(const char *format, ...); + +#endif /*JEMALLOC_INTERNAL_SAFETY_CHECK_H */ diff --git a/src/safety_check.c b/src/safety_check.c new file mode 100644 index 0000000..cbec190 --- /dev/null +++ b/src/safety_check.c @@ -0,0 +1,11 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +void safety_check_fail(const char *format, ...)
{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + abort(); +} diff --git a/src/tcache.c b/src/tcache.c index 160b0b7..034c69a 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -4,6 +4,7 @@ #include "jemalloc/internal/assert.h" #include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/safety_check.h" #include "jemalloc/internal/sc.h" /******************************************************************************/ @@ -122,7 +123,7 @@ tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind, sz_sum -= szind; } if (sz_sum != 0) { - malloc_printf(": size mismatch in thread cache " + safety_check_fail(": size mismatch in thread cache " "detected, likely caused by sized deallocation bugs by " "application. Abort.\n"); abort(); -- cgit v0.12 From 33e1dad6803ea3e20971b46baa299045f736d22a Mon Sep 17 00:00:00 2001 From: David Goldblatt Date: Fri, 22 Mar 2019 12:53:11 -0700 Subject: Safety checks: Add a redzoning feature. --- Makefile.in | 1 + include/jemalloc/internal/arena_externs.h | 2 +- include/jemalloc/internal/arena_inlines_b.h | 2 +- include/jemalloc/internal/jemalloc_preamble.h.in | 2 +- include/jemalloc/internal/prof_inlines_b.h | 3 +- include/jemalloc/internal/safety_check.h | 20 +++ src/arena.c | 22 +++- src/jemalloc.c | 1 + src/prof.c | 16 +-- src/safety_check.c | 19 ++- test/unit/safety_check.c | 156 +++++++++++++++++++++++ test/unit/safety_check.sh | 5 + 12 files changed, 230 insertions(+), 19 deletions(-) create mode 100644 test/unit/safety_check.c create mode 100644 test/unit/safety_check.sh diff --git a/Makefile.in b/Makefile.in index 8b4a98f..38722ff 100644 --- a/Makefile.in +++ b/Makefile.in @@ -210,6 +210,7 @@ TESTS_UNIT := \ $(srcroot)test/unit/rb.c \ $(srcroot)test/unit/retained.c \ $(srcroot)test/unit/rtree.c \ + $(srcroot)test/unit/safety_check.c \ $(srcroot)test/unit/seq.c \ $(srcroot)test/unit/SFMT.c \ $(srcroot)test/unit/sc.c \ diff --git a/include/jemalloc/internal/arena_externs.h b/include/jemalloc/internal/arena_externs.h index 2bdddb7..a4523ae 100644 --- a/include/jemalloc/internal/arena_externs.h +++ b/include/jemalloc/internal/arena_externs.h @@ -60,7 +60,7 @@ void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero); void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache); -void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize); +void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize); void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h index 614dedd..7e61a44 100644 --- a/include/jemalloc/internal/arena_inlines_b.h +++ b/include/jemalloc/internal/arena_inlines_b.h @@ -90,7 +90,7 @@ arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, assert(ptr != NULL); extent_t *extent = iealloc(tsdn, ptr); - /* + /* * Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're * sure we have a sampled allocation. */ diff --git a/include/jemalloc/internal/jemalloc_preamble.h.in b/include/jemalloc/internal/jemalloc_preamble.h.in index 9fd2a7f..3418cbf 100644 --- a/include/jemalloc/internal/jemalloc_preamble.h.in +++ b/include/jemalloc/internal/jemalloc_preamble.h.in @@ -166,7 +166,7 @@ static const bool config_log = * deallocations, double-frees, etc. 
*/ static const bool config_opt_safety_checks = -#if defined(JEMALLOC_EXTRA_SAFETY_CHECKS) +#ifdef JEMALLOC_OPT_SAFETY_CHECKS true #elif defined(JEMALLOC_DEBUG) /* diff --git a/include/jemalloc/internal/prof_inlines_b.h b/include/jemalloc/internal/prof_inlines_b.h index 8358bff..8ba8a1e 100644 --- a/include/jemalloc/internal/prof_inlines_b.h +++ b/include/jemalloc/internal/prof_inlines_b.h @@ -1,6 +1,7 @@ #ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H #define JEMALLOC_INTERNAL_PROF_INLINES_B_H +#include "jemalloc/internal/safety_check.h" #include "jemalloc/internal/sz.h" JEMALLOC_ALWAYS_INLINE bool @@ -71,7 +72,7 @@ prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { JEMALLOC_ALWAYS_INLINE void prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx, - nstime_t t) { + nstime_t t) { cassert(config_prof); assert(ptr != NULL); diff --git a/include/jemalloc/internal/safety_check.h b/include/jemalloc/internal/safety_check.h index 52157d1..1b53fc4 100644 --- a/include/jemalloc/internal/safety_check.h +++ b/include/jemalloc/internal/safety_check.h @@ -2,5 +2,25 @@ #define JEMALLOC_INTERNAL_SAFETY_CHECK_H void safety_check_fail(const char *format, ...); +/* Can set to NULL for a default. */ +void safety_check_set_abort(void (*abort_fn)()); + +JEMALLOC_ALWAYS_INLINE void +safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) { + assert(usize < bumped_usize); + for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) { + *((unsigned char *)ptr + usize) = 0xBC; + } +} + +JEMALLOC_ALWAYS_INLINE void +safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize) +{ + for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) { + if (unlikely(*((unsigned char *)ptr + usize) != 0xBC)) { + safety_check_fail("Use after free error\n"); + } + } +} #endif /*JEMALLOC_INTERNAL_SAFETY_CHECK_H */ diff --git a/src/arena.c b/src/arena.c index 60eac23..084df85 100644 --- a/src/arena.c +++ b/src/arena.c @@ -8,6 +8,7 @@ #include "jemalloc/internal/extent_mmap.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/safety_check.h" #include "jemalloc/internal/util.h" JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS @@ -1531,12 +1532,16 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, } void -arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) { +arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) { cassert(config_prof); assert(ptr != NULL); assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); assert(usize <= SC_SMALL_MAXCLASS); + if (config_opt_safety_checks) { + safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS); + } + rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); @@ -1577,10 +1582,19 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, assert(opt_prof); extent_t *extent = iealloc(tsdn, ptr); - size_t usize = arena_prof_demote(tsdn, extent, ptr); - if (usize <= tcache_maxclass) { + size_t usize = extent_usize_get(extent); + size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr); + if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) { + /* + * Currently, we only do redzoning for small sampled + * allocations. 
+ */ + assert(bumped_usize == SC_LARGE_MINCLASS); + safety_check_verify_redzone(ptr, usize, bumped_usize); + } + if (bumped_usize <= tcache_maxclass) { tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, - sz_size2index(usize), slow_path); + sz_size2index(bumped_usize), slow_path); } else { large_dalloc(tsdn, extent); } diff --git a/src/jemalloc.c b/src/jemalloc.c index 7bc7b95..818ce3a 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -13,6 +13,7 @@ #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/safety_check.h" #include "jemalloc/internal/sc.h" #include "jemalloc/internal/spin.h" #include "jemalloc/internal/sz.h" diff --git a/src/prof.c b/src/prof.c index 4d7d65d..a4e30f4 100644 --- a/src/prof.c +++ b/src/prof.c @@ -125,7 +125,7 @@ struct prof_thr_node_s { uint64_t thr_uid; /* Variable size based on thr_name_sz. */ char name[1]; -}; +}; typedef struct prof_alloc_node_s prof_alloc_node_t; @@ -388,7 +388,7 @@ prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { new_node->next = NULL; new_node->index = log_bt_index; - /* + /* * Copy the backtrace: bt is inside a tdata or gctx, which * might die before prof_log_stop is called. */ @@ -402,7 +402,7 @@ prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { } else { return node->index; } -} +} static size_t prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { assert(prof_logging_state == prof_logging_state_started); @@ -452,7 +452,7 @@ prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { * it's being destroyed). */ return; - } + } malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); @@ -514,11 +514,11 @@ prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { } label_done: - malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); } void -prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, +prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); @@ -2604,8 +2604,8 @@ static void prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { emitter_json_array_kv_begin(emitter, "stack_traces"); prof_bt_node_t *bt_node = log_bt_first; - prof_bt_node_t *bt_old_node; - /* + prof_bt_node_t *bt_old_node; + /* * Calculate how many hex digits we need: twice number of bytes, two for * "0x", and then one more for terminating '\0'. */ diff --git a/src/safety_check.c b/src/safety_check.c index cbec190..804155d 100644 --- a/src/safety_check.c +++ b/src/safety_check.c @@ -1,11 +1,24 @@ #include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_internal_includes.h" +static void (*safety_check_abort)(const char *message); + +void safety_check_set_abort(void (*abort_fn)(const char *)) { + safety_check_abort = abort_fn; +} + void safety_check_fail(const char *format, ...) 
{ - va_list ap; + char buf[MALLOC_PRINTF_BUFSIZE]; + va_list ap; va_start(ap, format); - malloc_vcprintf(NULL, NULL, format, ap); + malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap); va_end(ap); - abort(); + + if (safety_check_abort == NULL) { + malloc_write(buf); + abort(); + } else { + safety_check_abort(buf); + } } diff --git a/test/unit/safety_check.c b/test/unit/safety_check.c new file mode 100644 index 0000000..bf4bd86 --- /dev/null +++ b/test/unit/safety_check.c @@ -0,0 +1,156 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/safety_check.h" + +/* + * Note that we get called through safety_check.sh, which turns on sampling for + * everything. + */ + +bool fake_abort_called; +void fake_abort(const char *message) { + (void)message; + fake_abort_called = true; +} + +TEST_BEGIN(test_malloc_free_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! */ + char* ptr = malloc(128); + ptr[128] = 0; + free(ptr); + safety_check_set_abort(NULL); + + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; +} +TEST_END + +TEST_BEGIN(test_mallocx_dallocx_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! */ + char* ptr = mallocx(128, 0); + ptr[128] = 0; + dallocx(ptr, 0); + safety_check_set_abort(NULL); + + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; +} +TEST_END + +TEST_BEGIN(test_malloc_sdallocx_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! */ + char* ptr = malloc(128); + ptr[128] = 0; + sdallocx(ptr, 128, 0); + safety_check_set_abort(NULL); + + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; +} +TEST_END + +TEST_BEGIN(test_realloc_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! */ + char* ptr = malloc(128); + ptr[128] = 0; + ptr = realloc(ptr, 129); + safety_check_set_abort(NULL); + free(ptr); + + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; +} +TEST_END + +TEST_BEGIN(test_rallocx_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! */ + char* ptr = malloc(128); + ptr[128] = 0; + ptr = rallocx(ptr, 129, 0); + safety_check_set_abort(NULL); + free(ptr); + + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; +} +TEST_END + +TEST_BEGIN(test_xallocx_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! 
*/ + char* ptr = malloc(128); + ptr[128] = 0; + size_t result = xallocx(ptr, 129, 0, 0); + assert_zu_eq(result, 128, ""); + free(ptr); + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; + safety_check_set_abort(NULL); +} +TEST_END + +TEST_BEGIN(test_realloc_no_overflow) { + char* ptr = malloc(128); + ptr = realloc(ptr, 256); + ptr[128] = 0; + ptr[255] = 0; + free(ptr); + + ptr = malloc(128); + ptr = realloc(ptr, 64); + ptr[63] = 0; + ptr[0] = 0; + free(ptr); +} +TEST_END + +TEST_BEGIN(test_rallocx_no_overflow) { + char* ptr = malloc(128); + ptr = rallocx(ptr, 256, 0); + ptr[128] = 0; + ptr[255] = 0; + free(ptr); + + ptr = malloc(128); + ptr = rallocx(ptr, 64, 0); + ptr[63] = 0; + ptr[0] = 0; + free(ptr); +} +TEST_END + +int +main(void) { + return test( + test_malloc_free_overflow, + test_mallocx_dallocx_overflow, + test_malloc_sdallocx_overflow, + test_realloc_overflow, + test_rallocx_overflow, + test_xallocx_overflow, + test_realloc_no_overflow, + test_rallocx_no_overflow); +} diff --git a/test/unit/safety_check.sh b/test/unit/safety_check.sh new file mode 100644 index 0000000..8fcc7d8 --- /dev/null +++ b/test/unit/safety_check.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,lg_prof_sample:0" +fi -- cgit v0.12 From 21cfe59ff7b10a61dabe26cd3dbfb7a255e1f5e8 Mon Sep 17 00:00:00 2001 From: David Goldblatt Date: Fri, 22 Mar 2019 17:08:53 -0700 Subject: Safety checks: Run tests by default --- .travis.yml | 25 +++++++++++++++++++++++++ scripts/gen_run_tests.py | 1 + scripts/gen_travis.py | 1 + 3 files changed, 27 insertions(+) diff --git a/.travis.yml b/.travis.yml index 40b2eb5..2da5da8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,6 +24,8 @@ matrix: - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" @@ -42,6 +44,8 @@ matrix: - os: osx env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" @@ -55,6 +59,8 @@ matrix: - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" @@ -75,6 +81,9 @@ matrix: env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" 
CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" addons: *gcc_multilib - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" addons: *gcc_multilib - os: linux @@ -93,6 +102,8 @@ matrix: - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" @@ -105,6 +116,8 @@ matrix: - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" @@ -115,6 +128,8 @@ matrix: - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" @@ -123,6 +138,8 @@ matrix: - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" @@ -131,6 +148,14 @@ matrix: - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: 
linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" - os: linux env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" diff --git a/scripts/gen_run_tests.py b/scripts/gen_run_tests.py index 5052b3e..a414f81 100755 --- a/scripts/gen_run_tests.py +++ b/scripts/gen_run_tests.py @@ -40,6 +40,7 @@ possible_config_opts = [ '--enable-debug', '--enable-prof', '--disable-stats', + '--enable-opt-safety-checks', ] if bits_64: possible_config_opts.append('--with-lg-vaddr=56') diff --git a/scripts/gen_travis.py b/scripts/gen_travis.py index 65b0b67..f1478c6 100755 --- a/scripts/gen_travis.py +++ b/scripts/gen_travis.py @@ -46,6 +46,7 @@ configure_flag_unusuals = [ '--enable-prof', '--disable-stats', '--disable-libdl', + '--enable-opt-safety-checks', ] malloc_conf_unusuals = [ -- cgit v0.12 From 1aabab5fdca1cd76be3900e9272ef83549006ac0 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 15 Apr 2019 15:36:31 -0700 Subject: Enforce TLS_MODEL attribute. Caught by @zoulasc in #1460. The attribute needs to be added in the headers as well. --- include/jemalloc/internal/tsd_malloc_thread_cleanup.h | 6 ++++-- include/jemalloc/internal/tsd_tls.h | 4 +++- src/tsd.c | 6 +++--- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/include/jemalloc/internal/tsd_malloc_thread_cleanup.h index bf8801e..65852d5 100644 --- a/include/jemalloc/internal/tsd_malloc_thread_cleanup.h +++ b/include/jemalloc/internal/tsd_malloc_thread_cleanup.h @@ -3,8 +3,10 @@ #endif #define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H -extern __thread tsd_t tsd_tls; -extern __thread bool tsd_initialized; +#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL + +extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls; +extern JEMALLOC_TSD_TYPE_ATTR(bool) tsd_initialized; extern bool tsd_booted; /* Initialization/cleanup. 
*/ diff --git a/include/jemalloc/internal/tsd_tls.h b/include/jemalloc/internal/tsd_tls.h index f4f165c..7d6c805 100644 --- a/include/jemalloc/internal/tsd_tls.h +++ b/include/jemalloc/internal/tsd_tls.h @@ -3,7 +3,9 @@ #endif #define JEMALLOC_INTERNAL_TSD_TLS_H -extern __thread tsd_t tsd_tls; +#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL + +extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls; extern pthread_key_t tsd_tsd; extern bool tsd_booted; diff --git a/src/tsd.c b/src/tsd.c index d5fb4d6..a31f6b9 100644 --- a/src/tsd.c +++ b/src/tsd.c @@ -17,11 +17,11 @@ JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; -__thread bool JEMALLOC_TLS_MODEL tsd_initialized = false; +JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER; +JEMALLOC_TSD_TYPE_ATTR(bool) JEMALLOC_TLS_MODEL tsd_initialized = false; bool tsd_booted = false; #elif (defined(JEMALLOC_TLS)) -__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; +JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER; pthread_key_t tsd_tsd; bool tsd_booted = false; #elif (defined(_WIN32)) -- cgit v0.12 From 498f47e1ec83431426cdff256c23eceade41b4ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=A3=8E?= Date: Mon, 22 Apr 2019 14:21:12 +0800 Subject: Fix typo derived from tcmalloc's pprof The same pr is submitted into gperftools: https://github.com/gperftools/gperftools/pull/1105 --- bin/jeprof.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/jeprof.in b/bin/jeprof.in index 16a76c6..3ed408c 100644 --- a/bin/jeprof.in +++ b/bin/jeprof.in @@ -5367,7 +5367,7 @@ sub GetProcedureBoundaries { my $demangle_flag = ""; my $cppfilt_flag = ""; my $to_devnull = ">$dev_null 2>&1"; - if (system(ShellEscape($nm, "--demangle", "image") . $to_devnull) == 0) { + if (system(ShellEscape($nm, "--demangle", $image) . 
$to_devnull) == 0) { # In this mode, we do "nm --demangle " $demangle_flag = "--demangle"; $cppfilt_flag = ""; -- cgit v0.12 From 702d76dbd03e4fe7347399e1e322c80102c95544 Mon Sep 17 00:00:00 2001 From: Fabrice Fontaine Date: Fri, 19 Apr 2019 13:44:18 +0200 Subject: configure.ac: Add an option to disable doc Signed-off-by: Fabrice Fontaine --- Makefile.in | 7 ++++++- configure.ac | 14 ++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/Makefile.in b/Makefile.in index 38722ff..7128b00 100644 --- a/Makefile.in +++ b/Makefile.in @@ -56,6 +56,7 @@ cfghdrs_out := @cfghdrs_out@ cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@) cfgoutputs_out := @cfgoutputs_out@ enable_autogen := @enable_autogen@ +enable_doc := @enable_doc@ enable_shared := @enable_shared@ enable_static := @enable_static@ enable_prof := @enable_prof@ @@ -516,7 +517,11 @@ done install_doc: build_doc install_doc_html install_doc_man -install: install_bin install_include install_lib install_doc +install: install_bin install_include install_lib + +ifeq ($(enable_doc), 1) +install: install_doc +endif tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE)) tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE)) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%$(EXE)) diff --git a/configure.ac b/configure.ac index 7a83a1a..39a540f 100644 --- a/configure.ac +++ b/configure.ac @@ -893,6 +893,19 @@ AC_PROG_RANLIB AC_PATH_PROG([LD], [ld], [false], [$PATH]) AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH]) +dnl Enable documentation +AC_ARG_ENABLE([doc], + [AS_HELP_STRING([--enable-documentation], [Build documentation])], +if test "x$enable_doc" = "xno" ; then + enable_doc="0" +else + enable_doc="1" +fi +, +enable_doc="1" +) +AC_SUBST([enable_doc]) + dnl Enable shared libs AC_ARG_ENABLE([shared], [AS_HELP_STRING([--enable-shared], [Build shared libaries])], @@ -2369,6 +2382,7 @@ AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE]) AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}]) AC_MSG_RESULT([install_suffix : ${install_suffix}]) AC_MSG_RESULT([malloc_conf : ${config_malloc_conf}]) +AC_MSG_RESULT([documentation : ${enable_doc}]) AC_MSG_RESULT([shared libs : ${enable_shared}]) AC_MSG_RESULT([static libs : ${enable_static}]) AC_MSG_RESULT([autogen : ${enable_autogen}]) -- cgit v0.12 From ae124b86849bb5464940db6731183dede6a70873 Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Thu, 18 Apr 2019 15:11:07 -0700 Subject: Improve size class header Mainly fixing typos. The only non-trivial change is in the computation for SC_NPSIZES, though the result wouldn't be any different when SC_NGROUP = 4 as is always the case at the moment. --- include/jemalloc/internal/sc.h | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/include/jemalloc/internal/sc.h b/include/jemalloc/internal/sc.h index ef0a451..9a099d8 100644 --- a/include/jemalloc/internal/sc.h +++ b/include/jemalloc/internal/sc.h @@ -18,7 +18,7 @@ * each one covers allocations for base / SC_NGROUP possible allocation sizes. * We call that value (base / SC_NGROUP) the delta of the group. Each size class * is delta larger than the one before it (including the initial size class in a - * group, which is delta large than 2**base, the largest size class in the + * group, which is delta larger than base, the largest size class in the * previous group). * To make the math all work out nicely, we require that SC_NGROUP is a power of * two, and define it in terms of SC_LG_NGROUP. 
We'll often talk in terms of @@ -53,10 +53,11 @@ * classes; one per power of two, up until we hit the quantum size. There are * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes. * - * Next, we have a size class of size LG_QUANTUM. This can't be the start of a - * group in the sense we described above (covering a power of two range) since, - * if we divided into it to pick a value of delta, we'd get a delta smaller than - * (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which is against the rules. + * Next, we have a size class of size (1 << LG_QUANTUM). This can't be the + * start of a group in the sense we described above (covering a power of two + * range) since, if we divided into it to pick a value of delta, we'd get a + * delta smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which + * is against the rules. * * The first base we can divide by SC_NGROUP while still being at least * (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by @@ -196,7 +197,7 @@ (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1) #define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR) - /* The number of size classes that are a multiple of the page size. */ +/* The number of size classes that are a multiple of the page size. */ #define SC_NPSIZES ( \ /* Start with all the size classes. */ \ SC_NSIZES \ @@ -206,8 +207,20 @@ - SC_NPSEUDO \ /* And the tiny group. */ \ - SC_NTINY \ - /* Groups where ndelta*delta is not a multiple of the page size. */ \ - - (2 * (SC_NGROUP))) + /* Sizes where ndelta*delta is not a multiple of the page size. */ \ + - (SC_LG_NGROUP * SC_NGROUP)) +/* + * Note that the last line is computed as the sum of the second column in the + * following table: + * lg(base) | count of sizes to exclude + * ------------------------------|----------------------------- + * LG_PAGE - 1 | SC_NGROUP - 1 + * LG_PAGE | SC_NGROUP - 1 + * LG_PAGE + 1 | SC_NGROUP - 2 + * LG_PAGE + 2 | SC_NGROUP - 4 + * ... | ... + * LG_PAGE + (SC_LG_NGROUP - 1) | SC_NGROUP - (SC_NGROUP / 2) + */ /* * We declare a size class is binnable if size < page size * group. Or, in other -- cgit v0.12 From 7fc4f2a32c74701e40e98c8ac05aa7cf12d876c9 Mon Sep 17 00:00:00 2001 From: Doron Roberts-Kedes Date: Fri, 12 Apr 2019 07:08:50 -0400 Subject: Add nonfull_slabs to bin_stats_t. When config_stats is enabled track the size of bin->slabs_nonfull in the new nonfull_slabs counter in bin_stats_t. This metric should be useful for establishing an upper ceiling on the savings possible by meshing. --- doc/jemalloc.xml.in | 11 +++++++++++ include/jemalloc/internal/bin.h | 1 + include/jemalloc/internal/bin_stats.h | 3 +++ src/arena.c | 7 +++++++ src/ctl.c | 7 +++++++ src/stats.c | 7 +++++++ test/unit/stats.c | 7 ++++++- 7 files changed, 42 insertions(+), 1 deletion(-) diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in index fd0edb3..2bdbe97 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -2947,6 +2947,17 @@ struct extent_hooks_s { Current number of slabs. + + + + stats.arenas.<i>.bins.<j>.nonfull_slabs + (size_t) + r- + [] + + Current number of nonfull slabs. 
+ + stats.arenas.<i>.bins.<j>.mutex.{counter} diff --git a/include/jemalloc/internal/bin.h b/include/jemalloc/internal/bin.h index f542c88..8547e89 100644 --- a/include/jemalloc/internal/bin.h +++ b/include/jemalloc/internal/bin.h @@ -116,6 +116,7 @@ bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) { dst_bin_stats->nslabs += bin->stats.nslabs; dst_bin_stats->reslabs += bin->stats.reslabs; dst_bin_stats->curslabs += bin->stats.curslabs; + dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs; malloc_mutex_unlock(tsdn, &bin->lock); } diff --git a/include/jemalloc/internal/bin_stats.h b/include/jemalloc/internal/bin_stats.h index 86e673e..d04519c 100644 --- a/include/jemalloc/internal/bin_stats.h +++ b/include/jemalloc/internal/bin_stats.h @@ -45,6 +45,9 @@ struct bin_stats_s { /* Current number of slabs in this bin. */ size_t curslabs; + /* Current size of nonfull slabs heap in this bin. */ + size_t nonfull_slabs; + mutex_prof_data_t mutex_data; }; diff --git a/src/arena.c b/src/arena.c index 084df85..a0804f6 100644 --- a/src/arena.c +++ b/src/arena.c @@ -1002,11 +1002,17 @@ static void arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) { assert(extent_nfree_get(slab) > 0); extent_heap_insert(&bin->slabs_nonfull, slab); + if (config_stats) { + bin->stats.nonfull_slabs++; + } } static void arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) { extent_heap_remove(&bin->slabs_nonfull, slab); + if (config_stats) { + bin->stats.nonfull_slabs--; + } } static extent_t * @@ -1017,6 +1023,7 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) { } if (config_stats) { bin->stats.reslabs++; + bin->stats.nonfull_slabs--; } return slab; } diff --git a/src/ctl.c b/src/ctl.c index c113bf2..d258b8e 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -169,6 +169,7 @@ CTL_PROTO(stats_arenas_i_bins_j_nflushes) CTL_PROTO(stats_arenas_i_bins_j_nslabs) CTL_PROTO(stats_arenas_i_bins_j_nreslabs) CTL_PROTO(stats_arenas_i_bins_j_curslabs) +CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs) INDEX_PROTO(stats_arenas_i_bins_j) CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) @@ -454,6 +455,7 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, + {NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)}, {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} }; @@ -907,8 +909,11 @@ MUTEX_PROF_ARENA_MUTEXES if (!destroyed) { sdstats->bstats[i].curslabs += astats->bstats[i].curslabs; + sdstats->bstats[i].nonfull_slabs += + astats->bstats[i].nonfull_slabs; } else { assert(astats->bstats[i].curslabs == 0); + assert(astats->bstats[i].nonfull_slabs == 0); } malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, &astats->bstats[i].mutex_data); @@ -2966,6 +2971,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs, arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs, + arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t) static const ctl_named_node_t * stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, diff --git a/src/stats.c b/src/stats.c index 2be9a7e..d196666 100644 --- a/src/stats.c +++ b/src/stats.c @@ -294,6 +294,7 @@ stats_arena_bins_print(emitter_t *emitter, 
bool mutex, unsigned i, uint64_t upti COL_HDR(row, nshards, NULL, right, 9, unsigned) COL_HDR(row, curregs, NULL, right, 13, size) COL_HDR(row, curslabs, NULL, right, 13, size) + COL_HDR(row, nonfull_slabs, NULL, right, 15, size) COL_HDR(row, regs, NULL, right, 5, unsigned) COL_HDR(row, pgs, NULL, right, 4, size) /* To buffer a right- and left-justified column. */ @@ -337,6 +338,7 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti uint64_t nslabs; size_t reg_size, slab_size, curregs; size_t curslabs; + size_t nonfull_slabs; uint32_t nregs, nshards; uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nreslabs; @@ -372,6 +374,8 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti uint64_t); CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs, size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nonfull_slabs", i, j, &nonfull_slabs, + size_t); if (mutex) { mutex_stats_read_arena_bin(i, j, col_mutex64, @@ -395,6 +399,8 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti &nreslabs); emitter_json_kv(emitter, "curslabs", emitter_type_size, &curslabs); + emitter_json_kv(emitter, "nonfull_slabs", emitter_type_size, + &nonfull_slabs); if (mutex) { emitter_json_object_kv_begin(emitter, "mutex"); mutex_stats_emit(emitter, NULL, col_mutex64, @@ -434,6 +440,7 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti col_nshards.unsigned_val = nshards; col_curregs.size_val = curregs; col_curslabs.size_val = curslabs; + col_nonfull_slabs.size_val = nonfull_slabs; col_regs.unsigned_val = nregs; col_pgs.size_val = slab_size / page; col_util.str_val = util; diff --git a/test/unit/stats.c b/test/unit/stats.c index 4323bfa..646768e 100644 --- a/test/unit/stats.c +++ b/test/unit/stats.c @@ -228,7 +228,7 @@ gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) { TEST_BEGIN(test_stats_arenas_bins) { void *p; - size_t sz, curslabs, curregs; + size_t sz, curslabs, curregs, nonfull_slabs; uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nslabs, nreslabs; int expected = config_stats ? 0 : ENOENT; @@ -289,6 +289,9 @@ TEST_BEGIN(test_stats_arenas_bins) { gen_mallctl_str(cmd, "curslabs", arena_ind); assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nonfull_slabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, @@ -309,6 +312,8 @@ TEST_BEGIN(test_stats_arenas_bins) { "At least one slab should have been allocated"); assert_zu_gt(curslabs, 0, "At least one slab should be currently allocated"); + assert_zu_eq(nonfull_slabs, 0, + "slabs_nonfull should be empty"); } dallocx(p, 0); -- cgit v0.12 From b62d126df894dac00772eb5f3d170a1c1d3d1614 Mon Sep 17 00:00:00 2001 From: Dave Watson Date: Mon, 8 Apr 2019 09:37:58 -0700 Subject: Add max_active_fit to first_fit The max_active_fit check is currently only on the best_fit path, add it to the first_fit path also. 
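For intuition, a short sketch of the guard that this patch adds to the first-fit path (illustrative values only, not from the patch itself; it assumes lg_extent_max_active_fit is 6 and uses a 1 MiB cached extent versus an 8 KiB request):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void) {
		/* Mirrors the check added to extents_first_fit_locked(). */
		size_t candidate = (size_t)1 << 20;  /* sz_pind2sz(i): 1 MiB cached extent */
		size_t request = 8 * 1024;           /* size: 8 KiB allocation request */
		unsigned lg_fit = 6;                 /* assumed opt_lg_extent_max_active_fit */
		/* 1 MiB >> 6 == 16384; 16384 > 8192, so the extent is skipped. */
		bool size_ok = !((candidate >> lg_fit) > request);
		printf("reuse 1 MiB extent for 8 KiB request? %s\n", size_ok ? "yes" : "no");
		return 0;
	}

In other words, a cached extent is only reused when the request is at least 1/64th of the extent's size class, which keeps small requests from splitting large cached extents.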
--- src/extent.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/extent.c b/src/extent.c index 66cbf05..c8d1dd5 100644 --- a/src/extent.c +++ b/src/extent.c @@ -483,7 +483,16 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, assert(!extent_heap_empty(&extents->heaps[i])); extent_t *extent = extent_heap_first(&extents->heaps[i]); assert(extent_size_get(extent) >= size); - if (ret == NULL || extent_snad_comp(extent, ret) < 0) { + bool size_ok = true; + /* + * In order to reduce fragmentation, avoid reusing and splitting + * large extents for much smaller sizes. + */ + if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { + size_ok = false; + } + if (size_ok && + (ret == NULL || extent_snad_comp(extent, ret) < 0)) { ret = extent; } if (i == SC_NPSIZES) { -- cgit v0.12 From 56797512083fe1457163170dfa44ee5ec12abe5f Mon Sep 17 00:00:00 2001 From: Dave Watson Date: Thu, 7 Mar 2019 11:14:31 -0800 Subject: Remove best fit This option saves a few CPU cycles, but potentially adds a lot of fragmentation - so much so that there are workarounds like max_active. Instead, let's just drop it entirely. It only made a difference in one service I tested (.3% cpu regression), while many services saw a memory win (also small, less than 1% mem P99) --- src/extent.c | 40 ++++++++-------------------------------- 1 file changed, 8 insertions(+), 32 deletions(-) diff --git a/src/extent.c b/src/extent.c index c8d1dd5..e83d9c8 100644 --- a/src/extent.c +++ b/src/extent.c @@ -441,30 +441,6 @@ extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size, return NULL; } -/* Do any-best-fit extent selection, i.e. select any extent that best fits. */ -static extent_t * -extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, - size_t size) { - pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); - pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, - (size_t)pind); - if (i < SC_NPSIZES + 1) { - /* - * In order to reduce fragmentation, avoid reusing and splitting - * large extents for much smaller sizes. - */ - if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { - return NULL; - } - assert(!extent_heap_empty(&extents->heaps[i])); - extent_t *extent = extent_heap_first(&extents->heaps[i]); - assert(extent_size_get(extent) >= size); - return extent; - } - - return NULL; -} - /* * Do first-fit extent selection, i.e. select the oldest/lowest extent that is * large enough. @@ -487,12 +463,15 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, /* * In order to reduce fragmentation, avoid reusing and splitting * large extents for much smaller sizes. + * + * Only do check for dirty extents (delay_coalesce). */ - if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { + if (extents->delay_coalesce && + (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { size_ok = false; } if (size_ok && - (ret == NULL || extent_snad_comp(extent, ret) < 0)) { + (ret == NULL || extent_snad_comp(extent, ret) < 0)) { ret = extent; } if (i == SC_NPSIZES) { @@ -505,10 +484,8 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, } /* - * Do {best,first}-fit extent selection, where the selection policy choice is - * based on extents->delay_coalesce. Best-fit selection requires less - * searching, but its layout policy is less stable and may cause higher virtual - * memory fragmentation as a side effect. 
+ * Do first-fit extent selection, where the selection policy choice is + * based on extents->delay_coalesce. */ static extent_t * extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, @@ -521,8 +498,7 @@ extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, return NULL; } - extent_t *extent = extents->delay_coalesce ? - extents_best_fit_locked(tsdn, arena, extents, max_size) : + extent_t *extent = extents_first_fit_locked(tsdn, arena, extents, max_size); if (alignment > PAGE && extent == NULL) { -- cgit v0.12 From 259b15dec5bff8b67b331b63703aa8511c759077 Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Thu, 2 May 2019 16:22:10 -0700 Subject: Improve macro readability in malloc_conf_init Define more readable macros than yes and no. --- src/jemalloc.c | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/src/jemalloc.c b/src/jemalloc.c index 818ce3a..04ebe51 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1041,10 +1041,10 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS -#define CONF_MIN_no(um, min) false -#define CONF_MIN_yes(um, min) ((um) < (min)) -#define CONF_MAX_no(um, max) false -#define CONF_MAX_yes(um, max) ((um) > (max)) +#define CONF_DONT_CHECK_MIN(um, min) false +#define CONF_CHECK_MIN(um, min) ((um) < (min)) +#define CONF_DONT_CHECK_MAX(um, max) false +#define CONF_CHECK_MAX(um, max) ((um) > (max)) #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ if (CONF_MATCH(n)) { \ uintmax_t um; \ @@ -1058,21 +1058,17 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { "Invalid conf value", \ k, klen, v, vlen); \ } else if (clip) { \ - if (CONF_MIN_##check_min(um, \ - (t)(min))) { \ + if (check_min(um, (t)(min))) { \ o = (t)(min); \ } else if ( \ - CONF_MAX_##check_max(um, \ - (t)(max))) { \ + check_max(um, (t)(max))) { \ o = (t)(max); \ } else { \ o = (t)um; \ } \ } else { \ - if (CONF_MIN_##check_min(um, \ - (t)(min)) || \ - CONF_MAX_##check_max(um, \ - (t)(max))) { \ + if (check_min(um, (t)(min)) || \ + check_max(um, (t)(max))) { \ malloc_conf_error( \ "Out-of-range " \ "conf value", \ @@ -1167,7 +1163,8 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { continue; } CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, - UINT_MAX, yes, no, false) + UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, + false) if (CONF_MATCH("bin_shards")) { const char *bin_shards_segment_cur = v; size_t vlen_left = vlen; @@ -1249,11 +1246,12 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { * contention on the huge arena. 
*/ CONF_HANDLE_SIZE_T(opt_oversize_threshold, - "oversize_threshold", 0, SC_LARGE_MAXCLASS, no, yes, - false) + "oversize_threshold", 0, SC_LARGE_MAXCLASS, + CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false) CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, "lg_extent_max_active_fit", 0, - (sizeof(size_t) << 3), no, yes, false) + (sizeof(size_t) << 3), CONF_DONT_CHECK_MIN, + CONF_CHECK_MAX, false) if (strncmp("percpu_arena", k, klen) == 0) { bool match = false; @@ -1281,7 +1279,8 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { "background_thread"); CONF_HANDLE_SIZE_T(opt_max_background_threads, "max_background_threads", 1, - opt_max_background_threads, yes, yes, + opt_max_background_threads, + CONF_CHECK_MIN, CONF_CHECK_MAX, true); if (CONF_MATCH("slab_sizes")) { bool err; @@ -1317,7 +1316,8 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { "prof_thread_active_init") CONF_HANDLE_SIZE_T(opt_lg_prof_sample, "lg_prof_sample", 0, (sizeof(uint64_t) << 3) - - 1, no, yes, true) + - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, + true) CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, "lg_prof_interval", -1, @@ -1363,10 +1363,10 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { #undef CONF_MATCH #undef CONF_MATCH_VALUE #undef CONF_HANDLE_BOOL -#undef CONF_MIN_no -#undef CONF_MIN_yes -#undef CONF_MAX_no -#undef CONF_MAX_yes +#undef CONF_DONT_CHECK_MIN +#undef CONF_CHECK_MIN +#undef CONF_DONT_CHECK_MAX +#undef CONF_CHECK_MAX #undef CONF_HANDLE_T_U #undef CONF_HANDLE_UNSIGNED #undef CONF_HANDLE_SIZE_T -- cgit v0.12 From 13e88ae9700416b43bf88c596ea15c85bdb9f9e7 Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Wed, 15 May 2019 07:50:10 -0700 Subject: Fix assert in free fastpath rtree_szind_slab_read_fast() may have not initialized alloc_ctx.szind, unless after confirming the return is true. --- src/jemalloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/jemalloc.c b/src/jemalloc.c index 04ebe51..ec6b400 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -2744,12 +2744,12 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) { bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, (uintptr_t)ptr, &alloc_ctx.szind, &alloc_ctx.slab); - assert(alloc_ctx.szind != SC_NSIZES); /* Note: profiled objects will have alloc_ctx.slab set */ if (!res || !alloc_ctx.slab) { return false; } + assert(alloc_ctx.szind != SC_NSIZES); } else { /* * Check for both sizes that are too large, and for sampled objects. -- cgit v0.12 From 07c44847c24634d0d11f9ceab7318400ffc1a16e Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 6 May 2019 16:36:55 -0700 Subject: Track nfills and nflushes for arenas.i.small / large. Small is added purely for convenience. Large flushes wasn't tracked before and can be useful in analysis. Large fill simply reports nmalloc, since there is no batch fill for large currently. --- doc/jemalloc.xml.in | 44 +++++++++++++++++++++++++++++++++ include/jemalloc/internal/arena_stats.h | 16 +++++++++--- include/jemalloc/internal/ctl.h | 2 ++ src/arena.c | 9 +++++++ src/ctl.c | 37 +++++++++++++++++++++++---- src/stats.c | 36 +++++++++++++++++++++++++-- src/tcache.c | 8 +++--- 7 files changed, 138 insertions(+), 14 deletions(-) diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in index 2bdbe97..04a4764 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -2798,6 +2798,28 @@ struct extent_hooks_s { all bin size classes. 
+ + + stats.arenas.<i>.small.nfills + (uint64_t) + r- + [] + + Cumulative number of tcache fills by all small size + classes. + + + + + stats.arenas.<i>.small.nflushes + (uint64_t) + r- + [] + + Cumulative number of tcache flushes by all small size + classes. + + stats.arenas.<i>.large.allocated @@ -2848,6 +2870,28 @@ struct extent_hooks_s { all large size classes. + + + stats.arenas.<i>.large.nfills + (uint64_t) + r- + [] + + Cumulative number of tcache fills by all large size + classes. + + + + + stats.arenas.<i>.large.nflushes + (uint64_t) + r- + [] + + Cumulative number of tcache flushes by all large size + classes. + + stats.arenas.<i>.bins.<j>.nmalloc diff --git a/include/jemalloc/internal/arena_stats.h b/include/jemalloc/internal/arena_stats.h index ef1e25b..3ffe9c7 100644 --- a/include/jemalloc/internal/arena_stats.h +++ b/include/jemalloc/internal/arena_stats.h @@ -35,6 +35,13 @@ struct arena_stats_large_s { * periodically merges into this counter. */ arena_stats_u64_t nrequests; /* Partially derived. */ + /* + * Number of tcache fills / flushes for large (similarly, periodically + * merged). Note that there is no large tcache batch-fill currently + * (i.e. only fill 1 at a time); however flush may be batched. + */ + arena_stats_u64_t nfills; /* Partially derived. */ + arena_stats_u64_t nflushes; /* Partially derived. */ /* Current number of allocations of this size class. */ size_t curlextents; /* Derived. */ @@ -101,6 +108,8 @@ struct arena_stats_s { atomic_zu_t allocated_large; /* Derived. */ arena_stats_u64_t nmalloc_large; /* Derived. */ arena_stats_u64_t ndalloc_large; /* Derived. */ + arena_stats_u64_t nfills_large; /* Derived. */ + arena_stats_u64_t nflushes_large; /* Derived. */ arena_stats_u64_t nrequests_large; /* Derived. */ /* Number of bytes cached in tcache associated with this arena. */ @@ -240,11 +249,12 @@ arena_stats_accum_zu(atomic_zu_t *dst, size_t src) { } static inline void -arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, +arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, szind_t szind, uint64_t nrequests) { arena_stats_lock(tsdn, arena_stats); - arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind - - SC_NBINS].nrequests, nrequests); + arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS]; + arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests); + arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1); arena_stats_unlock(tsdn, arena_stats); } diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h index 775fdec..1d1aacc 100644 --- a/include/jemalloc/internal/ctl.h +++ b/include/jemalloc/internal/ctl.h @@ -39,6 +39,8 @@ typedef struct ctl_arena_stats_s { uint64_t nmalloc_small; uint64_t ndalloc_small; uint64_t nrequests_small; + uint64_t nfills_small; + uint64_t nflushes_small; bin_stats_t bstats[SC_NBINS]; arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; diff --git a/src/arena.c b/src/arena.c index a0804f6..f9336fe 100644 --- a/src/arena.c +++ b/src/arena.c @@ -151,6 +151,15 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, arena_stats_accum_u64(&astats->nrequests_large, nmalloc + nrequests); + /* nfill == nmalloc for large currently. 
*/ + arena_stats_accum_u64(&lstats[i].nfills, nmalloc); + arena_stats_accum_u64(&astats->nfills_large, nmalloc); + + uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nflushes); + arena_stats_accum_u64(&lstats[i].nflushes, nflush); + arena_stats_accum_u64(&astats->nflushes_large, nflush); + assert(nmalloc >= ndalloc); assert(nmalloc - ndalloc <= SIZE_T_MAX); size_t curlextents = (size_t)(nmalloc - ndalloc); diff --git a/src/ctl.c b/src/ctl.c index d258b8e..f0d51df 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -156,10 +156,14 @@ CTL_PROTO(stats_arenas_i_small_allocated) CTL_PROTO(stats_arenas_i_small_nmalloc) CTL_PROTO(stats_arenas_i_small_ndalloc) CTL_PROTO(stats_arenas_i_small_nrequests) +CTL_PROTO(stats_arenas_i_small_nfills) +CTL_PROTO(stats_arenas_i_small_nflushes) CTL_PROTO(stats_arenas_i_large_allocated) CTL_PROTO(stats_arenas_i_large_nmalloc) CTL_PROTO(stats_arenas_i_large_ndalloc) CTL_PROTO(stats_arenas_i_large_nrequests) +CTL_PROTO(stats_arenas_i_large_nfills) +CTL_PROTO(stats_arenas_i_large_nflushes) CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_nrequests) @@ -414,14 +418,18 @@ static const ctl_named_node_t stats_arenas_i_small_node[] = { {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} + {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}, + {NAME("nfills"), CTL(stats_arenas_i_small_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_small_nflushes)} }; static const ctl_named_node_t stats_arenas_i_large_node[] = { {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}, + {NAME("nfills"), CTL(stats_arenas_i_large_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_large_nflushes)} }; #define MUTEX_PROF_DATA_NODE(prefix) \ @@ -754,6 +762,8 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) { ctl_arena->astats->nmalloc_small = 0; ctl_arena->astats->ndalloc_small = 0; ctl_arena->astats->nrequests_small = 0; + ctl_arena->astats->nfills_small = 0; + ctl_arena->astats->nflushes_small = 0; memset(ctl_arena->astats->bstats, 0, SC_NBINS * sizeof(bin_stats_t)); memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) * @@ -785,6 +795,10 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { ctl_arena->astats->bstats[i].ndalloc; ctl_arena->astats->nrequests_small += ctl_arena->astats->bstats[i].nrequests; + ctl_arena->astats->nfills_small += + ctl_arena->astats->bstats[i].nfills; + ctl_arena->astats->nflushes_small += + ctl_arena->astats->bstats[i].nflushes; } } else { arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, @@ -867,6 +881,8 @@ MUTEX_PROF_ARENA_MUTEXES sdstats->nmalloc_small += astats->nmalloc_small; sdstats->ndalloc_small += astats->ndalloc_small; sdstats->nrequests_small += astats->nrequests_small; + sdstats->nfills_small += astats->nfills_small; + sdstats->nflushes_small += astats->nflushes_small; if (!destroyed) { accum_atomic_zu(&sdstats->astats.allocated_large, @@ -2847,6 +2863,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, arenas_i(mib[2])->astats->ndalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, 
arenas_i(mib[2])->astats->nrequests_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills, + arenas_i(mib[2])->astats->nfills_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes, + arenas_i(mib[2])->astats->nflushes_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, ATOMIC_RELAXED), size_t) @@ -2856,12 +2876,19 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, ctl_arena_stats_read_u64( &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t) /* - * Note: "nmalloc" here instead of "nrequests" in the read. This is intentional. + * Note: "nmalloc_large" here instead of "nfills" in the read. This is + * intentional (large has no batch fill). */ -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes, ctl_arena_stats_read_u64( - &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Intentional. */ + &arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t) /* Lock profiling related APIs below. */ #define RO_MUTEX_CTL_GEN(n, l) \ diff --git a/src/stats.c b/src/stats.c index d196666..55a5999 100644 --- a/src/stats.c +++ b/src/stats.c @@ -668,9 +668,11 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; size_t small_allocated; - uint64_t small_nmalloc, small_ndalloc, small_nrequests; + uint64_t small_nmalloc, small_ndalloc, small_nrequests, small_nfills, + small_nflushes; size_t large_allocated; - uint64_t large_nmalloc, large_ndalloc, large_nrequests; + uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills, + large_nflushes; size_t tcache_bytes; uint64_t uptime; @@ -828,11 +830,23 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, COL(alloc_count_row, count_nrequests_ps, right, 10, title); col_count_nrequests_ps.str_val = "(#/sec)"; + COL(alloc_count_row, count_nfills, right, 16, title); + col_count_nfills.str_val = "nfill"; + COL(alloc_count_row, count_nfills_ps, right, 10, title); + col_count_nfills_ps.str_val = "(#/sec)"; + + COL(alloc_count_row, count_nflushes, right, 16, title); + col_count_nflushes.str_val = "nflush"; + COL(alloc_count_row, count_nflushes_ps, right, 10, title); + col_count_nflushes_ps.str_val = "(#/sec)"; + emitter_table_row(emitter, &alloc_count_row); col_count_nmalloc_ps.type = emitter_type_uint64; col_count_ndalloc_ps.type = emitter_type_uint64; col_count_nrequests_ps.type = emitter_type_uint64; + col_count_nfills_ps.type = emitter_type_uint64; + col_count_nflushes_ps.type = emitter_type_uint64; #define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype) \ CTL_M2_GET("stats.arenas.0." #small_or_large "." 
#name, i, \ @@ -855,6 +869,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64) col_count_nrequests_ps.uint64_val = rate_per_second(col_count_nrequests.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(small, nfills, uint64) + col_count_nfills_ps.uint64_val = + rate_per_second(col_count_nfills.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(small, nflushes, uint64) + col_count_nflushes_ps.uint64_val = + rate_per_second(col_count_nflushes.uint64_val, uptime); emitter_table_row(emitter, &alloc_count_row); emitter_json_object_end(emitter); /* Close "small". */ @@ -872,6 +892,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64) col_count_nrequests_ps.uint64_val = rate_per_second(col_count_nrequests.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(large, nfills, uint64) + col_count_nfills_ps.uint64_val = + rate_per_second(col_count_nfills.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(large, nflushes, uint64) + col_count_nflushes_ps.uint64_val = + rate_per_second(col_count_nflushes.uint64_val, uptime); emitter_table_row(emitter, &alloc_count_row); emitter_json_object_end(emitter); /* Close "large". */ @@ -884,12 +910,18 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, col_count_nmalloc.uint64_val = small_nmalloc + large_nmalloc; col_count_ndalloc.uint64_val = small_ndalloc + large_ndalloc; col_count_nrequests.uint64_val = small_nrequests + large_nrequests; + col_count_nfills.uint64_val = small_nfills + large_nfills; + col_count_nflushes.uint64_val = small_nflushes + large_nflushes; col_count_nmalloc_ps.uint64_val = rate_per_second(col_count_nmalloc.uint64_val, uptime); col_count_ndalloc_ps.uint64_val = rate_per_second(col_count_ndalloc.uint64_val, uptime); col_count_nrequests_ps.uint64_val = rate_per_second(col_count_nrequests.uint64_val, uptime); + col_count_nfills_ps.uint64_val = + rate_per_second(col_count_nfills.uint64_val, uptime); + col_count_nflushes_ps.uint64_val = + rate_per_second(col_count_nflushes.uint64_val, uptime); emitter_table_row(emitter, &alloc_count_row); emitter_row_t mem_count_row; diff --git a/src/tcache.c b/src/tcache.c index 034c69a..50099a9 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -282,8 +282,8 @@ tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, } if (config_stats) { merged_stats = true; - arena_stats_large_nrequests_add(tsd_tsdn(tsd), - &tcache_arena->stats, binind, + arena_stats_large_flush_nrequests_add( + tsd_tsdn(tsd), &tcache_arena->stats, binind, tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } @@ -324,7 +324,7 @@ tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. 
*/ - arena_stats_large_nrequests_add(tsd_tsdn(tsd), + arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd), &tcache_arena->stats, binind, tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } @@ -615,7 +615,7 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { for (; i < nhbins; i++) { cache_bin_t *tbin = tcache_large_bin_get(tcache, i); - arena_stats_large_nrequests_add(tsdn, &arena->stats, i, + arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i, tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } -- cgit v0.12 From 2d6d099fed05b1509e81e54458516528bfbbf38d Mon Sep 17 00:00:00 2001 From: Vaibhav Jain Date: Tue, 21 May 2019 12:06:16 +0530 Subject: Fix GCC-9.1 warning with macro GET_ARG_NUMERIC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC-9.1 reports following error when trying to compile file src/malloc_io.c and with CFLAGS='-Werror' : src/malloc_io.c: In function ‘malloc_vsnprintf’: src/malloc_io.c:369:2: error: case label value exceeds maximum value for type [-Werror] 369 | case '?' | 0x80: \ | ^~~~ src/malloc_io.c:581:5: note: in expansion of macro ‘GET_ARG_NUMERIC’ 581 | GET_ARG_NUMERIC(val, 'p'); | ^~~~~~~~~~~~~~~ ... cc1: all warnings being treated as errors make: *** [Makefile:388: src/malloc_io.sym.o] Error 1 The warning is reported as by default the type 'char' is 'signed char' and or-ing 0x80 will turn the case label char negative which will be beyond the printable ascii range (0 - 127). The patch fixes this by explicitly casting the 'len' variable as unsigned char' inside the 'switch' statement so that value of expression " '?' | 0x80 " falls within the legal values of the variable 'len'. --- src/malloc_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/malloc_io.c b/src/malloc_io.c index 7bdc13f..dd88265 100644 --- a/src/malloc_io.c +++ b/src/malloc_io.c @@ -362,7 +362,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { } \ } while (0) #define GET_ARG_NUMERIC(val, len) do { \ - switch (len) { \ + switch ((unsigned char)len) { \ case '?': \ val = va_arg(ap, int); \ break; \ -- cgit v0.12 From 4c63b0e76a693b0cfdf209cb4f8fbd1ed74453b0 Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Mon, 13 May 2019 14:59:33 -0700 Subject: Improve memory utilization tests Added tests for large size classes and expanded the tests to cover wider range of allocation sizes. --- src/ctl.c | 8 +- test/unit/extent_util.c | 313 ++++++++++++++++++++++++++++++------------------ 2 files changed, 200 insertions(+), 121 deletions(-) diff --git a/src/ctl.c b/src/ctl.c index f0d51df..176cb65 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -3143,15 +3143,15 @@ label_return: * (f) total number of regions in the bin the extent belongs to. * * Note that "(e)" and "(f)" are only available when stats are enabled; - * otherwise both are set zero. + * otherwise their values are undefined. * * This API is mainly intended for small class allocations, where extents are * used as slab. * * In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)" - * will be zero. The other three fields will be properly set though the values - * are trivial: "(b)" will be 0, "(c)" will be 1, and "(d)" will be the usable - * size. + * will be zero (if stats are enabled; otherwise undefined). The other three + * fields will be properly set though the values are trivial: "(b)" will be 0, + * "(c)" will be 1, and "(d)" will be the usable size. 
* * The input pointer and size are respectively passed in by newp and newlen, * and the output fields and size are respectively oldp and *oldlenp. diff --git a/test/unit/extent_util.c b/test/unit/extent_util.c index 6995325..97e55f0 100644 --- a/test/unit/extent_util.c +++ b/test/unit/extent_util.c @@ -25,18 +25,30 @@ #define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query") +#define TEST_MAX_SIZE (1 << 20) + TEST_BEGIN(test_query) { - void *p = mallocx(1, 0); - void **in = &p; - size_t in_sz = sizeof(const void *); - size_t out_sz = sizeof(void *) + sizeof(size_t) * 5; - void *out = mallocx(out_sz, 0); - void *out_ref = mallocx(out_sz, 0); - size_t out_sz_ref = out_sz; - - assert_ptr_not_null(p, "test pointer allocation failed"); - assert_ptr_not_null(out, "test output allocation failed"); - assert_ptr_not_null(out_ref, "test reference output allocation failed"); + size_t sz; + /* + * Select some sizes that can span both small and large sizes, and are + * numerically unrelated to any size boundaries. + */ + for (sz = 7; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS; + sz += (sz <= SC_SMALL_MAXCLASS ? 1009 : 99989)) { + void *p = mallocx(sz, 0); + void **in = &p; + size_t in_sz = sizeof(const void *); + size_t out_sz = sizeof(void *) + sizeof(size_t) * 5; + void *out = mallocx(out_sz, 0); + void *out_ref = mallocx(out_sz, 0); + size_t out_sz_ref = out_sz; + + assert_ptr_not_null(p, + "test pointer allocation failed"); + assert_ptr_not_null(out, + "test output allocation failed"); + assert_ptr_not_null(out_ref, + "test reference output allocation failed"); #define SLABCUR_READ(out) (*(void **)out) #define COUNTS(out) ((size_t *)((void **)out + 1)) @@ -46,55 +58,91 @@ TEST_BEGIN(test_query) { #define BIN_NFREE_READ(out) COUNTS(out)[3] #define BIN_NREGS_READ(out) COUNTS(out)[4] - SLABCUR_READ(out) = NULL; - NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1; - BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1; - memcpy(out_ref, out, out_sz); - - /* Test invalid argument(s) errors */ - TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL"); - TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL"); - TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz, "newp is NULL"); - TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0, "newlen is zero"); - in_sz -= 1; - TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, "invalid newlen"); - in_sz += 1; - out_sz_ref = out_sz -= 2 * sizeof(size_t); - TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, "invalid *oldlenp"); - out_sz_ref = out_sz += 2 * sizeof(size_t); - - /* Examine output for valid call */ - TEST_UTIL_VALID("query"); - assert_zu_le(NFREE_READ(out), NREGS_READ(out), - "Extent free count exceeded region count"); - assert_zu_le(NREGS_READ(out), SIZE_READ(out), - "Extent region count exceeded size"); - assert_zu_ne(NREGS_READ(out), 0, - "Extent region count must be positive"); - assert_zu_ne(SIZE_READ(out), 0, "Extent size must be positive"); - if (config_stats) { - assert_zu_le(BIN_NFREE_READ(out), BIN_NREGS_READ(out), - "Bin free count exceeded region count"); - assert_zu_ne(BIN_NREGS_READ(out), 0, - "Bin region count must be positive"); - assert_zu_le(NFREE_READ(out), BIN_NFREE_READ(out), - "Extent free count exceeded bin free count"); - assert_zu_le(NREGS_READ(out), BIN_NREGS_READ(out), - "Extent region count exceeded bin region count"); - assert_zu_eq(BIN_NREGS_READ(out) % NREGS_READ(out), 0, - "Bin region count isn't a multiple of extent region count"); - assert_zu_le(NREGS_READ(out) - NFREE_READ(out), - BIN_NREGS_READ(out) - 
BIN_NFREE_READ(out), - "Extent utilized count exceeded bin utilized count"); - } else { - assert_zu_eq(BIN_NFREE_READ(out), 0, - "Bin free count should be zero when stats are disabled"); - assert_zu_eq(BIN_NREGS_READ(out), 0, - "Bin region count should be zero when stats are disabled"); - } - assert_ptr_not_null(SLABCUR_READ(out), "Current slab is null"); - assert_true(NFREE_READ(out) == 0 || SLABCUR_READ(out) <= p, - "Allocation should follow first fit principle"); + SLABCUR_READ(out) = NULL; + NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1; + BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1; + memcpy(out_ref, out, out_sz); + + /* Test invalid argument(s) errors */ + TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz, + "old is NULL"); + TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz, + "oldlenp is NULL"); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz, + "newp is NULL"); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0, + "newlen is zero"); + in_sz -= 1; + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, + "invalid newlen"); + in_sz += 1; + out_sz_ref = out_sz -= 2 * sizeof(size_t); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, + "invalid *oldlenp"); + out_sz_ref = out_sz += 2 * sizeof(size_t); + + /* Examine output for valid call */ + TEST_UTIL_VALID("query"); + assert_zu_le(sz, SIZE_READ(out), + "Extent size should be at least allocation size"); + assert_zu_eq(SIZE_READ(out) & (PAGE - 1), 0, + "Extent size should be a multiple of page size"); + if (sz <= SC_SMALL_MAXCLASS) { + assert_zu_le(NFREE_READ(out), NREGS_READ(out), + "Extent free count exceeded region count"); + assert_zu_le(NREGS_READ(out), SIZE_READ(out), + "Extent region count exceeded size"); + assert_zu_ne(NREGS_READ(out), 0, + "Extent region count must be positive"); + assert_ptr_not_null(SLABCUR_READ(out), + "Current slab is null"); + assert_true(NFREE_READ(out) == 0 + || SLABCUR_READ(out) <= p, + "Allocation should follow first fit principle"); + if (config_stats) { + assert_zu_le(BIN_NFREE_READ(out), + BIN_NREGS_READ(out), + "Bin free count exceeded region count"); + assert_zu_ne(BIN_NREGS_READ(out), 0, + "Bin region count must be positive"); + assert_zu_le(NFREE_READ(out), + BIN_NFREE_READ(out), + "Extent free count exceeded bin free count"); + assert_zu_le(NREGS_READ(out), + BIN_NREGS_READ(out), + "Extent region count exceeded " + "bin region count"); + assert_zu_eq(BIN_NREGS_READ(out) + % NREGS_READ(out), 0, + "Bin region count isn't a multiple of " + "extent region count"); + assert_zu_le( + BIN_NFREE_READ(out) - NFREE_READ(out), + BIN_NREGS_READ(out) - NREGS_READ(out), + "Free count in other extents in the bin " + "exceeded region count in other extents " + "in the bin"); + assert_zu_le(NREGS_READ(out) - NFREE_READ(out), + BIN_NREGS_READ(out) - BIN_NFREE_READ(out), + "Extent utilized count exceeded " + "bin utilized count"); + } + } else { + assert_zu_eq(NFREE_READ(out), 0, + "Extent free count should be zero"); + assert_zu_eq(NREGS_READ(out), 1, + "Extent region count should be one"); + assert_ptr_null(SLABCUR_READ(out), + "Current slab must be null for large size classes"); + if (config_stats) { + assert_zu_eq(BIN_NFREE_READ(out), 0, + "Bin free count must be zero for " + "large sizes"); + assert_zu_eq(BIN_NREGS_READ(out), 0, + "Bin region count must be zero for " + "large sizes"); + } + } #undef BIN_NREGS_READ #undef BIN_NFREE_READ @@ -104,42 +152,54 @@ TEST_BEGIN(test_query) { #undef COUNTS #undef SLABCUR_READ - free(out_ref); - free(out); - free(p); + free(out_ref); + free(out); + free(p); + } } TEST_END 
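
For reference, a minimal sketch of how an application (rather than the test harness above) might drive the single-pointer query endpoint. This is illustrative only: it assumes an unprefixed jemalloc build exposing mallctl(), and the output layout documented in src/ctl.c — one (void *) slabcur address followed by five size_t counters, read with the same pointer arithmetic the test uses.

	#include <stdio.h>
	#include <stdlib.h>
	#include <jemalloc/jemalloc.h>

	int
	main(void) {
		void *p = malloc(1000);
		void *in = p;	/* newp points at storage holding the queried pointer. */
		size_t out_sz = sizeof(void *) + sizeof(size_t) * 5;
		void *out = malloc(out_sz);

		if (p != NULL && out != NULL &&
		    mallctl("experimental.utilization.query", out, &out_sz,
		    &in, sizeof(in)) == 0) {
			/* Counters start immediately after the slabcur pointer. */
			size_t *counts = (size_t *)((void **)out + 1);
			printf("slabcur=%p nfree=%zu nregs=%zu size=%zu\n",
			    *(void **)out, counts[0], counts[1], counts[2]);
		}
		free(out);
		free(p);
		return 0;
	}

With stats enabled, counts[3] and counts[4] additionally carry the bin-wide free/region totals described in the src/ctl.c comment.
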
TEST_BEGIN(test_batch) { - void *p = mallocx(1, 0); - void *q = mallocx(1, 0); - void *in[] = {p, q}; - size_t in_sz = sizeof(const void *) * 2; - size_t out[] = {-1, -1, -1, -1, -1, -1}; - size_t out_sz = sizeof(size_t) * 6; - size_t out_ref[] = {-1, -1, -1, -1, -1, -1}; - size_t out_sz_ref = out_sz; - - assert_ptr_not_null(p, "test pointer allocation failed"); - assert_ptr_not_null(q, "test pointer allocation failed"); - - /* Test invalid argument(s) errors */ - TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz, "old is NULL"); - TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz, "oldlenp is NULL"); - TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz, "newp is NULL"); - TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0, "newlen is zero"); - in_sz -= 1; - TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, - "newlen is not an exact multiple"); - in_sz += 1; - out_sz_ref = out_sz -= 2 * sizeof(size_t); - TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, - "*oldlenp is not an exact multiple"); - out_sz_ref = out_sz += 2 * sizeof(size_t); - in_sz -= sizeof(const void *); - TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, - "*oldlenp and newlen do not match"); - in_sz += sizeof(const void *); + size_t sz; + /* + * Select some sizes that can span both small and large sizes, and are + * numerically unrelated to any size boundaries. + */ + for (sz = 17; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS; + sz += (sz <= SC_SMALL_MAXCLASS ? 1019 : 99991)) { + void *p = mallocx(sz, 0); + void *q = mallocx(sz, 0); + void *in[] = {p, q}; + size_t in_sz = sizeof(const void *) * 2; + size_t out[] = {-1, -1, -1, -1, -1, -1}; + size_t out_sz = sizeof(size_t) * 6; + size_t out_ref[] = {-1, -1, -1, -1, -1, -1}; + size_t out_sz_ref = out_sz; + + assert_ptr_not_null(p, "test pointer allocation failed"); + assert_ptr_not_null(q, "test pointer allocation failed"); + + /* Test invalid argument(s) errors */ + TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz, + "old is NULL"); + TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz, + "oldlenp is NULL"); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz, + "newp is NULL"); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0, + "newlen is zero"); + in_sz -= 1; + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "newlen is not an exact multiple"); + in_sz += 1; + out_sz_ref = out_sz -= 2 * sizeof(size_t); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "*oldlenp is not an exact multiple"); + out_sz_ref = out_sz += 2 * sizeof(size_t); + in_sz -= sizeof(const void *); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "*oldlenp and newlen do not match"); + in_sz += sizeof(const void *); /* Examine output for valid calls */ #define TEST_EQUAL_REF(i, message) \ @@ -149,29 +209,45 @@ TEST_BEGIN(test_batch) { #define NREGS_READ(out, i) out[(i) * 3 + 1] #define SIZE_READ(out, i) out[(i) * 3 + 2] - out_sz_ref = out_sz /= 2; - in_sz /= 2; - TEST_UTIL_BATCH_VALID; - assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0), - "Extent free count exceeded region count"); - assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0), - "Extent region count exceeded size"); - assert_zu_ne(NREGS_READ(out, 0), 0, - "Extent region count must be positive"); - assert_zu_ne(SIZE_READ(out, 0), 0, "Extent size must be positive"); - TEST_EQUAL_REF(1, "Should not overwrite content beyond what's needed"); - in_sz *= 2; - out_sz_ref = out_sz *= 2; - - memcpy(out_ref, out, 3 * sizeof(size_t)); - TEST_UTIL_BATCH_VALID; - TEST_EQUAL_REF(0, "Statistics should be stable across calls"); - assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1), - 
"Extent free count exceeded region count"); - assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1), - "Extent region count should be same for same region size"); - assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1), - "Extent size should be same for same region size"); + out_sz_ref = out_sz /= 2; + in_sz /= 2; + TEST_UTIL_BATCH_VALID; + assert_zu_le(sz, SIZE_READ(out, 0), + "Extent size should be at least allocation size"); + assert_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0, + "Extent size should be a multiple of page size"); + if (sz <= SC_SMALL_MAXCLASS) { + assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0), + "Extent free count exceeded region count"); + assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0), + "Extent region count exceeded size"); + assert_zu_ne(NREGS_READ(out, 0), 0, + "Extent region count must be positive"); + } else { + assert_zu_eq(NFREE_READ(out, 0), 0, + "Extent free count should be zero"); + assert_zu_eq(NREGS_READ(out, 0), 1, + "Extent region count should be one"); + } + TEST_EQUAL_REF(1, + "Should not overwrite content beyond what's needed"); + in_sz *= 2; + out_sz_ref = out_sz *= 2; + + memcpy(out_ref, out, 3 * sizeof(size_t)); + TEST_UTIL_BATCH_VALID; + TEST_EQUAL_REF(0, "Statistics should be stable across calls"); + if (sz <= SC_SMALL_MAXCLASS) { + assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1), + "Extent free count exceeded region count"); + } else { + assert_zu_eq(NFREE_READ(out, 0), 0, + "Extent free count should be zero"); + } + assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1), + "Extent region count should be same for same region size"); + assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1), + "Extent size should be same for same region size"); #undef SIZE_READ #undef NREGS_READ @@ -179,12 +255,15 @@ TEST_BEGIN(test_batch) { #undef TEST_EQUAL_REF - free(q); - free(p); + free(q); + free(p); + } } TEST_END int main(void) { + assert_zu_lt(SC_SMALL_MAXCLASS, TEST_MAX_SIZE, + "Test case cannot cover large classes"); return test(test_query, test_batch); } -- cgit v0.12 From c92ac306013bc95cd5f34de421b1aa5eb1f28971 Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Tue, 30 Apr 2019 13:54:00 -0700 Subject: Add confirm_conf option If the confirm_conf option is set, when the program starts, each of the four malloc_conf strings will be printed, and each option will be printed when being set. --- doc/jemalloc.xml.in | 17 ++ .../jemalloc/internal/jemalloc_internal_externs.h | 1 + src/ctl.c | 3 + src/jemalloc.c | 280 +++++++++++++-------- src/stats.c | 1 + test/unit/mallctl.c | 1 + 6 files changed, 196 insertions(+), 107 deletions(-) diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in index 04a4764..194f1ef 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -904,6 +904,23 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay", + + + opt.confirm_conf + (bool) + r- + + Confirm-runtime-options-when-program-starts + enabled/disabled. If true, the string specified via + , the string pointed to by the + global variable malloc_conf, the name + of the file referenced by the symbolic link named + /etc/malloc.conf, and the value of + the environment variable MALLOC_CONF, will be printed in + order. Then, each option being set will be individually printed. This + option is disabled by default. 
+ + opt.abort_conf diff --git a/include/jemalloc/internal/jemalloc_internal_externs.h b/include/jemalloc/internal/jemalloc_internal_externs.h index cdbc33a..d291170 100644 --- a/include/jemalloc/internal/jemalloc_internal_externs.h +++ b/include/jemalloc/internal/jemalloc_internal_externs.h @@ -10,6 +10,7 @@ extern bool malloc_slow; /* Run-time options. */ extern bool opt_abort; extern bool opt_abort_conf; +extern bool opt_confirm_conf; extern const char *opt_junk; extern bool opt_junk_alloc; extern bool opt_junk_free; diff --git a/src/ctl.c b/src/ctl.c index 176cb65..271881e 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -81,6 +81,7 @@ CTL_PROTO(config_utrace) CTL_PROTO(config_xmalloc) CTL_PROTO(opt_abort) CTL_PROTO(opt_abort_conf) +CTL_PROTO(opt_confirm_conf) CTL_PROTO(opt_metadata_thp) CTL_PROTO(opt_retain) CTL_PROTO(opt_dss) @@ -304,6 +305,7 @@ static const ctl_named_node_t config_node[] = { static const ctl_named_node_t opt_node[] = { {NAME("abort"), CTL(opt_abort)}, {NAME("abort_conf"), CTL(opt_abort_conf)}, + {NAME("confirm_conf"), CTL(opt_confirm_conf)}, {NAME("metadata_thp"), CTL(opt_metadata_thp)}, {NAME("retain"), CTL(opt_retain)}, {NAME("dss"), CTL(opt_dss)}, @@ -1741,6 +1743,7 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool) CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) +CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool) CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], const char *) CTL_RO_NL_GEN(opt_retain, opt_retain, bool) diff --git a/src/jemalloc.c b/src/jemalloc.c index ec6b400..1e99a59 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -43,6 +43,8 @@ bool opt_abort_conf = false #endif ; +/* Intentionally default off, even with debug builds. */ +bool opt_confirm_conf = false; const char *opt_junk = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) "true" @@ -929,93 +931,140 @@ malloc_slow_flag_init(void) { malloc_slow = (malloc_slow_flags != 0); } -static void -malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { - unsigned i; - char buf[PATH_MAX + 1]; - const char *opts, *k, *v; - size_t klen, vlen; +/* Number of sources for initializing malloc_conf */ +#define MALLOC_CONF_NSOURCES 4 - for (i = 0; i < 4; i++) { - /* Get runtime configuration. */ - switch (i) { - case 0: - opts = config_malloc_conf; - break; - case 1: - if (je_malloc_conf != NULL) { - /* - * Use options that were compiled into the - * program. - */ - opts = je_malloc_conf; - } else { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - case 2: { - ssize_t linklen = 0; +static const char * +obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { + if (config_debug) { + static unsigned read_source = 0; + /* + * Each source should only be read once, to minimize # of + * syscalls on init. + */ + assert(read_source++ == which_source); + } + assert(which_source < MALLOC_CONF_NSOURCES); + + const char *ret; + switch (which_source) { + case 0: + ret = config_malloc_conf; + break; + case 1: + if (je_malloc_conf != NULL) { + /* Use options that were compiled into the program. */ + ret = je_malloc_conf; + } else { + /* No configuration specified. 
*/ + ret = NULL; + } + break; + case 2: { + ssize_t linklen = 0; #ifndef _WIN32 - int saved_errno = errno; - const char *linkname = + int saved_errno = errno; + const char *linkname = # ifdef JEMALLOC_PREFIX - "/etc/"JEMALLOC_PREFIX"malloc.conf" + "/etc/"JEMALLOC_PREFIX"malloc.conf" # else - "/etc/malloc.conf" + "/etc/malloc.conf" # endif - ; + ; - /* - * Try to use the contents of the "/etc/malloc.conf" - * symbolic link's name. - */ + /* + * Try to use the contents of the "/etc/malloc.conf" symbolic + * link's name. + */ #ifndef JEMALLOC_READLINKAT - linklen = readlink(linkname, buf, sizeof(buf) - 1); + linklen = readlink(linkname, buf, PATH_MAX); #else - linklen = readlinkat(AT_FDCWD, linkname, buf, - sizeof(buf) - 1); + linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX); #endif - if (linklen == -1) { - /* No configuration specified. */ - linklen = 0; - /* Restore errno. */ - set_errno(saved_errno); - } + if (linklen == -1) { + /* No configuration specified. */ + linklen = 0; + /* Restore errno. */ + set_errno(saved_errno); + } #endif - buf[linklen] = '\0'; - opts = buf; - break; - } case 3: { - const char *envname = + buf[linklen] = '\0'; + ret = buf; + break; + } case 3: { + const char *envname = #ifdef JEMALLOC_PREFIX - JEMALLOC_CPREFIX"MALLOC_CONF" + JEMALLOC_CPREFIX"MALLOC_CONF" #else - "MALLOC_CONF" + "MALLOC_CONF" #endif - ; + ; - if ((opts = jemalloc_secure_getenv(envname)) != NULL) { - /* - * Do nothing; opts is already initialized to - * the value of the MALLOC_CONF environment - * variable. - */ - } else { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - } default: - not_reached(); - buf[0] = '\0'; - opts = buf; + if ((ret = jemalloc_secure_getenv(envname)) != NULL) { + /* + * Do nothing; opts is already initialized to the value + * of the MALLOC_CONF environment variable. + */ + } else { + /* No configuration specified. */ + ret = NULL; + } + break; + } default: + not_reached(); + ret = NULL; + } + return ret; +} + +static void +malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], + bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES], + char buf[PATH_MAX + 1]) { + static const char *opts_explain[MALLOC_CONF_NSOURCES] = { + "string specified via --with-malloc-conf", + "string pointed to by the global variable malloc_conf", + "\"name\" of the file referenced by the symbolic link named " + "/etc/malloc.conf", + "value of the environment variable MALLOC_CONF" + }; + unsigned i; + const char *opts, *k, *v; + size_t klen, vlen; + + for (i = 0; i < MALLOC_CONF_NSOURCES; i++) { + /* Get runtime configuration. */ + if (initial_call) { + opts_cache[i] = obtain_malloc_conf(i, buf); + } + opts = opts_cache[i]; + if (!initial_call && opt_confirm_conf) { + malloc_printf( + ": malloc_conf #%u (%s): \"%s\"\n", + i + 1, opts_explain[i], opts != NULL ? 
opts : ""); + } + if (opts == NULL) { + continue; } while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, &vlen)) { + +#define CONF_ERROR(msg, k, klen, v, vlen) \ + if (!initial_call) { \ + malloc_conf_error( \ + msg, k, klen, v, vlen); \ + cur_opt_valid = false; \ + } +#define CONF_CONTINUE { \ + if (!initial_call && opt_confirm_conf \ + && cur_opt_valid) { \ + malloc_printf(": Set "\ + "conf value: %.*s:%.*s\n", \ + (int)klen, k, (int)vlen, v);\ + } \ + continue; \ + } #define CONF_MATCH(n) \ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) #define CONF_MATCH_VALUE(n) \ @@ -1027,11 +1076,10 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { } else if (CONF_MATCH_VALUE("false")) { \ o = false; \ } else { \ - malloc_conf_error( \ - "Invalid conf value", \ + CONF_ERROR("Invalid conf value",\ k, klen, v, vlen); \ } \ - continue; \ + CONF_CONTINUE; \ } /* * One of the CONF_MIN macros below expands, in one of the use points, @@ -1054,8 +1102,7 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { um = malloc_strtoumax(v, &end, 0); \ if (get_errno() != 0 || (uintptr_t)end -\ (uintptr_t)v != vlen) { \ - malloc_conf_error( \ - "Invalid conf value", \ + CONF_ERROR("Invalid conf value",\ k, klen, v, vlen); \ } else if (clip) { \ if (check_min(um, (t)(min))) { \ @@ -1069,7 +1116,7 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { } else { \ if (check_min(um, (t)(min)) || \ check_max(um, (t)(max))) { \ - malloc_conf_error( \ + CONF_ERROR( \ "Out-of-range " \ "conf value", \ k, klen, v, vlen); \ @@ -1077,7 +1124,7 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { o = (t)um; \ } \ } \ - continue; \ + CONF_CONTINUE; \ } #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ clip) \ @@ -1095,18 +1142,17 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { l = strtol(v, &end, 0); \ if (get_errno() != 0 || (uintptr_t)end -\ (uintptr_t)v != vlen) { \ - malloc_conf_error( \ - "Invalid conf value", \ + CONF_ERROR("Invalid conf value",\ k, klen, v, vlen); \ } else if (l < (ssize_t)(min) || l > \ (ssize_t)(max)) { \ - malloc_conf_error( \ + CONF_ERROR( \ "Out-of-range conf value", \ k, klen, v, vlen); \ } else { \ o = l; \ } \ - continue; \ + CONF_CONTINUE; \ } #define CONF_HANDLE_CHAR_P(o, n, d) \ if (CONF_MATCH(n)) { \ @@ -1115,7 +1161,14 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { sizeof(o)-1; \ strncpy(o, v, cpylen); \ o[cpylen] = '\0'; \ - continue; \ + CONF_CONTINUE; \ + } + + bool cur_opt_valid = true; + + CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf") + if (initial_call) { + continue; } CONF_HANDLE_BOOL(opt_abort, "abort") @@ -1132,10 +1185,10 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { } } if (!match) { - malloc_conf_error("Invalid conf value", + CONF_ERROR("Invalid conf value", k, klen, v, vlen); } - continue; + CONF_CONTINUE; } CONF_HANDLE_BOOL(opt_retain, "retain") if (strncmp("dss", k, klen) == 0) { @@ -1145,7 +1198,7 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { if (strncmp(dss_prec_names[i], v, vlen) == 0) { if (extent_dss_prec_set(i)) { - malloc_conf_error( + CONF_ERROR( "Error setting dss", k, klen, v, vlen); } else { @@ -1157,10 +1210,10 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { } } if (!match) { - malloc_conf_error("Invalid conf value", + CONF_ERROR("Invalid conf value", k, klen, v, vlen); } - 
continue; + CONF_CONTINUE; } CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, @@ -1178,14 +1231,14 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { if (err || bin_update_shard_size( bin_shard_sizes, size_start, size_end, nshards)) { - malloc_conf_error( + CONF_ERROR( "Invalid settings for " "bin_shards", k, klen, v, vlen); break; } } while (vlen_left > 0); - continue; + CONF_CONTINUE; } CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < @@ -1198,7 +1251,7 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { CONF_HANDLE_BOOL(opt_stats_print, "stats_print") if (CONF_MATCH("stats_print_opts")) { init_opt_stats_print_opts(v, vlen); - continue; + CONF_CONTINUE; } if (config_fill) { if (CONF_MATCH("junk")) { @@ -1219,11 +1272,11 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { opt_junk_alloc = false; opt_junk_free = true; } else { - malloc_conf_error( - "Invalid conf value", k, - klen, v, vlen); + CONF_ERROR( + "Invalid conf value", + k, klen, v, vlen); } - continue; + CONF_CONTINUE; } CONF_HANDLE_BOOL(opt_zero, "zero") } @@ -1260,7 +1313,7 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { if (strncmp(percpu_arena_mode_names[i], v, vlen) == 0) { if (!have_percpu_arena) { - malloc_conf_error( + CONF_ERROR( "No getcpu support", k, klen, v, vlen); } @@ -1270,10 +1323,10 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { } } if (!match) { - malloc_conf_error("Invalid conf value", + CONF_ERROR("Invalid conf value", k, klen, v, vlen); } - continue; + CONF_CONTINUE; } CONF_HANDLE_BOOL(opt_background_thread, "background_thread"); @@ -1299,13 +1352,12 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { sc_data, slab_start, slab_end, (int)pgs); } else { - malloc_conf_error( - "Invalid settings for " - "slab_sizes", k, klen, v, - vlen); + CONF_ERROR("Invalid settings " + "for slab_sizes", + k, klen, v, vlen); } } while (!err && vlen_left > 0); - continue; + CONF_CONTINUE; } if (config_prof) { CONF_HANDLE_BOOL(opt_prof, "prof") @@ -1334,7 +1386,7 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { vlen : sizeof(log_var_names) - 1); strncpy(log_var_names, v, cpylen); log_var_names[cpylen] = '\0'; - continue; + CONF_CONTINUE; } } if (CONF_MATCH("thp")) { @@ -1343,7 +1395,7 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { if (strncmp(thp_mode_names[i],v, vlen) == 0) { if (!have_madvise_huge) { - malloc_conf_error( + CONF_ERROR( "No THP support", k, klen, v, vlen); } @@ -1353,13 +1405,14 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { } } if (!match) { - malloc_conf_error("Invalid conf value", + CONF_ERROR("Invalid conf value", k, klen, v, vlen); } - continue; + CONF_CONTINUE; } - malloc_conf_error("Invalid conf pair", k, klen, v, - vlen); + CONF_ERROR("Invalid conf pair", k, klen, v, vlen); +#undef CONF_ERROR +#undef CONF_CONTINUE #undef CONF_MATCH #undef CONF_MATCH_VALUE #undef CONF_HANDLE_BOOL @@ -1382,6 +1435,19 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); } +static void +malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { + const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL}; + char buf[PATH_MAX + 1]; + + /* The first call only set the confirm_conf 
option and opts_cache */ + malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf); + malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache, + NULL); +} + +#undef MALLOC_CONF_NSOURCES + static bool malloc_init_hard_needed(void) { if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == diff --git a/src/stats.c b/src/stats.c index 55a5999..bce9f45 100644 --- a/src/stats.c +++ b/src/stats.c @@ -1065,6 +1065,7 @@ stats_general_print(emitter_t *emitter) { OPT_WRITE_BOOL("abort") OPT_WRITE_BOOL("abort_conf") + OPT_WRITE_BOOL("confirm_conf") OPT_WRITE_BOOL("retain") OPT_WRITE_CHAR_P("dss") OPT_WRITE_UNSIGNED("narenas") diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c index 498f9e0..3a75ac0 100644 --- a/test/unit/mallctl.c +++ b/test/unit/mallctl.c @@ -159,6 +159,7 @@ TEST_BEGIN(test_mallctl_opt) { TEST_MALLCTL_OPT(bool, abort, always); TEST_MALLCTL_OPT(bool, abort_conf, always); + TEST_MALLCTL_OPT(bool, confirm_conf, always); TEST_MALLCTL_OPT(const char *, metadata_thp, always); TEST_MALLCTL_OPT(bool, retain, always); TEST_MALLCTL_OPT(const char *, dss, always); -- cgit v0.12 From e13cf65a5f37bbd9b44badb198ccc138cbacc219 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 20 May 2019 13:41:43 -0700 Subject: Add experimental.arenas.i.pactivep. The new experimental mallctl exposes the arena pactive counter to applications, which allows fast read w/o going through the mallctl / epoch steps. This is particularly useful when frequent balancing is required, e.g. when having multiple manual arenas, and threads are multiplexed to them based on usage. --- src/ctl.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 82 insertions(+), 7 deletions(-) diff --git a/src/ctl.c b/src/ctl.c index 271881e..1d83087 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -225,6 +225,8 @@ CTL_PROTO(experimental_hooks_install) CTL_PROTO(experimental_hooks_remove) CTL_PROTO(experimental_utilization_query) CTL_PROTO(experimental_utilization_batch_query) +CTL_PROTO(experimental_arenas_i_pactivep) +INDEX_PROTO(experimental_arenas_i) #define MUTEX_STATS_CTL_PROTO_GEN(n) \ CTL_PROTO(stats_##n##_num_ops) \ @@ -588,19 +590,31 @@ static const ctl_named_node_t stats_node[] = { {NAME("arenas"), CHILD(indexed, stats_arenas)} }; -static const ctl_named_node_t hooks_node[] = { +static const ctl_named_node_t experimental_hooks_node[] = { {NAME("install"), CTL(experimental_hooks_install)}, {NAME("remove"), CTL(experimental_hooks_remove)} }; -static const ctl_named_node_t utilization_node[] = { +static const ctl_named_node_t experimental_utilization_node[] = { {NAME("query"), CTL(experimental_utilization_query)}, {NAME("batch_query"), CTL(experimental_utilization_batch_query)} }; +static const ctl_named_node_t experimental_arenas_i_node[] = { + {NAME("pactivep"), CTL(experimental_arenas_i_pactivep)} +}; +static const ctl_named_node_t super_experimental_arenas_i_node[] = { + {NAME(""), CHILD(named, experimental_arenas_i)} +}; + +static const ctl_indexed_node_t experimental_arenas_node[] = { + {INDEX(experimental_arenas_i)} +}; + static const ctl_named_node_t experimental_node[] = { - {NAME("hooks"), CHILD(named, hooks)}, - {NAME("utilization"), CHILD(named, utilization)} + {NAME("hooks"), CHILD(named, experimental_hooks)}, + {NAME("utilization"), CHILD(named, experimental_utilization)}, + {NAME("arenas"), CHILD(indexed, experimental_arenas)} }; static const ctl_named_node_t root_node[] = { @@ -3068,15 +3082,23 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib, return 
super_stats_arenas_i_extents_j_node; } +static bool +ctl_arenas_i_verify(size_t i) { + size_t a = arenas_i2a_impl(i, true, true); + if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) { + return true; + } + + return false; +} + static const ctl_named_node_t * stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t *ret; - size_t a; malloc_mutex_lock(tsdn, &ctl_mtx); - a = arenas_i2a_impl(i, true, true); - if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) { + if (ctl_arenas_i_verify(i)) { ret = NULL; goto label_return; } @@ -3351,3 +3373,56 @@ experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib, label_return: return ret; } + +static const ctl_named_node_t * +experimental_arenas_i_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t i) { + const ctl_named_node_t *ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + if (ctl_arenas_i_verify(i)) { + ret = NULL; + goto label_return; + } + ret = super_experimental_arenas_i_node; +label_return: + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; +} + +static int +experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + if (!config_stats) { + return ENOENT; + } + if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(size_t *)) { + return EINVAL; + } + + unsigned arena_ind; + arena_t *arena; + int ret; + size_t *pactivep; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + READONLY(); + MIB_UNSIGNED(arena_ind, 2); + if (arena_ind < narenas_total_get() && (arena = + arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { +#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \ + defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER) + /* Expose the underlying counter for fast read. */ + pactivep = (size_t *)&(arena->nactive.repr); + READ(pactivep, size_t *); + ret = 0; +#else + ret = EFAULT; +#endif + } else { + ret = EFAULT; + } +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} -- cgit v0.12 From 1a71533511027dbe3f9d989659efeec446915d6b Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Wed, 22 May 2019 10:21:53 -0700 Subject: Avoid blocking on background thread lock for stats. Background threads may run for a long time, especially when the # of dirty pages is high. Avoid blocking stats calls because of this (which may cause latency spikes). --- src/background_thread.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/background_thread.c b/src/background_thread.c index 5ed6c1c..57b9b25 100644 --- a/src/background_thread.c +++ b/src/background_thread.c @@ -799,7 +799,13 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { nstime_init(&stats->run_interval, 0); for (unsigned i = 0; i < max_background_threads; i++) { background_thread_info_t *info = &background_thread_info[i]; - malloc_mutex_lock(tsdn, &info->mtx); + if (malloc_mutex_trylock(tsdn, &info->mtx)) { + /* + * Each background thread run may take a long time; + * avoid waiting on the stats if the thread is active. 
+ */ + continue; + } if (info->state != background_thread_stopped) { num_runs += info->tot_n_runs; nstime_add(&stats->run_interval, &info->tot_sleep_time); -- cgit v0.12 From 40a3435b8dc225ad61329aca89d9c8d0dfbc03ab Mon Sep 17 00:00:00 2001 From: frederik-h Date: Fri, 24 May 2019 11:36:21 +0200 Subject: Add missing safety_check.c to MSBuild projects The file is included in the list of source files in Makefile.in, but it is missing from the project files. This causes the build to fail due to unresolved symbols. --- msvc/projects/vc2015/jemalloc/jemalloc.vcxproj | 3 ++- msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters | 5 ++++- msvc/projects/vc2017/jemalloc/jemalloc.vcxproj | 3 ++- msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters | 5 ++++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj index ddc6781..228e8be 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj @@ -66,6 +66,7 @@ + {8D6BB292-9E1C-413D-9F98-4864BDC1514A} @@ -346,4 +347,4 @@ - \ No newline at end of file + diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters index 1dcf4ed..d839515 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters @@ -100,5 +100,8 @@ Source Files + + Source Files + - \ No newline at end of file + diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj index 21481d5..edcceed 100644 --- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj @@ -67,6 +67,7 @@ + {8D6BB292-9E1C-413D-9F98-4864BDC1514A} @@ -346,4 +347,4 @@ - \ No newline at end of file + diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters index 466dc63..6df7260 100644 --- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters @@ -103,5 +103,8 @@ Source Files + + Source Files + - \ No newline at end of file + -- cgit v0.12 From 7720b6e3851d200449914448c7163f7af92cd63f Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Wed, 3 Jul 2019 16:48:47 -0700 Subject: Fix redzone setting and checking --- include/jemalloc/internal/safety_check.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/jemalloc/internal/safety_check.h b/include/jemalloc/internal/safety_check.h index 1b53fc4..53339ac 100644 --- a/include/jemalloc/internal/safety_check.h +++ b/include/jemalloc/internal/safety_check.h @@ -9,7 +9,7 @@ JEMALLOC_ALWAYS_INLINE void safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) { assert(usize < bumped_usize); for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) { - *((unsigned char *)ptr + usize) = 0xBC; + *((unsigned char *)ptr + i) = 0xBC; } } @@ -17,7 +17,7 @@ JEMALLOC_ALWAYS_INLINE void safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize) { for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) { - if (unlikely(*((unsigned char *)ptr + usize) != 0xBC)) { + if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) { safety_check_fail("Use after free error\n"); } } -- cgit v0.12 From 34e75630cc512423b4f227338056a2f5d7e81740 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Sat, 13 Jul 2019 20:27:15 -0700 Subject: 
Reorder the configs for AppVeyor. Enable-debug and 64-bit runs tend to be more relevant. Run them first. --- .appveyor.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index 9a7d00a..90b0368 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -5,27 +5,27 @@ environment: - MSYSTEM: MINGW64 CPU: x86_64 MSVC: amd64 + CONFIG_FLAGS: --enable-debug + - MSYSTEM: MINGW64 + CPU: x86_64 + CONFIG_FLAGS: --enable-debug - MSYSTEM: MINGW32 CPU: i686 MSVC: x86 - - MSYSTEM: MINGW64 - CPU: x86_64 + CONFIG_FLAGS: --enable-debug - MSYSTEM: MINGW32 CPU: i686 + CONFIG_FLAGS: --enable-debug - MSYSTEM: MINGW64 CPU: x86_64 MSVC: amd64 - CONFIG_FLAGS: --enable-debug + - MSYSTEM: MINGW64 + CPU: x86_64 - MSYSTEM: MINGW32 CPU: i686 MSVC: x86 - CONFIG_FLAGS: --enable-debug - - MSYSTEM: MINGW64 - CPU: x86_64 - CONFIG_FLAGS: --enable-debug - MSYSTEM: MINGW32 CPU: i686 - CONFIG_FLAGS: --enable-debug install: - set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH% -- cgit v0.12 From d26636d566167a439ea18da7a234f9040668023b Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Tue, 4 Jun 2019 11:13:00 -0700 Subject: Fix logic in printing `cbopaque` can now be overriden without overriding `write_cb` in the first place. (Otherwise there would be no need to have the `cbopaque` parameter in `malloc_message`.) --- doc/jemalloc.xml.in | 2 +- include/jemalloc/internal/malloc_io.h | 2 +- src/malloc_io.c | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in index 194f1ef..e23ccb7 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -424,7 +424,7 @@ for (i = 0; i < nbins; i++) { called repeatedly. General information that never changes during execution can be omitted by specifying g as a character within the opts string. Note that - malloc_message() uses the + malloc_stats_print() uses the mallctl*() functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously. If is specified during diff --git a/include/jemalloc/internal/malloc_io.h b/include/jemalloc/internal/malloc_io.h index bfe556b..1d1a414 100644 --- a/include/jemalloc/internal/malloc_io.h +++ b/include/jemalloc/internal/malloc_io.h @@ -54,7 +54,7 @@ size_t malloc_vsnprintf(char *str, size_t size, const char *format, size_t malloc_snprintf(char *str, size_t size, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); /* - * The caller can set write_cb and cbopaque to null to choose to print with the + * The caller can set write_cb to null to choose to print with the * je_malloc_message hook. */ void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, diff --git a/src/malloc_io.c b/src/malloc_io.c index dd88265..d7cb0f5 100644 --- a/src/malloc_io.c +++ b/src/malloc_io.c @@ -632,7 +632,6 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, */ write_cb = (je_malloc_message != NULL) ? 
je_malloc_message : wrtmessage; - cbopaque = NULL; } malloc_vsnprintf(buf, sizeof(buf), format, ap); -- cgit v0.12 From e0a0c8d4bf512283e8c85fb4a51761fce5e0c08f Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Tue, 4 Jun 2019 16:34:29 -0700 Subject: Fix a bug in prof_dump_write The original logic can be disastrous if `PROF_DUMP_BUFSIZE` is less than `slen` -- `prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE` would always be `false`, so `memcpy` would always try to copy `PROF_DUMP_BUFSIZE - prof_dump_buf_end` chars, which can be dangerous: in the last round of the `while` loop it would not only illegally read the memory beyond `s` (which might not always be disastrous), but it would also illegally overwrite the memory beyond `prof_dump_buf` (which can be pretty disastrous). `slen` probably has never gone beyond `PROF_DUMP_BUFSIZE` so we were just lucky. --- src/prof.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prof.c b/src/prof.c index a4e30f4..4ebe279 100644 --- a/src/prof.c +++ b/src/prof.c @@ -1292,7 +1292,7 @@ prof_dump_write(bool propagate_err, const char *s) { } } - if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { + if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) { /* Finish writing. */ n = slen - i; } else { -- cgit v0.12 From a2a693e722d3ec0f0fb7dfcac54e775b1837efda Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Wed, 5 Jun 2019 15:26:08 -0700 Subject: Remove prof_accumbytes in arena `prof_accumbytes` was supposed to be replaced by `prof_accum` in https://github.com/jemalloc/jemalloc/pull/623. --- include/jemalloc/internal/arena_structs_b.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/jemalloc/internal/arena_structs_b.h b/include/jemalloc/internal/arena_structs_b.h index 950bd13..eeab57f 100644 --- a/include/jemalloc/internal/arena_structs_b.h +++ b/include/jemalloc/internal/arena_structs_b.h @@ -116,7 +116,6 @@ struct arena_s { /* Synchronization: internal. */ prof_accum_t prof_accum; - uint64_t prof_accumbytes; /* * PRNG state for cache index randomization of large allocation base -- cgit v0.12 From f32f23d6cc3ac9e663983ae62371acd47405c886 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Tue, 16 Jul 2019 14:35:53 -0700 Subject: Fix posix_memalign with input size 0. Return a valid pointer instead of failed assertion. --- src/jemalloc.c | 22 +++++++++++++++++----- test/integration/posix_memalign.c | 5 +++-- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/src/jemalloc.c b/src/jemalloc.c index 1e99a59..b6c8d99 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1817,6 +1817,11 @@ struct static_opts_s { bool may_overflow; /* + * Whether or not allocations (with alignment) of size 0 should be + * treated as size 1. + */ + bool bump_empty_aligned_alloc; + /* * Whether to assert that allocations are not of size 0 (after any * bumping). */ @@ -1857,6 +1862,7 @@ struct static_opts_s { JEMALLOC_ALWAYS_INLINE void static_opts_init(static_opts_t *static_opts) { static_opts->may_overflow = false; + static_opts->bump_empty_aligned_alloc = false; static_opts->assert_nonempty_alloc = false; static_opts->null_out_result_on_error = false; static_opts->set_errno_on_error = false; @@ -2044,11 +2050,6 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { goto label_oom; } - /* Validate the user input. 
*/ - if (sopts->assert_nonempty_alloc) { - assert (size != 0); - } - if (unlikely(dopts->alignment < sopts->min_alignment || (dopts->alignment & (dopts->alignment - 1)) != 0)) { goto label_invalid_alignment; @@ -2068,6 +2069,11 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { <= SC_LARGE_MAXCLASS); } } else { + if (sopts->bump_empty_aligned_alloc) { + if (unlikely(size == 0)) { + size = 1; + } + } usize = sz_sa2u(size, dopts->alignment); dopts->usize = usize; if (unlikely(usize == 0 @@ -2075,6 +2081,10 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { goto label_oom; } } + /* Validate the user input. */ + if (sopts->assert_nonempty_alloc) { + assert (size != 0); + } check_entry_exit_locking(tsd_tsdn(tsd)); @@ -2390,6 +2400,7 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size) { static_opts_init(&sopts); dynamic_opts_init(&dopts); + sopts.bump_empty_aligned_alloc = true; sopts.min_alignment = sizeof(void *); sopts.oom_string = ": Error allocating aligned memory: out of memory\n"; @@ -2430,6 +2441,7 @@ je_aligned_alloc(size_t alignment, size_t size) { static_opts_init(&sopts); dynamic_opts_init(&dopts); + sopts.bump_empty_aligned_alloc = true; sopts.null_out_result_on_error = true; sopts.set_errno_on_error = true; sopts.min_alignment = 1; diff --git a/test/integration/posix_memalign.c b/test/integration/posix_memalign.c index 2c2726d..d992260 100644 --- a/test/integration/posix_memalign.c +++ b/test/integration/posix_memalign.c @@ -85,9 +85,10 @@ TEST_BEGIN(test_alignment_and_size) { alignment <= MAXALIGN; alignment <<= 1) { total = 0; - for (size = 1; + for (size = 0; size < 3 * alignment && size < (1U << 31); - size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + size += ((size == 0) ? 1 : + (alignment >> (LG_SIZEOF_PTR-1)) - 1)) { for (i = 0; i < NITER; i++) { err = posix_memalign(&ps[i], alignment, size); -- cgit v0.12 From 9a86c65abc2cf242efe9354c9ce16901673eeb0c Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Fri, 12 Jul 2019 16:20:23 -0700 Subject: Implement retain on Windows. The VirtualAlloc and VirtualFree APIs are different because MEM_DECOMMIT cannot be used across multiple VirtualAlloc regions. To properly support decommit, only allow merge / split within the same region -- this is done by tracking the "is_head" state of extents and not merging cross-region. Add a new state is_head (only relevant for retain && !maps_coalesce), which is true for the first extent in each VirtualAlloc region. Determine if two extents can be merged based on the head state, and use serial numbers for sanity checks. 
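
For illustration, a minimal sketch (not part of the change itself) of the Win32 semantics that force this design, assuming only the documented VirtualAlloc / VirtualFree contract: decommit may target an arbitrary page range inside one reservation, but neither MEM_DECOMMIT nor MEM_RELEASE can span two reservations, and a release must start at the reservation's base address.

	#include <windows.h>

	void
	sketch(SIZE_T region_sz) {	/* region_sz: assumed a multiple of the page size. */
		void *a = VirtualAlloc(NULL, region_sz, MEM_RESERVE, PAGE_NOACCESS);
		void *b = VirtualAlloc(NULL, region_sz, MEM_RESERVE, PAGE_NOACCESS);

		/* Fine: commit, then decommit, a page range within one reservation. */
		VirtualAlloc(a, region_sz / 2, MEM_COMMIT, PAGE_READWRITE);
		VirtualFree(a, region_sz / 2, MEM_DECOMMIT);

		/*
		 * Not possible: no single call decommits or releases a range
		 * spanning a and b, even if b happens to directly follow a in
		 * the address space.  Hence extents from different
		 * reservations (the is_head boundaries tracked here) must
		 * never be merged when purging relies on these calls.
		 */

		/* Fine: release each reservation as a whole, from its base address. */
		VirtualFree(a, 0, MEM_RELEASE);
		VirtualFree(b, 0, MEM_RELEASE);
	}
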
--- include/jemalloc/internal/extent_inlines.h | 26 +++++++++- include/jemalloc/internal/extent_structs.h | 6 ++- include/jemalloc/internal/extent_types.h | 5 ++ src/extent.c | 76 +++++++++++++++++++++++------- src/extent_dss.c | 5 +- test/unit/arena_reset.c | 9 +++- test/unit/rtree.c | 8 ++-- test/unit/slab.c | 3 +- 8 files changed, 110 insertions(+), 28 deletions(-) diff --git a/include/jemalloc/internal/extent_inlines.h b/include/jemalloc/internal/extent_inlines.h index 63b710d..77fa4c4 100644 --- a/include/jemalloc/internal/extent_inlines.h +++ b/include/jemalloc/internal/extent_inlines.h @@ -343,10 +343,30 @@ extent_prof_alloc_time_set(extent_t *extent, nstime_t t) { nstime_copy(&extent->e_alloc_time, &t); } +static inline bool +extent_is_head_get(extent_t *extent) { + if (maps_coalesce) { + not_reached(); + } + + return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >> + EXTENT_BITS_IS_HEAD_SHIFT); +} + +static inline void +extent_is_head_set(extent_t *extent, bool is_head) { + if (maps_coalesce) { + not_reached(); + } + + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) | + ((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT); +} + static inline void extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size, bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed, - bool committed, bool dumpable) { + bool committed, bool dumpable, extent_head_state_t is_head) { assert(addr == PAGE_ADDR2BASE(addr) || !slab); extent_arena_set(extent, arena); @@ -360,6 +380,10 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size, extent_committed_set(extent, committed); extent_dumpable_set(extent, dumpable); ql_elm_new(extent, ql_link); + if (!maps_coalesce) { + extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true : + false); + } if (config_prof) { extent_prof_tctx_set(extent, NULL); } diff --git a/include/jemalloc/internal/extent_structs.h b/include/jemalloc/internal/extent_structs.h index ad6710e..767cd89 100644 --- a/include/jemalloc/internal/extent_structs.h +++ b/include/jemalloc/internal/extent_structs.h @@ -128,7 +128,11 @@ struct extent_s { #define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT) #define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT) -#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT) +#define EXTENT_BITS_IS_HEAD_WIDTH 1 +#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT) +#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT) + +#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT) #define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT) /* Pointer to the extent that this structure is responsible for. */ diff --git a/include/jemalloc/internal/extent_types.h b/include/jemalloc/internal/extent_types.h index 865f8a1..96925cf 100644 --- a/include/jemalloc/internal/extent_types.h +++ b/include/jemalloc/internal/extent_types.h @@ -15,4 +15,9 @@ typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t; */ #define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6 +typedef enum { + EXTENT_NOT_HEAD, + EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain. 
*/ +} extent_head_state_t; + #endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */ diff --git a/src/extent.c b/src/extent.c index e83d9c8..416d68f 100644 --- a/src/extent.c +++ b/src/extent.c @@ -50,20 +50,16 @@ static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, size_t length, bool growing_retained); -#ifdef JEMALLOC_MAPS_COALESCE static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind); -#endif static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, bool growing_retained); -#ifdef JEMALLOC_MAPS_COALESCE static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind); -#endif static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, bool growing_retained); @@ -88,11 +84,9 @@ const extent_hooks_t extent_hooks_default = { , NULL #endif -#ifdef JEMALLOC_MAPS_COALESCE , extent_split_default, extent_merge_default -#endif }; /* Used exclusively for gdump triggering. */ @@ -1323,7 +1317,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES, arena_extent_sn_next(arena), extent_state_active, zeroed, - committed, true); + committed, true, EXTENT_IS_HEAD); if (ptr == NULL) { extent_dalloc(tsdn, arena, extent); goto label_err; @@ -1495,7 +1489,7 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, } extent_init(extent, arena, addr, esize, slab, szind, arena_extent_sn_next(arena), extent_state_active, *zero, *commit, - true); + true, EXTENT_NOT_HEAD); if (pad != 0) { extent_addr_randomize(tsdn, extent, alignment); } @@ -2045,13 +2039,20 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, offset, length, false); } -#ifdef JEMALLOC_MAPS_COALESCE static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { - return !maps_coalesce; + if (!maps_coalesce) { + /* + * Without retain, only whole regions can be purged (required by + * MEM_RELEASE on Windows) -- therefore disallow splitting. See + * comments in extent_head_no_merge(). 
+ */ + return !opt_retain; + } + + return false; } -#endif /* * Accepts the extent to split, and the characteristics of each side of the @@ -2083,7 +2084,8 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena, extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + size_a), size_b, slab_b, szind_b, extent_sn_get(extent), extent_state_get(extent), extent_zeroed_get(extent), - extent_committed_get(extent), extent_dumpable_get(extent)); + extent_committed_get(extent), extent_dumpable_get(extent), + EXTENT_NOT_HEAD); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); @@ -2094,7 +2096,8 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena, extent_init(&lead, arena, extent_addr_get(extent), size_a, slab_a, szind_a, extent_sn_get(extent), extent_state_get(extent), extent_zeroed_get(extent), - extent_committed_get(extent), extent_dumpable_get(extent)); + extent_committed_get(extent), extent_dumpable_get(extent), + EXTENT_NOT_HEAD); extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, true, &lead_elm_a, &lead_elm_b); @@ -2152,7 +2155,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, static bool extent_merge_default_impl(void *addr_a, void *addr_b) { - if (!maps_coalesce) { + if (!maps_coalesce && !opt_retain) { return true; } if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { @@ -2162,13 +2165,51 @@ extent_merge_default_impl(void *addr_a, void *addr_b) { return false; } -#ifdef JEMALLOC_MAPS_COALESCE +/* + * Returns true if the given extents can't be merged because of their head bit + * settings. Assumes the second extent has the higher address. + */ +static bool +extent_head_no_merge(extent_t *a, extent_t *b) { + assert(extent_base_get(a) < extent_base_get(b)); + /* + * When coalesce is not always allowed (Windows), only merge extents + * from the same VirtualAlloc region under opt.retain (in which case + * MEM_DECOMMIT is utilized for purging). + */ + if (maps_coalesce) { + return false; + } + if (!opt_retain) { + return true; + } + /* If b is a head extent, disallow the cross-region merge. */ + if (extent_is_head_get(b)) { + /* + * Additionally, sn should not overflow with retain; sanity + * check that different regions have unique sn. + */ + assert(extent_sn_comp(a, b) != 0); + return true; + } + assert(extent_sn_comp(a, b) == 0); + + return false; +} + static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { + if (!maps_coalesce) { + tsdn_t *tsdn = tsdn_fetch(); + extent_t *a = iealloc(tsdn, addr_a); + extent_t *b = iealloc(tsdn, addr_b); + if (extent_head_no_merge(a, b)) { + return true; + } + } return extent_merge_default_impl(addr_a, addr_b); } -#endif static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, @@ -2176,10 +2217,11 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena, bool growing_retained) { witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + assert(extent_base_get(a) < extent_base_get(b)); extent_hooks_assure_initialized(arena, r_extent_hooks); - if ((*r_extent_hooks)->merge == NULL) { + if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) { return true; } diff --git a/src/extent_dss.c b/src/extent_dss.c index 69a7bee..8581789 100644 --- a/src/extent_dss.c +++ b/src/extent_dss.c @@ -156,7 +156,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, extent_init(gap, arena, gap_addr_page, gap_size_page, false, SC_NSIZES, arena_extent_sn_next(arena), - extent_state_active, false, true, true); + extent_state_active, false, true, true, + EXTENT_NOT_HEAD); } /* * Compute the address just past the end of the desired @@ -200,7 +201,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, extent_init(&extent, arena, ret, size, size, false, SC_NSIZES, extent_state_active, false, true, - true); + true, EXTENT_NOT_HEAD); if (extent_purge_forced_wrapper(tsdn, arena, &extent_hooks, &extent, 0, size)) { diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c index 96b042d..b182f31 100644 --- a/test/unit/arena_reset.c +++ b/test/unit/arena_reset.c @@ -279,8 +279,11 @@ extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size, if (!try_dalloc) { return true; } - pages_unmap(addr, size); did_dalloc = true; + if (!maps_coalesce && opt_retain) { + return true; + } + pages_unmap(addr, size); return false; } @@ -304,7 +307,9 @@ TEST_BEGIN(test_arena_destroy_hooks_unmap) { unsigned nptrs; extent_hooks_prep(); - try_decommit = false; + if (maps_coalesce) { + try_decommit = false; + } memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t)); diff --git a/test/unit/rtree.c b/test/unit/rtree.c index b017bc0..90adca1 100644 --- a/test/unit/rtree.c +++ b/test/unit/rtree.c @@ -87,9 +87,9 @@ TEST_BEGIN(test_rtree_extrema) { extent_t extent_a, extent_b; extent_init(&extent_a, NULL, NULL, SC_LARGE_MINCLASS, false, sz_size2index(SC_LARGE_MINCLASS), 0, - extent_state_active, false, false, true); + extent_state_active, false, false, true, EXTENT_NOT_HEAD); extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0, - extent_state_active, false, false, true); + extent_state_active, false, false, true, EXTENT_NOT_HEAD); tsdn_t *tsdn = tsdn_fetch(); @@ -126,7 +126,7 @@ TEST_BEGIN(test_rtree_bits) { extent_t extent; extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0, - extent_state_active, false, false, true); + extent_state_active, false, false, true, EXTENT_NOT_HEAD); rtree_t *rtree = &test_rtree; rtree_ctx_t rtree_ctx; @@ -167,7 +167,7 @@ TEST_BEGIN(test_rtree_random) { extent_t extent; extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0, - extent_state_active, false, false, true); + extent_state_active, false, false, true, EXTENT_NOT_HEAD); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); diff --git a/test/unit/slab.c b/test/unit/slab.c index ef71882..c56af25 100644 --- a/test/unit/slab.c +++ b/test/unit/slab.c @@ -9,7 +9,8 @@ TEST_BEGIN(test_arena_slab_regind) { const bin_info_t *bin_info = &bin_infos[binind]; extent_init(&slab, NULL, mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true, - binind, 0, extent_state_active, false, true, true); + binind, 0, extent_state_active, false, true, true, + EXTENT_NOT_HEAD); assert_ptr_not_null(extent_addr_get(&slab), "Unexpected malloc() failure"); for (regind = 0; regind < bin_info->nregs; regind++) { -- cgit v0.12 
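As a usage note for the retain support above, a small sketch of how an application controls the behavior through the public interface; it assumes an unprefixed build and uses only the documented malloc_conf configuration string and the opt.retain mallctl.

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Request retain at startup (setting MALLOC_CONF="retain:true" in the
 * environment works the same way).
 */
const char *malloc_conf = "retain:true";

int
main(void) {
	bool retain;
	size_t sz = sizeof(retain);
	/* opt.retain reports the setting that is actually in effect. */
	if (mallctl("opt.retain", &retain, &sz, NULL, 0) == 0) {
		printf("opt.retain: %s\n", retain ? "true" : "false");
	}
	return 0;
}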
From badf8d95f11cf8ead0f8b7192663002d1d4dc4b2 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 15 Jul 2019 12:09:41 -0700 Subject: Enable opt.retain by default on Windows. --- configure.ac | 3 +++ 1 file changed, 3 insertions(+) diff --git a/configure.ac b/configure.ac index 39a540f..261d81c 100644 --- a/configure.ac +++ b/configure.ac @@ -738,6 +738,9 @@ case "${host}" in libprefix="" SOREV="${so}" PIC_CFLAGS="" + if test "${LG_SIZEOF_PTR}" = "3"; then + default_retain="1" + fi ;; *) AC_MSG_RESULT([Unsupported operating system: ${host}]) -- cgit v0.12 From 57dbab5d6bc764a8b971334ec80977d6333688af Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 15 Jul 2019 12:16:02 -0700 Subject: Avoid leaking extents / VM when split is not supported. This can only happen on Windows and with opt.retain disabled (which isn't the default). The solution is suboptimal, however not a common case as retain is the long term plan for all platforms anyway. --- src/extent.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/extent.c b/src/extent.c index 416d68f..c6d402b 100644 --- a/src/extent.c +++ b/src/extent.c @@ -1052,6 +1052,17 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, growing_retained); + if (!maps_coalesce && result != extent_split_interior_ok + && !opt_retain) { + /* + * Split isn't supported (implies Windows w/o retain). Avoid + * leaking the extents. + */ + assert(to_leak != NULL && lead == NULL && trail == NULL); + extent_deactivate(tsdn, arena, extents, to_leak); + return NULL; + } + if (result == extent_split_interior_ok) { if (lead != NULL) { extent_deactivate(tsdn, arena, extents, lead); -- cgit v0.12 From 42807fcd9ed68c78f660c6dd85bcf9d82e134244 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 15 Jul 2019 14:55:43 -0700 Subject: extent_dalloc instead of leak when register fails. extent_register may only fail if the underlying extent and region got stolen / coalesced before we lock. Avoid doing extent_leak (which purges the region) since we don't really own the region. --- src/extent.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/extent.c b/src/extent.c index c6d402b..c2637d2 100644 --- a/src/extent.c +++ b/src/extent.c @@ -1335,8 +1335,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, } if (extent_register_no_gdump_add(tsdn, extent)) { - extents_leak(tsdn, arena, r_extent_hooks, - &arena->extents_retained, extent, true); + extent_dalloc(tsdn, arena, extent); goto label_err; } @@ -1505,8 +1504,7 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, extent_addr_randomize(tsdn, extent, alignment); } if (extent_register(tsdn, extent)) { - extents_leak(tsdn, arena, r_extent_hooks, - &arena->extents_retained, extent, false); + extent_dalloc(tsdn, arena, extent); return NULL; } @@ -1729,8 +1727,7 @@ extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { WITNESS_RANK_CORE, 0); if (extent_register(tsdn, extent)) { - extents_leak(tsdn, arena, &extent_hooks, - &arena->extents_retained, extent, false); + extent_dalloc(tsdn, arena, extent); return; } extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); -- cgit v0.12 From 4e36ce34c1e6a6f470a9355b90b0a757c6fdb0b5 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 15 Jul 2019 15:56:05 -0700 Subject: Track the leaked VM space via the abandoned_vm counter. The counter is 0 unless metadata allocation failed (indicates OOM), and is mainly for sanity checking. 
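For reference, a brief illustration of reading the new counter from an application built with stats enabled; the only name introduced by this patch is stats.arenas.<i>.abandoned_vm (arena index 0 is used purely as an example), the rest is the standard mallctl/epoch interface.

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	/* Refresh cached stats before reading them. */
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	mallctl("epoch", &epoch, &esz, &epoch, esz);

	size_t abandoned;
	size_t sz = sizeof(abandoned);
	if (mallctl("stats.arenas.0.abandoned_vm", &abandoned, &sz, NULL, 0)
	    == 0) {
		/*
		 * Normally 0; nonzero means extent metadata allocation failed
		 * (OOM) and the associated VM had to be abandoned.
		 */
		printf("abandoned_vm: %zu bytes\n", abandoned);
	}
	return 0;
}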
--- include/jemalloc/internal/arena_stats.h | 3 +++ src/arena.c | 2 ++ src/ctl.c | 7 +++++++ src/extent.c | 16 ++++++++++++---- src/stats.c | 3 ++- 5 files changed, 26 insertions(+), 5 deletions(-) diff --git a/include/jemalloc/internal/arena_stats.h b/include/jemalloc/internal/arena_stats.h index 3ffe9c7..23949ed 100644 --- a/include/jemalloc/internal/arena_stats.h +++ b/include/jemalloc/internal/arena_stats.h @@ -112,6 +112,9 @@ struct arena_stats_s { arena_stats_u64_t nflushes_large; /* Derived. */ arena_stats_u64_t nrequests_large; /* Derived. */ + /* VM space had to be leaked (undocumented). Normally 0. */ + atomic_zu_t abandoned_vm; + /* Number of bytes cached in tcache associated with this arena. */ atomic_zu_t tcache_bytes; /* Derived. */ diff --git a/src/arena.c b/src/arena.c index f9336fe..a44d0da 100644 --- a/src/arena.c +++ b/src/arena.c @@ -132,6 +132,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) + extents_npages_get(&arena->extents_dirty) + extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); + arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu( + &arena->stats.abandoned_vm, ATOMIC_RELAXED)); for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) { uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, diff --git a/src/ctl.c b/src/ctl.c index 1d83087..48afaa6 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -210,6 +210,7 @@ CTL_PROTO(stats_arenas_i_internal) CTL_PROTO(stats_arenas_i_metadata_thp) CTL_PROTO(stats_arenas_i_tcache_bytes) CTL_PROTO(stats_arenas_i_resident) +CTL_PROTO(stats_arenas_i_abandoned_vm) INDEX_PROTO(stats_arenas_i) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) @@ -543,6 +544,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)}, {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, {NAME("resident"), CTL(stats_arenas_i_resident)}, + {NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)}, {NAME("small"), CHILD(named, stats_arenas_i_small)}, {NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, @@ -913,6 +915,8 @@ MUTEX_PROF_ARENA_MUTEXES &astats->astats.ndalloc_large); ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large, &astats->astats.nrequests_large); + accum_atomic_zu(&sdstats->astats.abandoned_vm, + &astats->astats.abandoned_vm); accum_atomic_zu(&sdstats->astats.tcache_bytes, &astats->astats.tcache_bytes); @@ -2871,6 +2875,9 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, CTL_RO_CGEN(config_stats, stats_arenas_i_resident, atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm, + ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, arenas_i(mib[2])->astats->allocated_small, size_t) diff --git a/src/extent.c b/src/extent.c index c2637d2..6fdb7b0 100644 --- a/src/extent.c +++ b/src/extent.c @@ -619,16 +619,24 @@ label_return: return extent; } +/* + * This can only happen when we fail to allocate a new extent struct (which + * indicates OOM), e.g. when trying to split an existing extent. 
+ */ static void -extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, +extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, bool growing_retained) { + size_t sz = extent_size_get(extent); + if (config_stats) { + arena_stats_accum_zu(&arena->stats.abandoned_vm, sz); + } /* * Leak extent after making sure its pages have already been purged, so * that this is only a virtual memory leak. */ if (extents_state_get(extents) == extent_state_dirty) { if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, - extent, 0, extent_size_get(extent), growing_retained)) { + extent, 0, sz, growing_retained)) { extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent), growing_retained); @@ -1083,7 +1091,7 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, if (to_leak != NULL) { void *leak = extent_base_get(to_leak); extent_deregister_no_gdump_sub(tsdn, to_leak); - extents_leak(tsdn, arena, r_extent_hooks, extents, + extents_abandon_vm(tsdn, arena, r_extent_hooks, extents, to_leak, growing_retained); assert(extent_lock_from_addr(tsdn, rtree_ctx, leak, false) == NULL); @@ -1382,7 +1390,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, } if (to_leak != NULL) { extent_deregister_no_gdump_sub(tsdn, to_leak); - extents_leak(tsdn, arena, r_extent_hooks, + extents_abandon_vm(tsdn, arena, r_extent_hooks, &arena->extents_retained, to_leak, true); } goto label_err; diff --git a/src/stats.c b/src/stats.c index bce9f45..118e05d 100644 --- a/src/stats.c +++ b/src/stats.c @@ -673,7 +673,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, size_t large_allocated; uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills, large_nflushes; - size_t tcache_bytes; + size_t tcache_bytes, abandoned_vm; uint64_t uptime; CTL_GET("arenas.page", &page, size_t); @@ -963,6 +963,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, GET_AND_EMIT_MEM_STAT(metadata_thp) GET_AND_EMIT_MEM_STAT(tcache_bytes) GET_AND_EMIT_MEM_STAT(resident) + GET_AND_EMIT_MEM_STAT(abandoned_vm) GET_AND_EMIT_MEM_STAT(extent_avail) #undef GET_AND_EMIT_MEM_STAT -- cgit v0.12 From 1d148f353a2c71bc12fd066e467649fd17df3c95 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Tue, 23 Jul 2019 12:49:17 -0700 Subject: Optimize max_active_fit in first_fit. Stop scanning once reached the first max_active_fit size. --- src/extent.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/extent.c b/src/extent.c index 6fdb7b0..a2dbde1 100644 --- a/src/extent.c +++ b/src/extent.c @@ -453,7 +453,6 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, assert(!extent_heap_empty(&extents->heaps[i])); extent_t *extent = extent_heap_first(&extents->heaps[i]); assert(extent_size_get(extent) >= size); - bool size_ok = true; /* * In order to reduce fragmentation, avoid reusing and splitting * large extents for much smaller sizes. 
@@ -462,10 +461,9 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, */ if (extents->delay_coalesce && (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { - size_ok = false; + break; } - if (size_ok && - (ret == NULL || extent_snad_comp(extent, ret) < 0)) { + if (ret == NULL || extent_snad_comp(extent, ret) < 0) { ret = extent; } if (i == SC_NPSIZES) { -- cgit v0.12 From bc0998a9052957584b6944b6f43fffe0648f603e Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Wed, 24 Jul 2019 16:12:06 -0700 Subject: Invoke arena_dalloc_promoted() properly w/o tcache. When tcache was disabled, the dalloc promoted case was missing. --- include/jemalloc/internal/arena_inlines_b.h | 16 ++++++++++++---- src/arena.c | 2 +- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h index 7e61a44..8b657ab 100644 --- a/include/jemalloc/internal/arena_inlines_b.h +++ b/include/jemalloc/internal/arena_inlines_b.h @@ -229,6 +229,16 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) { } static inline void +arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) { + if (config_prof && unlikely(szind < SC_NBINS)) { + arena_dalloc_promoted(tsdn, ptr, NULL, true); + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } +} + +static inline void arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { assert(ptr != NULL); @@ -252,8 +262,7 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { /* Small allocation. */ arena_dalloc_small(tsdn, ptr); } else { - extent_t *extent = iealloc(tsdn, ptr); - large_dalloc(tsdn, extent); + arena_dalloc_large_no_tcache(tsdn, ptr, szind); } } @@ -349,8 +358,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) { /* Small allocation. */ arena_dalloc_small(tsdn, ptr); } else { - extent_t *extent = iealloc(tsdn, ptr); - large_dalloc(tsdn, extent); + arena_dalloc_large_no_tcache(tsdn, ptr, szind); } } diff --git a/src/arena.c b/src/arena.c index a44d0da..ba50e41 100644 --- a/src/arena.c +++ b/src/arena.c @@ -1610,7 +1610,7 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, assert(bumped_usize == SC_LARGE_MINCLASS); safety_check_verify_redzone(ptr, usize, bumped_usize); } - if (bumped_usize <= tcache_maxclass) { + if (bumped_usize <= tcache_maxclass && tcache != NULL) { tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, sz_size2index(bumped_usize), slow_path); } else { -- cgit v0.12 From a3fa597921987709eb0aa2258f1b35cc433ae5d4 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Wed, 24 Jul 2019 16:27:30 -0700 Subject: Refactor arena_dalloc() / _sdalloc(). 
--- include/jemalloc/internal/arena_inlines_b.h | 42 +++++++++++++---------------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h index 8b657ab..dd92657 100644 --- a/include/jemalloc/internal/arena_inlines_b.h +++ b/include/jemalloc/internal/arena_inlines_b.h @@ -267,6 +267,22 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { } JEMALLOC_ALWAYS_INLINE void +arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind, + bool slow_path) { + if (szind < nhbins) { + if (config_prof && unlikely(szind < SC_NBINS)) { + arena_dalloc_promoted(tsdn, ptr, tcache, slow_path); + } else { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind, + slow_path); + } + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } +} + +JEMALLOC_ALWAYS_INLINE void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx, bool slow_path) { assert(!tsdn_null(tsdn) || tcache == NULL); @@ -304,18 +320,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } else { - if (szind < nhbins) { - if (config_prof && unlikely(szind < SC_NBINS)) { - arena_dalloc_promoted(tsdn, ptr, tcache, - slow_path); - } else { - tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, - szind, slow_path); - } - } else { - extent_t *extent = iealloc(tsdn, ptr); - large_dalloc(tsdn, extent); - } + arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path); } } @@ -415,18 +420,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, slow_path); } else { - if (szind < nhbins) { - if (config_prof && unlikely(szind < SC_NBINS)) { - arena_dalloc_promoted(tsdn, ptr, tcache, - slow_path); - } else { - tcache_dalloc_large(tsdn_tsd(tsdn), - tcache, ptr, szind, slow_path); - } - } else { - extent_t *extent = iealloc(tsdn, ptr); - large_dalloc(tsdn, extent); - } + arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path); } } -- cgit v0.12 From 10fcff6c38c08bc2b1a672ff92701012944d843a Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Thu, 25 Jul 2019 11:15:08 -0700 Subject: Lower nthreads in test/unit/retained on 32-bit to avoid OOM. --- test/unit/retained.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/unit/retained.c b/test/unit/retained.c index d51a598..7993fd3 100644 --- a/test/unit/retained.c +++ b/test/unit/retained.c @@ -107,6 +107,9 @@ TEST_BEGIN(test_retained) { atomic_store_u(&epoch, 0, ATOMIC_RELAXED); unsigned nthreads = ncpus * 2; + if (LG_SIZEOF_PTR < 3 && nthreads > 16) { + nthreads = 16; /* 32-bit platform could run out of vaddr. */ + } VARIABLE_ARRAY(thd_t, threads, nthreads); for (unsigned i = 0; i < nthreads; i++) { thd_create(&threads[i], thd_start, NULL); -- cgit v0.12 From 9f6a9f4c1f78fd61297e01ae1521af9696d2023b Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Thu, 25 Jul 2019 13:43:59 -0700 Subject: Update manual for opt.retain (new default on Windows). --- doc/jemalloc.xml.in | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in index e23ccb7..7fecda7 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -963,17 +963,17 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay", linkend="stats.retained">stats.retained for related details). It also makes jemalloc use mmap2 - in a more greedy way, mapping larger chunks in one go. 
- This option is disabled by default unless discarding virtual memory is - known to trigger - platform-specific performance problems, e.g. for [64-bit] Linux, which - has a quirk in its virtual memory allocation algorithm that causes - semi-permanent VM map holes under normal jemalloc operation. Although - munmap - 2 causes issues on 32-bit Linux as - well, retaining virtual memory for 32-bit Linux is disabled by default - due to the practical possibility of address space exhaustion. - + or equivalent in a more greedy way, mapping larger + chunks in one go. This option is disabled by default unless discarding + virtual memory is known to trigger platform-specific performance + problems, namely 1) for [64-bit] Linux, which has a quirk in its virtual + memory allocation algorithm that causes semi-permanent VM map holes + under normal jemalloc operation; and 2) for [64-bit] Windows, which + disallows split / merged regions with + MEM_RELEASE. Although the + same issues may present on 32-bit platforms as well, retaining virtual + memory for 32-bit Linux and Windows is disabled by default due to the + practical possibility of address space exhaustion. -- cgit v0.12 From 85f0cb2d0c0a05e9fc926544c65ca784c03ab239 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Thu, 25 Jul 2019 14:16:56 -0700 Subject: Add indent to individual options for confirm_conf. --- src/jemalloc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/jemalloc.c b/src/jemalloc.c index b6c8d99..ed13718 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1059,9 +1059,10 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], #define CONF_CONTINUE { \ if (!initial_call && opt_confirm_conf \ && cur_opt_valid) { \ - malloc_printf(": Set "\ - "conf value: %.*s:%.*s\n", \ - (int)klen, k, (int)vlen, v);\ + malloc_printf(": -- " \ + "Set conf value: %.*s:%.*s" \ + "\n", (int)klen, k, \ + (int)vlen, v); \ } \ continue; \ } -- cgit v0.12 From 7618b0b8e458d9c0db6e4b05ccbe6c6308952890 Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Fri, 12 Jul 2019 16:37:37 -0700 Subject: Refactor prof log `prof.c` is growing too long, so trying to modularize it. There are a few internal functions that had to be exposed but I think it is a fair trade-off. 
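Since the code being moved implements the allocation-log feature end to end, a hedged usage sketch may help orient the reader; it assumes a profiling-enabled build and the prof.log_start / prof.log_stop mallctls that drive prof_log_start() / prof_log_stop() below, with the collected threads, stack traces and allocations emitted as JSON to the named file.

#include <jemalloc/jemalloc.h>
#include <stdlib.h>

/* Profiling must be active for logging to record anything. */
const char *malloc_conf = "prof:true";

int
main(void) {
	/* Begin logging sampled allocations to the named file. */
	const char *filename = "prof_log.json";
	mallctl("prof.log_start", NULL, NULL, (void *)&filename,
	    sizeof(const char *));

	void *p = malloc(4096);
	free(p);

	/* Stop logging and dump the accumulated records as JSON. */
	mallctl("prof.log_stop", NULL, NULL, NULL, 0);
	return 0;
}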
--- Makefile.in | 1 + include/jemalloc/internal/prof_externs.h | 8 + msvc/projects/vc2015/jemalloc/jemalloc.vcxproj | 1 + .../vc2015/jemalloc/jemalloc.vcxproj.filters | 3 + msvc/projects/vc2017/jemalloc/jemalloc.vcxproj | 1 + .../vc2017/jemalloc/jemalloc.vcxproj.filters | 3 + src/prof.c | 682 +------------------- src/prof_log.c | 698 +++++++++++++++++++++ 8 files changed, 720 insertions(+), 677 deletions(-) create mode 100644 src/prof_log.c diff --git a/Makefile.in b/Makefile.in index 7128b00..1cd973d 100644 --- a/Makefile.in +++ b/Makefile.in @@ -117,6 +117,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/pages.c \ $(srcroot)src/prng.c \ $(srcroot)src/prof.c \ + $(srcroot)src/prof_log.c \ $(srcroot)src/rtree.c \ $(srcroot)src/safety_check.c \ $(srcroot)src/stats.c \ diff --git a/include/jemalloc/internal/prof_externs.h b/include/jemalloc/internal/prof_externs.h index 094f3e1..e94ac3b 100644 --- a/include/jemalloc/internal/prof_externs.h +++ b/include/jemalloc/internal/prof_externs.h @@ -43,6 +43,8 @@ extern uint64_t prof_interval; */ extern size_t lg_prof_sample; +extern bool prof_booted; + void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); @@ -64,10 +66,14 @@ extern prof_dump_header_t *JET_MUTABLE prof_dump_header; void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, uint64_t *accumbytes); #endif +int prof_getpid(void); bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum); void prof_idump(tsdn_t *tsdn); bool prof_mdump(tsd_t *tsd, const char *filename); void prof_gdump(tsdn_t *tsdn); + +void prof_bt_hash(const void *key, size_t r_hash[2]); +bool prof_bt_keycomp(const void *k1, const void *k2); prof_tdata_t *prof_tdata_init(tsd_t *tsd); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); void prof_reset(tsd_t *tsd, size_t lg_sample); @@ -91,8 +97,10 @@ void prof_postfork_parent(tsdn_t *tsdn); void prof_postfork_child(tsdn_t *tsdn); void prof_sample_threshold_update(prof_tdata_t *tdata); +void prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx); bool prof_log_start(tsdn_t *tsdn, const char *filename); bool prof_log_stop(tsdn_t *tsdn); +bool prof_log_init(tsd_t *tsdn); #ifdef JEMALLOC_JET size_t prof_log_bt_count(void); size_t prof_log_alloc_count(void); diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj index 228e8be..d93d909 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj @@ -58,6 +58,7 @@ + diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters index d839515..7b09d4e 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters @@ -67,6 +67,9 @@ Source Files + + Source Files + Source Files diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj index edcceed..28bd3cd 100644 --- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj @@ -58,6 +58,7 @@ + diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters index 6df7260..a66c209 100644 --- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters @@ -67,6 +67,9 @@ 
Source Files + + Source Files + Source Files diff --git a/src/prof.c b/src/prof.c index 4ebe279..9d1edb3 100644 --- a/src/prof.c +++ b/src/prof.c @@ -7,7 +7,6 @@ #include "jemalloc/internal/hash.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/emitter.h" /******************************************************************************/ @@ -39,7 +38,6 @@ bool opt_prof_gdump = false; bool opt_prof_final = false; bool opt_prof_leak = false; bool opt_prof_accum = false; -bool opt_prof_log = false; char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF @@ -72,100 +70,6 @@ uint64_t prof_interval = 0; size_t lg_prof_sample; -typedef enum prof_logging_state_e prof_logging_state_t; -enum prof_logging_state_e { - prof_logging_state_stopped, - prof_logging_state_started, - prof_logging_state_dumping -}; - -/* - * - stopped: log_start never called, or previous log_stop has completed. - * - started: log_start called, log_stop not called yet. Allocations are logged. - * - dumping: log_stop called but not finished; samples are not logged anymore. - */ -prof_logging_state_t prof_logging_state = prof_logging_state_stopped; - -#ifdef JEMALLOC_JET -static bool prof_log_dummy = false; -#endif - -/* Incremented for every log file that is output. */ -static uint64_t log_seq = 0; -static char log_filename[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; - -/* Timestamp for most recent call to log_start(). */ -static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER; - -/* Increment these when adding to the log_bt and log_thr linked lists. */ -static size_t log_bt_index = 0; -static size_t log_thr_index = 0; - -/* Linked list node definitions. These are only used in prof.c. */ -typedef struct prof_bt_node_s prof_bt_node_t; - -struct prof_bt_node_s { - prof_bt_node_t *next; - size_t index; - prof_bt_t bt; - /* Variable size backtrace vector pointed to by bt. */ - void *vec[1]; -}; - -typedef struct prof_thr_node_s prof_thr_node_t; - -struct prof_thr_node_s { - prof_thr_node_t *next; - size_t index; - uint64_t thr_uid; - /* Variable size based on thr_name_sz. */ - char name[1]; -}; - -typedef struct prof_alloc_node_s prof_alloc_node_t; - -/* This is output when logging sampled allocations. */ -struct prof_alloc_node_s { - prof_alloc_node_t *next; - /* Indices into an array of thread data. */ - size_t alloc_thr_ind; - size_t free_thr_ind; - - /* Indices into an array of backtraces. */ - size_t alloc_bt_ind; - size_t free_bt_ind; - - uint64_t alloc_time_ns; - uint64_t free_time_ns; - - size_t usize; -}; - -/* - * Created on the first call to prof_log_start and deleted on prof_log_stop. - * These are the backtraces and threads that have already been logged by an - * allocation. - */ -static bool log_tables_initialized = false; -static ckh_t log_bt_node_set; -static ckh_t log_thr_node_set; - -/* Store linked lists for logged data. */ -static prof_bt_node_t *log_bt_first = NULL; -static prof_bt_node_t *log_bt_last = NULL; -static prof_thr_node_t *log_thr_first = NULL; -static prof_thr_node_t *log_thr_last = NULL; -static prof_alloc_node_t *log_alloc_first = NULL; -static prof_alloc_node_t *log_alloc_last = NULL; - -/* Protects the prof_logging_state and any log_{...} variable. */ -static malloc_mutex_t log_mtx; - /* * Table of mutexes that are shared among gctx's. These are leaf locks, so * there is no problem with using them for more than one gctx at the same time. 
@@ -225,7 +129,7 @@ static size_t prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ -static bool prof_booted = false; +bool prof_booted = false; /******************************************************************************/ /* @@ -241,12 +145,6 @@ static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached); static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); -/* Hashtable functions for log_bt_node_set and log_thr_node_set. */ -static void prof_thr_node_hash(const void *key, size_t r_hash[2]); -static bool prof_thr_node_keycomp(const void *k1, const void *k2); -static void prof_bt_node_hash(const void *key, size_t r_hash[2]); -static bool prof_bt_node_keycomp(const void *k1, const void *k2); - /******************************************************************************/ /* Red-black trees. */ @@ -361,162 +259,6 @@ prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, malloc_mutex_unlock(tsdn, tctx->tdata->lock); } -static size_t -prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { - assert(prof_logging_state == prof_logging_state_started); - malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); - - prof_bt_node_t dummy_node; - dummy_node.bt = *bt; - prof_bt_node_t *node; - - /* See if this backtrace is already cached in the table. */ - if (ckh_search(&log_bt_node_set, (void *)(&dummy_node), - (void **)(&node), NULL)) { - size_t sz = offsetof(prof_bt_node_t, vec) + - (bt->len * sizeof(void *)); - prof_bt_node_t *new_node = (prof_bt_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, - true, arena_get(TSDN_NULL, 0, true), true); - if (log_bt_first == NULL) { - log_bt_first = new_node; - log_bt_last = new_node; - } else { - log_bt_last->next = new_node; - log_bt_last = new_node; - } - - new_node->next = NULL; - new_node->index = log_bt_index; - /* - * Copy the backtrace: bt is inside a tdata or gctx, which - * might die before prof_log_stop is called. - */ - new_node->bt.len = bt->len; - memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *)); - new_node->bt.vec = new_node->vec; - - log_bt_index++; - ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL); - return new_node->index; - } else { - return node->index; - } -} -static size_t -prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { - assert(prof_logging_state == prof_logging_state_started); - malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); - - prof_thr_node_t dummy_node; - dummy_node.thr_uid = thr_uid; - prof_thr_node_t *node; - - /* See if this thread is already cached in the table. 
*/ - if (ckh_search(&log_thr_node_set, (void *)(&dummy_node), - (void **)(&node), NULL)) { - size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1; - prof_thr_node_t *new_node = (prof_thr_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, - true, arena_get(TSDN_NULL, 0, true), true); - if (log_thr_first == NULL) { - log_thr_first = new_node; - log_thr_last = new_node; - } else { - log_thr_last->next = new_node; - log_thr_last = new_node; - } - - new_node->next = NULL; - new_node->index = log_thr_index; - new_node->thr_uid = thr_uid; - strcpy(new_node->name, name); - - log_thr_index++; - ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL); - return new_node->index; - } else { - return node->index; - } -} - -static void -prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { - malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false); - if (cons_tdata == NULL) { - /* - * We decide not to log these allocations. cons_tdata will be - * NULL only when the current thread is in a weird state (e.g. - * it's being destroyed). - */ - return; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); - - if (prof_logging_state != prof_logging_state_started) { - goto label_done; - } - - if (!log_tables_initialized) { - bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, - prof_bt_node_hash, prof_bt_node_keycomp); - bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, - prof_thr_node_hash, prof_thr_node_keycomp); - if (err1 || err2) { - goto label_done; - } - log_tables_initialized = true; - } - - nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr, - (alloc_ctx_t *)NULL); - nstime_t free_time = NSTIME_ZERO_INITIALIZER; - nstime_update(&free_time); - - size_t sz = sizeof(prof_alloc_node_t); - prof_alloc_node_t *new_node = (prof_alloc_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - - const char *prod_thr_name = (tctx->tdata->thread_name == NULL)? - "" : tctx->tdata->thread_name; - const char *cons_thr_name = prof_thread_name_get(tsd); - - prof_bt_t bt; - /* Initialize the backtrace, using the buffer in tdata to store it. */ - bt_init(&bt, cons_tdata->vec); - prof_backtrace(&bt); - prof_bt_t *cons_bt = &bt; - - /* We haven't destroyed tctx yet, so gctx should be good to read. */ - prof_bt_t *prod_bt = &tctx->gctx->bt; - - new_node->next = NULL; - new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid, - prod_thr_name); - new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid, - cons_thr_name); - new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt); - new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt); - new_node->alloc_time_ns = nstime_ns(&alloc_time); - new_node->free_time_ns = nstime_ns(&free_time); - new_node->usize = usize; - - if (log_alloc_first == NULL) { - log_alloc_first = new_node; - log_alloc_last = new_node; - } else { - log_alloc_last->next = new_node; - log_alloc_last = new_node; - } - -label_done: - malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); -} - void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { @@ -1693,7 +1435,7 @@ prof_open_maps(const char *format, ...) 
{ } #endif -static int +int prof_getpid(void) { #ifdef _WIN32 return GetCurrentProcessId(); @@ -2135,7 +1877,7 @@ prof_gdump(tsdn_t *tsdn) { } } -static void +void prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; @@ -2144,7 +1886,7 @@ prof_bt_hash(const void *key, size_t r_hash[2]) { hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); } -static bool +bool prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; @@ -2157,33 +1899,6 @@ prof_bt_keycomp(const void *k1, const void *k2) { return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } -static void -prof_bt_node_hash(const void *key, size_t r_hash[2]) { - const prof_bt_node_t *bt_node = (prof_bt_node_t *)key; - prof_bt_hash((void *)(&bt_node->bt), r_hash); -} - -static bool -prof_bt_node_keycomp(const void *k1, const void *k2) { - const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; - const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2; - return prof_bt_keycomp((void *)(&bt_node1->bt), - (void *)(&bt_node2->bt)); -} - -static void -prof_thr_node_hash(const void *key, size_t r_hash[2]) { - const prof_thr_node_t *thr_node = (prof_thr_node_t *)key; - hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash); -} - -static bool -prof_thr_node_keycomp(const void *k1, const void *k2) { - const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1; - const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2; - return thr_node1->thr_uid == thr_node2->thr_uid; -} - static uint64_t prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; @@ -2416,368 +2131,6 @@ prof_active_set(tsdn_t *tsdn, bool active) { return prof_active_old; } -#ifdef JEMALLOC_JET -size_t -prof_log_bt_count(void) { - size_t cnt = 0; - prof_bt_node_t *node = log_bt_first; - while (node != NULL) { - cnt++; - node = node->next; - } - return cnt; -} - -size_t -prof_log_alloc_count(void) { - size_t cnt = 0; - prof_alloc_node_t *node = log_alloc_first; - while (node != NULL) { - cnt++; - node = node->next; - } - return cnt; -} - -size_t -prof_log_thr_count(void) { - size_t cnt = 0; - prof_thr_node_t *node = log_thr_first; - while (node != NULL) { - cnt++; - node = node->next; - } - return cnt; -} - -bool -prof_log_is_logging(void) { - return prof_logging_state == prof_logging_state_started; -} - -bool -prof_log_rep_check(void) { - if (prof_logging_state == prof_logging_state_stopped - && log_tables_initialized) { - return true; - } - - if (log_bt_last != NULL && log_bt_last->next != NULL) { - return true; - } - if (log_thr_last != NULL && log_thr_last->next != NULL) { - return true; - } - if (log_alloc_last != NULL && log_alloc_last->next != NULL) { - return true; - } - - size_t bt_count = prof_log_bt_count(); - size_t thr_count = prof_log_thr_count(); - size_t alloc_count = prof_log_alloc_count(); - - - if (prof_logging_state == prof_logging_state_stopped) { - if (bt_count != 0 || thr_count != 0 || alloc_count || 0) { - return true; - } - } - - prof_alloc_node_t *node = log_alloc_first; - while (node != NULL) { - if (node->alloc_bt_ind >= bt_count) { - return true; - } - if (node->free_bt_ind >= bt_count) { - return true; - } - if (node->alloc_thr_ind >= thr_count) { - return true; - } - if (node->free_thr_ind >= thr_count) { - return true; - } - if (node->alloc_time_ns > node->free_time_ns) { - return true; - } - node = node->next; - } - - return false; -} - -void -prof_log_dummy_set(bool new_value) { - prof_log_dummy = new_value; -} -#endif - -bool 
-prof_log_start(tsdn_t *tsdn, const char *filename) { - if (!opt_prof || !prof_booted) { - return true; - } - - bool ret = false; - size_t buf_size = PATH_MAX + 1; - - malloc_mutex_lock(tsdn, &log_mtx); - - if (prof_logging_state != prof_logging_state_stopped) { - ret = true; - } else if (filename == NULL) { - /* Make default name. */ - malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json", - opt_prof_prefix, prof_getpid(), log_seq); - log_seq++; - prof_logging_state = prof_logging_state_started; - } else if (strlen(filename) >= buf_size) { - ret = true; - } else { - strcpy(log_filename, filename); - prof_logging_state = prof_logging_state_started; - } - - if (!ret) { - nstime_update(&log_start_timestamp); - } - - malloc_mutex_unlock(tsdn, &log_mtx); - - return ret; -} - -/* Used as an atexit function to stop logging on exit. */ -static void -prof_log_stop_final(void) { - tsd_t *tsd = tsd_fetch(); - prof_log_stop(tsd_tsdn(tsd)); -} - -struct prof_emitter_cb_arg_s { - int fd; - ssize_t ret; -}; - -static void -prof_emitter_write_cb(void *opaque, const char *to_write) { - struct prof_emitter_cb_arg_s *arg = - (struct prof_emitter_cb_arg_s *)opaque; - size_t bytes = strlen(to_write); -#ifdef JEMALLOC_JET - if (prof_log_dummy) { - return; - } -#endif - arg->ret = write(arg->fd, (void *)to_write, bytes); -} - -/* - * prof_log_emit_{...} goes through the appropriate linked list, emitting each - * node to the json and deallocating it. - */ -static void -prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) { - emitter_json_array_kv_begin(emitter, "threads"); - prof_thr_node_t *thr_node = log_thr_first; - prof_thr_node_t *thr_old_node; - while (thr_node != NULL) { - emitter_json_object_begin(emitter); - - emitter_json_kv(emitter, "thr_uid", emitter_type_uint64, - &thr_node->thr_uid); - - char *thr_name = thr_node->name; - - emitter_json_kv(emitter, "thr_name", emitter_type_string, - &thr_name); - - emitter_json_object_end(emitter); - thr_old_node = thr_node; - thr_node = thr_node->next; - idalloc(tsd, thr_old_node); - } - emitter_json_array_end(emitter); -} - -static void -prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { - emitter_json_array_kv_begin(emitter, "stack_traces"); - prof_bt_node_t *bt_node = log_bt_first; - prof_bt_node_t *bt_old_node; - /* - * Calculate how many hex digits we need: twice number of bytes, two for - * "0x", and then one more for terminating '\0'. 
- */ - char buf[2 * sizeof(intptr_t) + 3]; - size_t buf_sz = sizeof(buf); - while (bt_node != NULL) { - emitter_json_array_begin(emitter); - size_t i; - for (i = 0; i < bt_node->bt.len; i++) { - malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]); - char *trace_str = buf; - emitter_json_value(emitter, emitter_type_string, - &trace_str); - } - emitter_json_array_end(emitter); - - bt_old_node = bt_node; - bt_node = bt_node->next; - idalloc(tsd, bt_old_node); - } - emitter_json_array_end(emitter); -} - -static void -prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) { - emitter_json_array_kv_begin(emitter, "allocations"); - prof_alloc_node_t *alloc_node = log_alloc_first; - prof_alloc_node_t *alloc_old_node; - while (alloc_node != NULL) { - emitter_json_object_begin(emitter); - - emitter_json_kv(emitter, "alloc_thread", emitter_type_size, - &alloc_node->alloc_thr_ind); - - emitter_json_kv(emitter, "free_thread", emitter_type_size, - &alloc_node->free_thr_ind); - - emitter_json_kv(emitter, "alloc_trace", emitter_type_size, - &alloc_node->alloc_bt_ind); - - emitter_json_kv(emitter, "free_trace", emitter_type_size, - &alloc_node->free_bt_ind); - - emitter_json_kv(emitter, "alloc_timestamp", - emitter_type_uint64, &alloc_node->alloc_time_ns); - - emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64, - &alloc_node->free_time_ns); - - emitter_json_kv(emitter, "usize", emitter_type_uint64, - &alloc_node->usize); - - emitter_json_object_end(emitter); - - alloc_old_node = alloc_node; - alloc_node = alloc_node->next; - idalloc(tsd, alloc_old_node); - } - emitter_json_array_end(emitter); -} - -static void -prof_log_emit_metadata(emitter_t *emitter) { - emitter_json_object_kv_begin(emitter, "info"); - - nstime_t now = NSTIME_ZERO_INITIALIZER; - - nstime_update(&now); - uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp); - emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns); - - char *vers = JEMALLOC_VERSION; - emitter_json_kv(emitter, "version", - emitter_type_string, &vers); - - emitter_json_kv(emitter, "lg_sample_rate", - emitter_type_int, &lg_prof_sample); - - int pid = prof_getpid(); - emitter_json_kv(emitter, "pid", emitter_type_int, &pid); - - emitter_json_object_end(emitter); -} - - -bool -prof_log_stop(tsdn_t *tsdn) { - if (!opt_prof || !prof_booted) { - return true; - } - - tsd_t *tsd = tsdn_tsd(tsdn); - malloc_mutex_lock(tsdn, &log_mtx); - - if (prof_logging_state != prof_logging_state_started) { - malloc_mutex_unlock(tsdn, &log_mtx); - return true; - } - - /* - * Set the state to dumping. We'll set it to stopped when we're done. - * Since other threads won't be able to start/stop/log when the state is - * dumping, we don't have to hold the lock during the whole method. - */ - prof_logging_state = prof_logging_state_dumping; - malloc_mutex_unlock(tsdn, &log_mtx); - - - emitter_t emitter; - - /* Create a file. */ - - int fd; -#ifdef JEMALLOC_JET - if (prof_log_dummy) { - fd = 0; - } else { - fd = creat(log_filename, 0644); - } -#else - fd = creat(log_filename, 0644); -#endif - - if (fd == -1) { - malloc_printf(": creat() for log file \"%s\" " - " failed with %d\n", log_filename, errno); - if (opt_abort) { - abort(); - } - return true; - } - - /* Emit to json. 
*/ - struct prof_emitter_cb_arg_s arg; - arg.fd = fd; - emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb, - (void *)(&arg)); - - emitter_json_object_begin(&emitter); - prof_log_emit_metadata(&emitter); - prof_log_emit_threads(tsd, &emitter); - prof_log_emit_traces(tsd, &emitter); - prof_log_emit_allocs(tsd, &emitter); - emitter_json_object_end(&emitter); - - /* Reset global state. */ - if (log_tables_initialized) { - ckh_delete(tsd, &log_bt_node_set); - ckh_delete(tsd, &log_thr_node_set); - } - log_tables_initialized = false; - log_bt_index = 0; - log_thr_index = 0; - log_bt_first = NULL; - log_bt_last = NULL; - log_thr_first = NULL; - log_thr_last = NULL; - log_alloc_first = NULL; - log_alloc_last = NULL; - - malloc_mutex_lock(tsdn, &log_mtx); - prof_logging_state = prof_logging_state_stopped; - malloc_mutex_unlock(tsdn, &log_mtx); - -#ifdef JEMALLOC_JET - if (prof_log_dummy) { - return false; - } -#endif - return close(fd); -} - const char * prof_thread_name_get(tsd_t *tsd) { prof_tdata_t *tdata; @@ -3014,35 +2367,10 @@ prof_boot2(tsd_t *tsd) { } } - if (opt_prof_log) { - prof_log_start(tsd_tsdn(tsd), NULL); - } - - if (atexit(prof_log_stop_final) != 0) { - malloc_write(": Error in atexit() " - "for logging\n"); - if (opt_abort) { - abort(); - } - } - - if (malloc_mutex_init(&log_mtx, "prof_log", - WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) { - return true; - } - - if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, - prof_bt_node_hash, prof_bt_node_keycomp)) { + if (prof_log_init(tsd)) { return true; } - if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, - prof_thr_node_hash, prof_thr_node_keycomp)) { - return true; - } - - log_tables_initialized = true; - gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), CACHELINE); diff --git a/src/prof_log.c b/src/prof_log.c new file mode 100644 index 0000000..25a6abe --- /dev/null +++ b/src/prof_log.c @@ -0,0 +1,698 @@ +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/emitter.h" + +bool opt_prof_log = false; +typedef enum prof_logging_state_e prof_logging_state_t; +enum prof_logging_state_e { + prof_logging_state_stopped, + prof_logging_state_started, + prof_logging_state_dumping +}; + +/* + * - stopped: log_start never called, or previous log_stop has completed. + * - started: log_start called, log_stop not called yet. Allocations are logged. + * - dumping: log_stop called but not finished; samples are not logged anymore. + */ +prof_logging_state_t prof_logging_state = prof_logging_state_stopped; + +#ifdef JEMALLOC_JET +static bool prof_log_dummy = false; +#endif + +/* Incremented for every log file that is output. */ +static uint64_t log_seq = 0; +static char log_filename[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* Timestamp for most recent call to log_start(). */ +static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER; + +/* Increment these when adding to the log_bt and log_thr linked lists. */ +static size_t log_bt_index = 0; +static size_t log_thr_index = 0; + +/* Linked list node definitions. These are only used in this file. 
*/ +typedef struct prof_bt_node_s prof_bt_node_t; + +struct prof_bt_node_s { + prof_bt_node_t *next; + size_t index; + prof_bt_t bt; + /* Variable size backtrace vector pointed to by bt. */ + void *vec[1]; +}; + +typedef struct prof_thr_node_s prof_thr_node_t; + +struct prof_thr_node_s { + prof_thr_node_t *next; + size_t index; + uint64_t thr_uid; + /* Variable size based on thr_name_sz. */ + char name[1]; +}; + +typedef struct prof_alloc_node_s prof_alloc_node_t; + +/* This is output when logging sampled allocations. */ +struct prof_alloc_node_s { + prof_alloc_node_t *next; + /* Indices into an array of thread data. */ + size_t alloc_thr_ind; + size_t free_thr_ind; + + /* Indices into an array of backtraces. */ + size_t alloc_bt_ind; + size_t free_bt_ind; + + uint64_t alloc_time_ns; + uint64_t free_time_ns; + + size_t usize; +}; + +/* + * Created on the first call to prof_log_start and deleted on prof_log_stop. + * These are the backtraces and threads that have already been logged by an + * allocation. + */ +static bool log_tables_initialized = false; +static ckh_t log_bt_node_set; +static ckh_t log_thr_node_set; + +/* Store linked lists for logged data. */ +static prof_bt_node_t *log_bt_first = NULL; +static prof_bt_node_t *log_bt_last = NULL; +static prof_thr_node_t *log_thr_first = NULL; +static prof_thr_node_t *log_thr_last = NULL; +static prof_alloc_node_t *log_alloc_first = NULL; +static prof_alloc_node_t *log_alloc_last = NULL; + +/* Protects the prof_logging_state and any log_{...} variable. */ +static malloc_mutex_t log_mtx; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +/* Hashtable functions for log_bt_node_set and log_thr_node_set. */ +static void prof_thr_node_hash(const void *key, size_t r_hash[2]); +static bool prof_thr_node_keycomp(const void *k1, const void *k2); +static void prof_bt_node_hash(const void *key, size_t r_hash[2]); +static bool prof_bt_node_keycomp(const void *k1, const void *k2); + +/******************************************************************************/ + +static size_t +prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { + assert(prof_logging_state == prof_logging_state_started); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); + + prof_bt_node_t dummy_node; + dummy_node.bt = *bt; + prof_bt_node_t *node; + + /* See if this backtrace is already cached in the table. */ + if (ckh_search(&log_bt_node_set, (void *)(&dummy_node), + (void **)(&node), NULL)) { + size_t sz = offsetof(prof_bt_node_t, vec) + + (bt->len * sizeof(void *)); + prof_bt_node_t *new_node = (prof_bt_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, + true, arena_get(TSDN_NULL, 0, true), true); + if (log_bt_first == NULL) { + log_bt_first = new_node; + log_bt_last = new_node; + } else { + log_bt_last->next = new_node; + log_bt_last = new_node; + } + + new_node->next = NULL; + new_node->index = log_bt_index; + /* + * Copy the backtrace: bt is inside a tdata or gctx, which + * might die before prof_log_stop is called. 
+ */ + new_node->bt.len = bt->len; + memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *)); + new_node->bt.vec = new_node->vec; + + log_bt_index++; + ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL); + return new_node->index; + } else { + return node->index; + } +} +static size_t +prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { + assert(prof_logging_state == prof_logging_state_started); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); + + prof_thr_node_t dummy_node; + dummy_node.thr_uid = thr_uid; + prof_thr_node_t *node; + + /* See if this thread is already cached in the table. */ + if (ckh_search(&log_thr_node_set, (void *)(&dummy_node), + (void **)(&node), NULL)) { + size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1; + prof_thr_node_t *new_node = (prof_thr_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, + true, arena_get(TSDN_NULL, 0, true), true); + if (log_thr_first == NULL) { + log_thr_first = new_node; + log_thr_last = new_node; + } else { + log_thr_last->next = new_node; + log_thr_last = new_node; + } + + new_node->next = NULL; + new_node->index = log_thr_index; + new_node->thr_uid = thr_uid; + strcpy(new_node->name, name); + + log_thr_index++; + ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL); + return new_node->index; + } else { + return node->index; + } +} + +void +prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false); + if (cons_tdata == NULL) { + /* + * We decide not to log these allocations. cons_tdata will be + * NULL only when the current thread is in a weird state (e.g. + * it's being destroyed). + */ + return; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); + + if (prof_logging_state != prof_logging_state_started) { + goto label_done; + } + + if (!log_tables_initialized) { + bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, + prof_bt_node_hash, prof_bt_node_keycomp); + bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, + prof_thr_node_hash, prof_thr_node_keycomp); + if (err1 || err2) { + goto label_done; + } + log_tables_initialized = true; + } + + nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr, + (alloc_ctx_t *)NULL); + nstime_t free_time = NSTIME_ZERO_INITIALIZER; + nstime_update(&free_time); + + size_t sz = sizeof(prof_alloc_node_t); + prof_alloc_node_t *new_node = (prof_alloc_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + + const char *prod_thr_name = (tctx->tdata->thread_name == NULL)? + "" : tctx->tdata->thread_name; + const char *cons_thr_name = prof_thread_name_get(tsd); + + prof_bt_t bt; + /* Initialize the backtrace, using the buffer in tdata to store it. */ + bt_init(&bt, cons_tdata->vec); + prof_backtrace(&bt); + prof_bt_t *cons_bt = &bt; + + /* We haven't destroyed tctx yet, so gctx should be good to read. 
*/ + prof_bt_t *prod_bt = &tctx->gctx->bt; + + new_node->next = NULL; + new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid, + prod_thr_name); + new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid, + cons_thr_name); + new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt); + new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt); + new_node->alloc_time_ns = nstime_ns(&alloc_time); + new_node->free_time_ns = nstime_ns(&free_time); + new_node->usize = usize; + + if (log_alloc_first == NULL) { + log_alloc_first = new_node; + log_alloc_last = new_node; + } else { + log_alloc_last->next = new_node; + log_alloc_last = new_node; + } + +label_done: + malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); +} + +static void +prof_bt_node_hash(const void *key, size_t r_hash[2]) { + const prof_bt_node_t *bt_node = (prof_bt_node_t *)key; + prof_bt_hash((void *)(&bt_node->bt), r_hash); +} + +static bool +prof_bt_node_keycomp(const void *k1, const void *k2) { + const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; + const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2; + return prof_bt_keycomp((void *)(&bt_node1->bt), + (void *)(&bt_node2->bt)); +} + +static void +prof_thr_node_hash(const void *key, size_t r_hash[2]) { + const prof_thr_node_t *thr_node = (prof_thr_node_t *)key; + hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash); +} + +static bool +prof_thr_node_keycomp(const void *k1, const void *k2) { + const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1; + const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2; + return thr_node1->thr_uid == thr_node2->thr_uid; +} + +#ifdef JEMALLOC_JET +size_t +prof_log_bt_count(void) { + size_t cnt = 0; + prof_bt_node_t *node = log_bt_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +size_t +prof_log_alloc_count(void) { + size_t cnt = 0; + prof_alloc_node_t *node = log_alloc_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +size_t +prof_log_thr_count(void) { + size_t cnt = 0; + prof_thr_node_t *node = log_thr_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +bool +prof_log_is_logging(void) { + return prof_logging_state == prof_logging_state_started; +} + +bool +prof_log_rep_check(void) { + if (prof_logging_state == prof_logging_state_stopped + && log_tables_initialized) { + return true; + } + + if (log_bt_last != NULL && log_bt_last->next != NULL) { + return true; + } + if (log_thr_last != NULL && log_thr_last->next != NULL) { + return true; + } + if (log_alloc_last != NULL && log_alloc_last->next != NULL) { + return true; + } + + size_t bt_count = prof_log_bt_count(); + size_t thr_count = prof_log_thr_count(); + size_t alloc_count = prof_log_alloc_count(); + + + if (prof_logging_state == prof_logging_state_stopped) { + if (bt_count != 0 || thr_count != 0 || alloc_count || 0) { + return true; + } + } + + prof_alloc_node_t *node = log_alloc_first; + while (node != NULL) { + if (node->alloc_bt_ind >= bt_count) { + return true; + } + if (node->free_bt_ind >= bt_count) { + return true; + } + if (node->alloc_thr_ind >= thr_count) { + return true; + } + if (node->free_thr_ind >= thr_count) { + return true; + } + if (node->alloc_time_ns > node->free_time_ns) { + return true; + } + node = node->next; + } + + return false; +} + +void +prof_log_dummy_set(bool new_value) { + prof_log_dummy = new_value; +} +#endif + +bool +prof_log_start(tsdn_t *tsdn, const char *filename) { + if (!opt_prof || !prof_booted) { + return 
true; + } + + bool ret = false; + size_t buf_size = PATH_MAX + 1; + + malloc_mutex_lock(tsdn, &log_mtx); + + if (prof_logging_state != prof_logging_state_stopped) { + ret = true; + } else if (filename == NULL) { + /* Make default name. */ + malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json", + opt_prof_prefix, prof_getpid(), log_seq); + log_seq++; + prof_logging_state = prof_logging_state_started; + } else if (strlen(filename) >= buf_size) { + ret = true; + } else { + strcpy(log_filename, filename); + prof_logging_state = prof_logging_state_started; + } + + if (!ret) { + nstime_update(&log_start_timestamp); + } + + malloc_mutex_unlock(tsdn, &log_mtx); + + return ret; +} + +/* Used as an atexit function to stop logging on exit. */ +static void +prof_log_stop_final(void) { + tsd_t *tsd = tsd_fetch(); + prof_log_stop(tsd_tsdn(tsd)); +} + +struct prof_emitter_cb_arg_s { + int fd; + ssize_t ret; +}; + +static void +prof_emitter_write_cb(void *opaque, const char *to_write) { + struct prof_emitter_cb_arg_s *arg = + (struct prof_emitter_cb_arg_s *)opaque; + size_t bytes = strlen(to_write); +#ifdef JEMALLOC_JET + if (prof_log_dummy) { + return; + } +#endif + arg->ret = write(arg->fd, (void *)to_write, bytes); +} + +/* + * prof_log_emit_{...} goes through the appropriate linked list, emitting each + * node to the json and deallocating it. + */ +static void +prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "threads"); + prof_thr_node_t *thr_node = log_thr_first; + prof_thr_node_t *thr_old_node; + while (thr_node != NULL) { + emitter_json_object_begin(emitter); + + emitter_json_kv(emitter, "thr_uid", emitter_type_uint64, + &thr_node->thr_uid); + + char *thr_name = thr_node->name; + + emitter_json_kv(emitter, "thr_name", emitter_type_string, + &thr_name); + + emitter_json_object_end(emitter); + thr_old_node = thr_node; + thr_node = thr_node->next; + idalloc(tsd, thr_old_node); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "stack_traces"); + prof_bt_node_t *bt_node = log_bt_first; + prof_bt_node_t *bt_old_node; + /* + * Calculate how many hex digits we need: twice number of bytes, two for + * "0x", and then one more for terminating '\0'. 
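+	 * For example, with 8-byte pointers that is 2 * 8 + 2 + 1 = 19 bytes:
+	 * "0x", up to 16 hex digits, and the trailing '\0'.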
+ */ + char buf[2 * sizeof(intptr_t) + 3]; + size_t buf_sz = sizeof(buf); + while (bt_node != NULL) { + emitter_json_array_begin(emitter); + size_t i; + for (i = 0; i < bt_node->bt.len; i++) { + malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]); + char *trace_str = buf; + emitter_json_value(emitter, emitter_type_string, + &trace_str); + } + emitter_json_array_end(emitter); + + bt_old_node = bt_node; + bt_node = bt_node->next; + idalloc(tsd, bt_old_node); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "allocations"); + prof_alloc_node_t *alloc_node = log_alloc_first; + prof_alloc_node_t *alloc_old_node; + while (alloc_node != NULL) { + emitter_json_object_begin(emitter); + + emitter_json_kv(emitter, "alloc_thread", emitter_type_size, + &alloc_node->alloc_thr_ind); + + emitter_json_kv(emitter, "free_thread", emitter_type_size, + &alloc_node->free_thr_ind); + + emitter_json_kv(emitter, "alloc_trace", emitter_type_size, + &alloc_node->alloc_bt_ind); + + emitter_json_kv(emitter, "free_trace", emitter_type_size, + &alloc_node->free_bt_ind); + + emitter_json_kv(emitter, "alloc_timestamp", + emitter_type_uint64, &alloc_node->alloc_time_ns); + + emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64, + &alloc_node->free_time_ns); + + emitter_json_kv(emitter, "usize", emitter_type_uint64, + &alloc_node->usize); + + emitter_json_object_end(emitter); + + alloc_old_node = alloc_node; + alloc_node = alloc_node->next; + idalloc(tsd, alloc_old_node); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_metadata(emitter_t *emitter) { + emitter_json_object_kv_begin(emitter, "info"); + + nstime_t now = NSTIME_ZERO_INITIALIZER; + + nstime_update(&now); + uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp); + emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns); + + char *vers = JEMALLOC_VERSION; + emitter_json_kv(emitter, "version", + emitter_type_string, &vers); + + emitter_json_kv(emitter, "lg_sample_rate", + emitter_type_int, &lg_prof_sample); + + int pid = prof_getpid(); + emitter_json_kv(emitter, "pid", emitter_type_int, &pid); + + emitter_json_object_end(emitter); +} + + +bool +prof_log_stop(tsdn_t *tsdn) { + if (!opt_prof || !prof_booted) { + return true; + } + + tsd_t *tsd = tsdn_tsd(tsdn); + malloc_mutex_lock(tsdn, &log_mtx); + + if (prof_logging_state != prof_logging_state_started) { + malloc_mutex_unlock(tsdn, &log_mtx); + return true; + } + + /* + * Set the state to dumping. We'll set it to stopped when we're done. + * Since other threads won't be able to start/stop/log when the state is + * dumping, we don't have to hold the lock during the whole method. + */ + prof_logging_state = prof_logging_state_dumping; + malloc_mutex_unlock(tsdn, &log_mtx); + + + emitter_t emitter; + + /* Create a file. */ + + int fd; +#ifdef JEMALLOC_JET + if (prof_log_dummy) { + fd = 0; + } else { + fd = creat(log_filename, 0644); + } +#else + fd = creat(log_filename, 0644); +#endif + + if (fd == -1) { + malloc_printf(": creat() for log file \"%s\" " + " failed with %d\n", log_filename, errno); + if (opt_abort) { + abort(); + } + return true; + } + + /* Emit to json. 
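+	 * The calls below produce a single JSON object roughly of the form
+	 * {"info": {...}, "threads": [...], "stack_traces": [...],
+	 * "allocations": [...]}, in that key order (metadata first, then the
+	 * per-thread, per-trace and per-allocation tables).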
*/ + struct prof_emitter_cb_arg_s arg; + arg.fd = fd; + emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb, + (void *)(&arg)); + + emitter_json_object_begin(&emitter); + prof_log_emit_metadata(&emitter); + prof_log_emit_threads(tsd, &emitter); + prof_log_emit_traces(tsd, &emitter); + prof_log_emit_allocs(tsd, &emitter); + emitter_json_object_end(&emitter); + + /* Reset global state. */ + if (log_tables_initialized) { + ckh_delete(tsd, &log_bt_node_set); + ckh_delete(tsd, &log_thr_node_set); + } + log_tables_initialized = false; + log_bt_index = 0; + log_thr_index = 0; + log_bt_first = NULL; + log_bt_last = NULL; + log_thr_first = NULL; + log_thr_last = NULL; + log_alloc_first = NULL; + log_alloc_last = NULL; + + malloc_mutex_lock(tsdn, &log_mtx); + prof_logging_state = prof_logging_state_stopped; + malloc_mutex_unlock(tsdn, &log_mtx); + +#ifdef JEMALLOC_JET + if (prof_log_dummy) { + return false; + } +#endif + return close(fd); +} + +bool prof_log_init(tsd_t *tsd) { + if (opt_prof_log) { + prof_log_start(tsd_tsdn(tsd), NULL); + } + + if (atexit(prof_log_stop_final) != 0) { + malloc_write(": Error in atexit() " + "for logging\n"); + if (opt_abort) { + abort(); + } + } + + if (malloc_mutex_init(&log_mtx, "prof_log", + WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) { + return true; + } + + if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, + prof_bt_node_hash, prof_bt_node_keycomp)) { + return true; + } + + if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, + prof_thr_node_hash, prof_thr_node_keycomp)) { + return true; + } + + log_tables_initialized = true; + return false; +} + +/******************************************************************************/ -- cgit v0.12 From 0b462407ae84a62b3c097f0e9f18df487a47d9a7 Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Wed, 17 Jul 2019 15:52:50 -0700 Subject: Refactor profiling Refactored core profiling codebase into two logical parts: (a) `prof_data.c`: core internal data structure managing & dumping; (b) `prof.c`: mutexes & outward-facing APIs. Some internal functions had to be exposed out, but there are not that many of them if the modularization is (hopefully) clean enough. 
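The surface exposed from `prof_data.c` back to `prof.c` is intentionally
small (see the prof_externs.h hunk below): prof_data_init(), prof_dump(),
prof_tdata_init_impl(), prof_tdata_detach() and prof_tctx_destroy(), plus the
two mutex-choosing helpers. A rough sketch of how `prof.c` now drives it
(illustrative only, not part of the diff):

    /* Bootstrap: prof_data.c builds bt2gctx and the tdatas tree. */
    if (prof_data_init(tsd)) {
        return true;
    }
    /* Final dump: merging and .heap file writing live in prof_data.c. */
    prof_dump(tsd, false, filename, opt_prof_leak);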
--- Makefile.in | 1 + include/jemalloc/internal/prof_externs.h | 14 + msvc/projects/vc2015/jemalloc/jemalloc.vcxproj | 1 + .../vc2015/jemalloc/jemalloc.vcxproj.filters | 3 + msvc/projects/vc2017/jemalloc/jemalloc.vcxproj | 1 + .../vc2017/jemalloc/jemalloc.vcxproj.filters | 3 + src/prof.c | 1606 ++------------------ src/prof_data.c | 1440 ++++++++++++++++++ 8 files changed, 1560 insertions(+), 1509 deletions(-) create mode 100644 src/prof_data.c diff --git a/Makefile.in b/Makefile.in index 1cd973d..40daf11 100644 --- a/Makefile.in +++ b/Makefile.in @@ -117,6 +117,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/pages.c \ $(srcroot)src/prng.c \ $(srcroot)src/prof.c \ + $(srcroot)src/prof_data.c \ $(srcroot)src/prof_log.c \ $(srcroot)src/rtree.c \ $(srcroot)src/safety_check.c \ diff --git a/include/jemalloc/internal/prof_externs.h b/include/jemalloc/internal/prof_externs.h index e94ac3b..8fc45cf 100644 --- a/include/jemalloc/internal/prof_externs.h +++ b/include/jemalloc/internal/prof_externs.h @@ -4,6 +4,11 @@ #include "jemalloc/internal/mutex.h" extern malloc_mutex_t bt2gctx_mtx; +extern malloc_mutex_t tdatas_mtx; +extern malloc_mutex_t prof_dump_mtx; + +malloc_mutex_t *prof_gctx_mutex_choose(void); +malloc_mutex_t *prof_tdata_mutex_choose(uint64_t thr_uid); extern bool opt_prof; extern bool opt_prof_active; @@ -110,4 +115,13 @@ bool prof_log_rep_check(void); void prof_log_dummy_set(bool new_value); #endif +/* Functions in prof_data.c only accessed in prof.c */ +bool prof_data_init(tsd_t *tsd); +bool prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck); +prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, + uint64_t thr_discrim, char *thread_name, bool active); +void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata); +void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); + #endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */ diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj index d93d909..387f14b 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj @@ -58,6 +58,7 @@ + diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters index 7b09d4e..030d826 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters @@ -67,6 +67,9 @@ Source Files + + Source Files + Source Files diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj index 28bd3cd..1606a3a 100644 --- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj @@ -58,6 +58,7 @@ + diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters index a66c209..622b93f 100644 --- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters @@ -67,6 +67,9 @@ Source Files + + Source Files + Source Files diff --git a/src/prof.c b/src/prof.c index 9d1edb3..79a0ffc 100644 --- a/src/prof.c +++ b/src/prof.c @@ -3,11 +3,14 @@ #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" +/* + * This file implements the profiling "APIs" needed by other parts of 
jemalloc, + * and also manages the relevant "operational" data, mainly options and mutexes; + * the core profiling data structures are encapsulated in prof_data.c. + */ + /******************************************************************************/ #ifdef JEMALLOC_PROF_LIBUNWIND @@ -88,20 +91,10 @@ static atomic_u_t cum_gctxs; /* Atomic counter. */ */ static malloc_mutex_t *tdata_locks; -/* - * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data - * structure that knows about all backtraces currently captured. - */ -static ckh_t bt2gctx; /* Non static to enable profiling. */ malloc_mutex_t bt2gctx_mtx; -/* - * Tree of all extant prof_tdata_t structures, regardless of state, - * {attached,detached,expired}. - */ -static prof_tdata_tree_t tdatas; -static malloc_mutex_t tdatas_mtx; +malloc_mutex_t tdatas_mtx; static uint64_t next_thr_uid; static malloc_mutex_t next_thr_uid_mtx; @@ -112,101 +105,29 @@ static uint64_t prof_dump_iseq; static uint64_t prof_dump_mseq; static uint64_t prof_dump_useq; -/* - * This buffer is rather large for stack allocation, so use a single buffer for - * all profile dumps. - */ -static malloc_mutex_t prof_dump_mtx; -static char prof_dump_buf[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PROF_DUMP_BUFSIZE -#else - 1 -#endif -]; -static size_t prof_dump_buf_end; -static int prof_dump_fd; +malloc_mutex_t prof_dump_mtx; /* Do not dump any profiles until bootstrapping is complete. */ bool prof_booted = false; /******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. - */ -static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); -static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); -static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, - bool even_if_attached); -static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached); -static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); +static bool +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); -/******************************************************************************/ -/* Red-black trees. */ - -static int -prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { - uint64_t a_thr_uid = a->thr_uid; - uint64_t b_thr_uid = b->thr_uid; - int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); - if (ret == 0) { - uint64_t a_thr_discrim = a->thr_discrim; - uint64_t b_thr_discrim = b->thr_discrim; - ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < - b_thr_discrim); - if (ret == 0) { - uint64_t a_tctx_uid = a->tctx_uid; - uint64_t b_tctx_uid = b->tctx_uid; - ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < - b_tctx_uid); - } + if (opt_prof_accum) { + return false; } - return ret; -} - -rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, - tctx_link, prof_tctx_comp) - -static int -prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { - unsigned a_len = a->bt.len; - unsigned b_len = b->bt.len; - unsigned comp_len = (a_len < b_len) ? 
a_len : b_len; - int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); - if (ret == 0) { - ret = (a_len > b_len) - (a_len < b_len); + if (tctx->cnts.curobjs != 0) { + return false; } - return ret; -} - -rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, - prof_gctx_comp) - -static int -prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { - int ret; - uint64_t a_uid = a->thr_uid; - uint64_t b_uid = b->thr_uid; - - ret = ((a_uid > b_uid) - (a_uid < b_uid)); - if (ret == 0) { - uint64_t a_discrim = a->thr_discrim; - uint64_t b_discrim = b->thr_discrim; - - ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); + if (tctx->prepared) { + return false; } - return ret; + return true; } -rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, - prof_tdata_comp) - -/******************************************************************************/ - void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { prof_tdata_t *tdata; @@ -286,45 +207,6 @@ bt_init(prof_bt_t *bt, void **vec) { bt->len = 0; } -static void -prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { - cassert(config_prof); - assert(tdata == prof_tdata_get(tsd, false)); - - if (tdata != NULL) { - assert(!tdata->enq); - tdata->enq = true; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); -} - -static void -prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { - cassert(config_prof); - assert(tdata == prof_tdata_get(tsd, false)); - - malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - - if (tdata != NULL) { - bool idump, gdump; - - assert(tdata->enq); - tdata->enq = false; - idump = tdata->enq_idump; - tdata->enq_idump = false; - gdump = tdata->enq_gdump; - tdata->enq_gdump = false; - - if (idump) { - prof_idump(tsd_tsdn(tsd)); - } - if (gdump) { - prof_gdump(tsd_tsdn(tsd)); - } - } -} - #ifdef JEMALLOC_PROF_LIBUNWIND void prof_backtrace(prof_bt_t *bt) { @@ -547,324 +429,18 @@ prof_backtrace(prof_bt_t *bt) { } #endif -static malloc_mutex_t * +malloc_mutex_t * prof_gctx_mutex_choose(void) { unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; } -static malloc_mutex_t * +malloc_mutex_t * prof_tdata_mutex_choose(uint64_t thr_uid) { return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; } -static prof_gctx_t * -prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { - /* - * Create a single allocation that has space for vec of length bt->len. - */ - size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); - prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, - sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), - true); - if (gctx == NULL) { - return NULL; - } - gctx->lock = prof_gctx_mutex_choose(); - /* - * Set nlimbo to 1, in order to avoid a race condition with - * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ - gctx->nlimbo = 1; - tctx_tree_new(&gctx->tctxs); - /* Duplicate bt. */ - memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); - gctx->bt.vec = gctx->vec; - gctx->bt.len = bt->len; - return gctx; -} - -static void -prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, - prof_tdata_t *tdata) { - cassert(config_prof); - - /* - * Check that gctx is still unused by any thread cache before destroying - * it. prof_lookup() increments gctx->nlimbo in order to avoid a race - * condition with this function, as does prof_tctx_destroy() in order to - * avoid a race between the main body of prof_tctx_destroy() and entry - * into this function. 
- */ - prof_enter(tsd, tdata_self); - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - assert(gctx->nlimbo != 0); - if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { - /* Remove gctx from bt2gctx. */ - if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { - not_reached(); - } - prof_leave(tsd, tdata_self); - /* Destroy gctx. */ - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); - } else { - /* - * Compensate for increment in prof_tctx_destroy() or - * prof_lookup(). - */ - gctx->nlimbo--; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - prof_leave(tsd, tdata_self); - } -} - -static bool -prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { - malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - - if (opt_prof_accum) { - return false; - } - if (tctx->cnts.curobjs != 0) { - return false; - } - if (tctx->prepared) { - return false; - } - return true; -} - -static bool -prof_gctx_should_destroy(prof_gctx_t *gctx) { - if (opt_prof_accum) { - return false; - } - if (!tctx_tree_empty(&gctx->tctxs)) { - return false; - } - if (gctx->nlimbo != 0) { - return false; - } - return true; -} - -static void -prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { - prof_tdata_t *tdata = tctx->tdata; - prof_gctx_t *gctx = tctx->gctx; - bool destroy_tdata, destroy_tctx, destroy_gctx; - - malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - assert(tctx->cnts.curobjs == 0); - assert(tctx->cnts.curbytes == 0); - assert(!opt_prof_accum); - assert(tctx->cnts.accumobjs == 0); - assert(tctx->cnts.accumbytes == 0); - - ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); - destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - switch (tctx->state) { - case prof_tctx_state_nominal: - tctx_tree_remove(&gctx->tctxs, tctx); - destroy_tctx = true; - if (prof_gctx_should_destroy(gctx)) { - /* - * Increment gctx->nlimbo in order to keep another - * thread from winning the race to destroy gctx while - * this one has gctx->lock dropped. Without this, it - * would be possible for another thread to: - * - * 1) Sample an allocation associated with gctx. - * 2) Deallocate the sampled object. - * 3) Successfully prof_gctx_try_destroy(gctx). - * - * The result would be that gctx no longer exists by the - * time this thread accesses it in - * prof_gctx_try_destroy(). - */ - gctx->nlimbo++; - destroy_gctx = true; - } else { - destroy_gctx = false; - } - break; - case prof_tctx_state_dumping: - /* - * A dumping thread needs tctx to remain valid until dumping - * has finished. Change state such that the dumping thread will - * complete destruction during a late dump iteration phase. 
- */ - tctx->state = prof_tctx_state_purgatory; - destroy_tctx = false; - destroy_gctx = false; - break; - default: - not_reached(); - destroy_tctx = false; - destroy_gctx = false; - } - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - if (destroy_gctx) { - prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, - tdata); - } - - malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - if (destroy_tdata) { - prof_tdata_destroy(tsd, tdata, false); - } - - if (destroy_tctx) { - idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); - } -} - -static bool -prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, - void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { - union { - prof_gctx_t *p; - void *v; - } gctx, tgctx; - union { - prof_bt_t *p; - void *v; - } btkey; - bool new_gctx; - - prof_enter(tsd, tdata); - if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { - /* bt has never been seen before. Insert it. */ - prof_leave(tsd, tdata); - tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); - if (tgctx.v == NULL) { - return true; - } - prof_enter(tsd, tdata); - if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { - gctx.p = tgctx.p; - btkey.p = &gctx.p->bt; - if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { - /* OOM. */ - prof_leave(tsd, tdata); - idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, - true, true); - return true; - } - new_gctx = true; - } else { - new_gctx = false; - } - } else { - tgctx.v = NULL; - new_gctx = false; - } - - if (!new_gctx) { - /* - * Increment nlimbo, in order to avoid a race condition with - * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ - malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); - gctx.p->nlimbo++; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); - new_gctx = false; - - if (tgctx.v != NULL) { - /* Lost race to insert. */ - idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, - true); - } - } - prof_leave(tsd, tdata); - - *p_btkey = btkey.v; - *p_gctx = gctx.p; - *p_new_gctx = new_gctx; - return false; -} - -prof_tctx_t * -prof_lookup(tsd_t *tsd, prof_bt_t *bt) { - union { - prof_tctx_t *p; - void *v; - } ret; - prof_tdata_t *tdata; - bool not_found; - - cassert(config_prof); - - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - return NULL; - } - - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); - if (!not_found) { /* Note double negative! */ - ret.p->prepared = true; - } - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (not_found) { - void *btkey; - prof_gctx_t *gctx; - bool new_gctx, error; - - /* - * This thread's cache lacks bt. Look for it in the global - * cache. - */ - if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, - &new_gctx)) { - return NULL; - } - - /* Link a prof_tctx_t into gctx for this thread. 
*/ - ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), - sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, - arena_ichoose(tsd, NULL), true); - if (ret.p == NULL) { - if (new_gctx) { - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } - return NULL; - } - ret.p->tdata = tdata; - ret.p->thr_uid = tdata->thr_uid; - ret.p->thr_discrim = tdata->thr_discrim; - memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); - ret.p->gctx = gctx; - ret.p->tctx_uid = tdata->tctx_uid_next++; - ret.p->prepared = true; - ret.p->state = prof_tctx_state_initializing; - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (error) { - if (new_gctx) { - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } - idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); - return NULL; - } - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - ret.p->state = prof_tctx_state_nominal; - tctx_tree_insert(&gctx->tctxs, ret.p); - gctx->nlimbo--; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - } - - return ret.p; -} - /* * The bodies of this function and prof_leakcheck() are compiled out unless heap * profiling is enabled, so that it is possible to compile jemalloc with @@ -921,884 +497,85 @@ prof_sample_threshold_update(prof_tdata_t *tdata) { #endif } -#ifdef JEMALLOC_JET -static prof_tdata_t * -prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *arg) { - size_t *tdata_count = (size_t *)arg; - - (*tdata_count)++; - - return NULL; +int +prof_getpid(void) { +#ifdef _WIN32 + return GetCurrentProcessId(); +#else + return getpid(); +#endif } -size_t -prof_tdata_count(void) { - size_t tdata_count = 0; - tsdn_t *tsdn; - - tsdn = tsdn_fetch(); - malloc_mutex_lock(tsdn, &tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, - (void *)&tdata_count); - malloc_mutex_unlock(tsdn, &tdatas_mtx); +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) +static void +prof_dump_filename(char *filename, char v, uint64_t vseq) { + cassert(config_prof); - return tdata_count; + if (vseq != VSEQ_INVALID) { + /* "...v.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c%"FMTu64".heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); + } else { + /* "....heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c.heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v); + } + prof_dump_seq++; } -size_t -prof_bt_count(void) { - size_t bt_count; +static void +prof_fdump(void) { tsd_t *tsd; - prof_tdata_t *tdata; - - tsd = tsd_fetch(); - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - return 0; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); - bt_count = ckh_count(&bt2gctx); - malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - - return bt_count; -} -#endif + char filename[DUMP_FILENAME_BUFSIZE]; -static int -prof_dump_open_impl(bool propagate_err, const char *filename) { - int fd; + cassert(config_prof); + assert(opt_prof_final); + assert(opt_prof_prefix[0] != '\0'); - fd = creat(filename, 0644); - if (fd == -1 && !propagate_err) { - malloc_printf(": creat(\"%s\"), 0644) failed\n", - filename); - if (opt_abort) { - abort(); - } + if (!prof_booted) { + return; } + tsd = tsd_fetch(); + assert(tsd_reentrancy_level_get(tsd) == 0); - return fd; + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump_filename(filename, 'f', VSEQ_INVALID); + malloc_mutex_unlock(tsd_tsdn(tsd), 
&prof_dump_seq_mtx); + prof_dump(tsd, false, filename, opt_prof_leak); } -prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; - -static bool -prof_dump_flush(bool propagate_err) { - bool ret = false; - ssize_t err; +bool +prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { cassert(config_prof); - err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); - if (err == -1) { - if (!propagate_err) { - malloc_write(": write() failed during heap " - "profile flush\n"); - if (opt_abort) { - abort(); - } - } - ret = true; +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", + WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { + return true; } - prof_dump_buf_end = 0; - - return ret; -} - -static bool -prof_dump_close(bool propagate_err) { - bool ret; - - assert(prof_dump_fd != -1); - ret = prof_dump_flush(propagate_err); - close(prof_dump_fd); - prof_dump_fd = -1; - - return ret; + prof_accum->accumbytes = 0; +#else + atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); +#endif + return false; } -static bool -prof_dump_write(bool propagate_err, const char *s) { - size_t i, slen, n; +void +prof_idump(tsdn_t *tsdn) { + tsd_t *tsd; + prof_tdata_t *tdata; cassert(config_prof); - i = 0; - slen = strlen(s); - while (i < slen) { - /* Flush the buffer if it is full. */ - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { - if (prof_dump_flush(propagate_err) && propagate_err) { - return true; - } - } - - if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) { - /* Finish writing. */ - n = slen - i; - } else { - /* Write as much of s as will fit. */ - n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; - } - memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); - prof_dump_buf_end += n; - i += n; - } - - return false; -} - -JEMALLOC_FORMAT_PRINTF(2, 3) -static bool -prof_dump_printf(bool propagate_err, const char *format, ...) 
{ - bool ret; - va_list ap; - char buf[PROF_PRINTF_BUFSIZE]; - - va_start(ap, format); - malloc_vsnprintf(buf, sizeof(buf), format, ap); - va_end(ap); - ret = prof_dump_write(propagate_err, buf); - - return ret; -} - -static void -prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { - malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - - malloc_mutex_lock(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_initializing: - malloc_mutex_unlock(tsdn, tctx->gctx->lock); - return; - case prof_tctx_state_nominal: - tctx->state = prof_tctx_state_dumping; - malloc_mutex_unlock(tsdn, tctx->gctx->lock); - - memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); - - tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; - tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; - if (opt_prof_accum) { - tdata->cnt_summed.accumobjs += - tctx->dump_cnts.accumobjs; - tdata->cnt_summed.accumbytes += - tctx->dump_cnts.accumbytes; - } - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - not_reached(); - } -} - -static void -prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { - malloc_mutex_assert_owner(tsdn, gctx->lock); - - gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; - gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; - if (opt_prof_accum) { - gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; - gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; - } -} - -static prof_tctx_t * -prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { - tsdn_t *tsdn = (tsdn_t *)arg; - - malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_nominal: - /* New since dumping started; ignore. */ - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); - break; - default: - not_reached(); - } - - return NULL; -} - -struct prof_tctx_dump_iter_arg_s { - tsdn_t *tsdn; - bool propagate_err; -}; - -static prof_tctx_t * -prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { - struct prof_tctx_dump_iter_arg_s *arg = - (struct prof_tctx_dump_iter_arg_s *)opaque; - - malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_initializing: - case prof_tctx_state_nominal: - /* Not captured by this dump. */ - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - if (prof_dump_printf(arg->propagate_err, - " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " - "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, - tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, - tctx->dump_cnts.accumbytes)) { - return tctx; - } - break; - default: - not_reached(); - } - return NULL; -} - -static prof_tctx_t * -prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { - tsdn_t *tsdn = (tsdn_t *)arg; - prof_tctx_t *ret; - - malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_nominal: - /* New since dumping started; ignore. 
*/ - break; - case prof_tctx_state_dumping: - tctx->state = prof_tctx_state_nominal; - break; - case prof_tctx_state_purgatory: - ret = tctx; - goto label_return; - default: - not_reached(); - } - - ret = NULL; -label_return: - return ret; -} - -static void -prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { - cassert(config_prof); - - malloc_mutex_lock(tsdn, gctx->lock); - - /* - * Increment nlimbo so that gctx won't go away before dump. - * Additionally, link gctx into the dump list so that it is included in - * prof_dump()'s second pass. - */ - gctx->nlimbo++; - gctx_tree_insert(gctxs, gctx); - - memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - - malloc_mutex_unlock(tsdn, gctx->lock); -} - -struct prof_gctx_merge_iter_arg_s { - tsdn_t *tsdn; - size_t leak_ngctx; -}; - -static prof_gctx_t * -prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { - struct prof_gctx_merge_iter_arg_s *arg = - (struct prof_gctx_merge_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, gctx->lock); - tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, - (void *)arg->tsdn); - if (gctx->cnt_summed.curobjs != 0) { - arg->leak_ngctx++; - } - malloc_mutex_unlock(arg->tsdn, gctx->lock); - - return NULL; -} - -static void -prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { - prof_tdata_t *tdata = prof_tdata_get(tsd, false); - prof_gctx_t *gctx; - - /* - * Standard tree iteration won't work here, because as soon as we - * decrement gctx->nlimbo and unlock gctx, another thread can - * concurrently destroy it, which will corrupt the tree. Therefore, - * tear down the tree one node at a time during iteration. - */ - while ((gctx = gctx_tree_first(gctxs)) != NULL) { - gctx_tree_remove(gctxs, gctx); - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - { - prof_tctx_t *next; - - next = NULL; - do { - prof_tctx_t *to_destroy = - tctx_tree_iter(&gctx->tctxs, next, - prof_tctx_finish_iter, - (void *)tsd_tsdn(tsd)); - if (to_destroy != NULL) { - next = tctx_tree_next(&gctx->tctxs, - to_destroy); - tctx_tree_remove(&gctx->tctxs, - to_destroy); - idalloctm(tsd_tsdn(tsd), to_destroy, - NULL, NULL, true, true); - } else { - next = NULL; - } - } while (next != NULL); - } - gctx->nlimbo--; - if (prof_gctx_should_destroy(gctx)) { - gctx->nlimbo++; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } else { - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - } - } -} - -struct prof_tdata_merge_iter_arg_s { - tsdn_t *tsdn; - prof_cnt_t cnt_all; -}; - -static prof_tdata_t * -prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *opaque) { - struct prof_tdata_merge_iter_arg_s *arg = - (struct prof_tdata_merge_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, tdata->lock); - if (!tdata->expired) { - size_t tabind; - union { - prof_tctx_t *p; - void *v; - } tctx; - - tdata->dumping = true; - memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); - for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, - &tctx.v);) { - prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); - } - - arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; - arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; - if (opt_prof_accum) { - arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; - arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; - } - } else { - tdata->dumping = false; - } - malloc_mutex_unlock(arg->tsdn, tdata->lock); - - return NULL; -} - -static prof_tdata_t * -prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, 
prof_tdata_t *tdata, - void *arg) { - bool propagate_err = *(bool *)arg; - - if (!tdata->dumping) { - return NULL; - } - - if (prof_dump_printf(propagate_err, - " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", - tdata->thr_uid, tdata->cnt_summed.curobjs, - tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, - tdata->cnt_summed.accumbytes, - (tdata->thread_name != NULL) ? " " : "", - (tdata->thread_name != NULL) ? tdata->thread_name : "")) { - return tdata; - } - return NULL; -} - -static bool -prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, - const prof_cnt_t *cnt_all) { - bool ret; - - if (prof_dump_printf(propagate_err, - "heap_v2/%"FMTu64"\n" - " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", - ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, - cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { - return true; - } - - malloc_mutex_lock(tsdn, &tdatas_mtx); - ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, - (void *)&propagate_err) != NULL); - malloc_mutex_unlock(tsdn, &tdatas_mtx); - return ret; -} -prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; - -static bool -prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, - const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { - bool ret; - unsigned i; - struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; - - cassert(config_prof); - malloc_mutex_assert_owner(tsdn, gctx->lock); - - /* Avoid dumping such gctx's that have no useful data. */ - if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || - (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { - assert(gctx->cnt_summed.curobjs == 0); - assert(gctx->cnt_summed.curbytes == 0); - assert(gctx->cnt_summed.accumobjs == 0); - assert(gctx->cnt_summed.accumbytes == 0); - ret = false; - goto label_return; - } - - if (prof_dump_printf(propagate_err, "@")) { - ret = true; - goto label_return; - } - for (i = 0; i < bt->len; i++) { - if (prof_dump_printf(propagate_err, " %#"FMTxPTR, - (uintptr_t)bt->vec[i])) { - ret = true; - goto label_return; - } - } - - if (prof_dump_printf(propagate_err, - "\n" - " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", - gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, - gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { - ret = true; - goto label_return; - } - - prof_tctx_dump_iter_arg.tsdn = tsdn; - prof_tctx_dump_iter_arg.propagate_err = propagate_err; - if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, - (void *)&prof_tctx_dump_iter_arg) != NULL) { - ret = true; - goto label_return; - } - - ret = false; -label_return: - return ret; -} - -#ifndef _WIN32 -JEMALLOC_FORMAT_PRINTF(1, 2) -static int -prof_open_maps(const char *format, ...) 
{ - int mfd; - va_list ap; - char filename[PATH_MAX + 1]; - - va_start(ap, format); - malloc_vsnprintf(filename, sizeof(filename), format, ap); - va_end(ap); - -#if defined(O_CLOEXEC) - mfd = open(filename, O_RDONLY | O_CLOEXEC); -#else - mfd = open(filename, O_RDONLY); - if (mfd != -1) { - fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); - } -#endif - - return mfd; -} -#endif - -int -prof_getpid(void) { -#ifdef _WIN32 - return GetCurrentProcessId(); -#else - return getpid(); -#endif -} - -static bool -prof_dump_maps(bool propagate_err) { - bool ret; - int mfd; - - cassert(config_prof); -#ifdef __FreeBSD__ - mfd = prof_open_maps("/proc/curproc/map"); -#elif defined(_WIN32) - mfd = -1; // Not implemented -#else - { - int pid = prof_getpid(); - - mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); - if (mfd == -1) { - mfd = prof_open_maps("/proc/%d/maps", pid); - } - } -#endif - if (mfd != -1) { - ssize_t nread; - - if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && - propagate_err) { - ret = true; - goto label_return; - } - nread = 0; - do { - prof_dump_buf_end += nread; - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { - /* Make space in prof_dump_buf before read(). */ - if (prof_dump_flush(propagate_err) && - propagate_err) { - ret = true; - goto label_return; - } - } - nread = malloc_read_fd(mfd, - &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE - - prof_dump_buf_end); - } while (nread > 0); - } else { - ret = true; - goto label_return; - } - - ret = false; -label_return: - if (mfd != -1) { - close(mfd); - } - return ret; -} - -/* - * See prof_sample_threshold_update() comment for why the body of this function - * is conditionally compiled. - */ -static void -prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, - const char *filename) { -#ifdef JEMALLOC_PROF - /* - * Scaling is equivalent AdjustSamples() in jeprof, but the result may - * differ slightly from what jeprof reports, because here we scale the - * summary values, whereas jeprof scales each context individually and - * reports the sums of the scaled values. - */ - if (cnt_all->curbytes != 0) { - double sample_period = (double)((uint64_t)1 << lg_prof_sample); - double ratio = (((double)cnt_all->curbytes) / - (double)cnt_all->curobjs) / sample_period; - double scale_factor = 1.0 / (1.0 - exp(-ratio)); - uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) - * scale_factor); - uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * - scale_factor); - - malloc_printf(": Leak approximation summary: ~%"FMTu64 - " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", - curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != - 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? 
"s" : ""); - malloc_printf( - ": Run jeprof on \"%s\" for leak detail\n", - filename); - } -#endif -} - -struct prof_gctx_dump_iter_arg_s { - tsdn_t *tsdn; - bool propagate_err; -}; - -static prof_gctx_t * -prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { - prof_gctx_t *ret; - struct prof_gctx_dump_iter_arg_s *arg = - (struct prof_gctx_dump_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, gctx->lock); - - if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, - gctxs)) { - ret = gctx; - goto label_return; - } - - ret = NULL; -label_return: - malloc_mutex_unlock(arg->tsdn, gctx->lock); - return ret; -} - -static void -prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, - struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, - struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, - prof_gctx_tree_t *gctxs) { - size_t tabind; - union { - prof_gctx_t *p; - void *v; - } gctx; - - prof_enter(tsd, tdata); - - /* - * Put gctx's in limbo and clear their counters in preparation for - * summing. - */ - gctx_tree_new(gctxs); - for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { - prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); - } - - /* - * Iterate over tdatas, and for the non-expired ones snapshot their tctx - * stats and merge them into the associated gctx's. - */ - prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); - memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, - (void *)prof_tdata_merge_iter_arg); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - - /* Merge tctx stats into gctx's. */ - prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); - prof_gctx_merge_iter_arg->leak_ngctx = 0; - gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, - (void *)prof_gctx_merge_iter_arg); - - prof_leave(tsd, tdata); -} - -static bool -prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, - bool leakcheck, prof_tdata_t *tdata, - struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, - struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, - struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, - prof_gctx_tree_t *gctxs) { - /* Create dump file. */ - if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { - return true; - } - - /* Dump profile header. */ - if (prof_dump_header(tsd_tsdn(tsd), propagate_err, - &prof_tdata_merge_iter_arg->cnt_all)) { - goto label_write_error; - } - - /* Dump per gctx profile stats. */ - prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); - prof_gctx_dump_iter_arg->propagate_err = propagate_err; - if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, - (void *)prof_gctx_dump_iter_arg) != NULL) { - goto label_write_error; - } - - /* Dump /proc//maps if possible. 
*/ - if (prof_dump_maps(propagate_err)) { - goto label_write_error; - } - - if (prof_dump_close(propagate_err)) { - return true; - } - - return false; -label_write_error: - prof_dump_close(propagate_err); - return true; -} - -static bool -prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, - bool leakcheck) { - cassert(config_prof); - assert(tsd_reentrancy_level_get(tsd) == 0); - - prof_tdata_t * tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) { - return true; - } - - pre_reentrancy(tsd, NULL); - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); - - prof_gctx_tree_t gctxs; - struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; - struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; - struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; - prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, - &prof_gctx_merge_iter_arg, &gctxs); - bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, - &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, - &prof_gctx_dump_iter_arg, &gctxs); - prof_gctx_finish(tsd, &gctxs); - - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); - post_reentrancy(tsd); - - if (err) { - return true; - } - - if (leakcheck) { - prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, - prof_gctx_merge_iter_arg.leak_ngctx, filename); - } - return false; -} - -#ifdef JEMALLOC_JET -void -prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, - uint64_t *accumbytes) { - tsd_t *tsd; - prof_tdata_t *tdata; - struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; - struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; - prof_gctx_tree_t gctxs; - - tsd = tsd_fetch(); - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - if (curobjs != NULL) { - *curobjs = 0; - } - if (curbytes != NULL) { - *curbytes = 0; - } - if (accumobjs != NULL) { - *accumobjs = 0; - } - if (accumbytes != NULL) { - *accumbytes = 0; - } - return; - } - - prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, - &prof_gctx_merge_iter_arg, &gctxs); - prof_gctx_finish(tsd, &gctxs); - - if (curobjs != NULL) { - *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; - } - if (curbytes != NULL) { - *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; - } - if (accumobjs != NULL) { - *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; - } - if (accumbytes != NULL) { - *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; - } -} -#endif - -#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) -#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) -static void -prof_dump_filename(char *filename, char v, uint64_t vseq) { - cassert(config_prof); - - if (vseq != VSEQ_INVALID) { - /* "...v.heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"FMTu64".%c%"FMTu64".heap", - opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); - } else { - /* "....heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"FMTu64".%c.heap", - opt_prof_prefix, prof_getpid(), prof_dump_seq, v); - } - prof_dump_seq++; -} - -static void -prof_fdump(void) { - tsd_t *tsd; - char filename[DUMP_FILENAME_BUFSIZE]; - - cassert(config_prof); - assert(opt_prof_final); - assert(opt_prof_prefix[0] != '\0'); - - if (!prof_booted) { - return; - } - tsd = tsd_fetch(); - assert(tsd_reentrancy_level_get(tsd) == 0); - - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump_filename(filename, 'f', VSEQ_INVALID); - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump(tsd, false, filename, opt_prof_leak); -} - -bool 
-prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { - cassert(config_prof); - -#ifndef JEMALLOC_ATOMIC_U64 - if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", - WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { - return true; - } - prof_accum->accumbytes = 0; -#else - atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); -#endif - return false; -} - -void -prof_idump(tsdn_t *tsdn) { - tsd_t *tsd; - prof_tdata_t *tdata; - - cassert(config_prof); - - if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { - return; - } - tsd = tsdn_tsd(tsdn); - if (tsd_reentrancy_level_get(tsd) > 0) { - return; + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { + return; + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { + return; } tdata = prof_tdata_get(tsd, false); @@ -1877,28 +654,6 @@ prof_gdump(tsdn_t *tsdn) { } } -void -prof_bt_hash(const void *key, size_t r_hash[2]) { - prof_bt_t *bt = (prof_bt_t *)key; - - cassert(config_prof); - - hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); -} - -bool -prof_bt_keycomp(const void *k1, const void *k2) { - const prof_bt_t *bt1 = (prof_bt_t *)k1; - const prof_bt_t *bt2 = (prof_bt_t *)k2; - - cassert(config_prof); - - if (bt1->len != bt2->len) { - return false; - } - return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); -} - static uint64_t prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; @@ -1911,124 +666,33 @@ prof_thr_uid_alloc(tsdn_t *tsdn) { return thr_uid; } -static prof_tdata_t * -prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, - char *thread_name, bool active) { - prof_tdata_t *tdata; - - cassert(config_prof); - - /* Initialize an empty cache for this thread. */ - tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), - sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - if (tdata == NULL) { - return NULL; - } - - tdata->lock = prof_tdata_mutex_choose(thr_uid); - tdata->thr_uid = thr_uid; - tdata->thr_discrim = thr_discrim; - tdata->thread_name = thread_name; - tdata->attached = true; - tdata->expired = false; - tdata->tctx_uid_next = 0; - - if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) { - idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); - return NULL; - } - - tdata->prng_state = (uint64_t)(uintptr_t)tdata; - prof_sample_threshold_update(tdata); - - tdata->enq = false; - tdata->enq_idump = false; - tdata->enq_gdump = false; - - tdata->dumping = false; - tdata->active = active; - - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - tdata_tree_insert(&tdatas, tdata); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - - return tdata; -} - prof_tdata_t * prof_tdata_init(tsd_t *tsd) { return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); } -static bool -prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { - if (tdata->attached && !even_if_attached) { - return false; - } - if (ckh_count(&tdata->bt2tctx) != 0) { - return false; - } - return true; -} - -static bool -prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, - bool even_if_attached) { - malloc_mutex_assert_owner(tsdn, tdata->lock); - - return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); -} - -static void -prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached) { - malloc_mutex_assert_owner(tsd_tsdn(tsd), 
&tdatas_mtx); - - tdata_tree_remove(&tdatas, tdata); - - assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); +static char * +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { + char *ret; + size_t size; - if (tdata->thread_name != NULL) { - idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, - true); + if (thread_name == NULL) { + return NULL; } - ckh_delete(tsd, &tdata->bt2tctx); - idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); -} - -static void -prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - prof_tdata_destroy_locked(tsd, tdata, even_if_attached); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); -} -static void -prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { - bool destroy_tdata; - - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - if (tdata->attached) { - destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, - true); - /* - * Only detach if !destroy_tdata, because detaching would allow - * another thread to win the race to destroy tdata. - */ - if (!destroy_tdata) { - tdata->attached = false; - } - tsd_prof_tdata_set(tsd, NULL); - } else { - destroy_tdata = false; + size = strlen(thread_name) + 1; + if (size == 1) { + return ""; } - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (destroy_tdata) { - prof_tdata_destroy(tsd, tdata, true); + + ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (ret == NULL) { + return NULL; } + memcpy(ret, thread_name, size); + return ret; } prof_tdata_t * @@ -2044,58 +708,6 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { active); } -static bool -prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { - bool destroy_tdata; - - malloc_mutex_lock(tsdn, tdata->lock); - if (!tdata->expired) { - tdata->expired = true; - destroy_tdata = tdata->attached ? false : - prof_tdata_should_destroy(tsdn, tdata, false); - } else { - destroy_tdata = false; - } - malloc_mutex_unlock(tsdn, tdata->lock); - - return destroy_tdata; -} - -static prof_tdata_t * -prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *arg) { - tsdn_t *tsdn = (tsdn_t *)arg; - - return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); -} - -void -prof_reset(tsd_t *tsd, size_t lg_sample) { - prof_tdata_t *next; - - assert(lg_sample < (sizeof(uint64_t) << 3)); - - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - - lg_prof_sample = lg_sample; - - next = NULL; - do { - prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, - prof_tdata_reset_iter, (void *)tsd); - if (to_destroy != NULL) { - next = tdata_tree_next(&tdatas, to_destroy); - prof_tdata_destroy_locked(tsd, to_destroy, false); - } else { - next = NULL; - } - } while (next != NULL); - - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); -} - void prof_tdata_cleanup(tsd_t *tsd) { prof_tdata_t *tdata; @@ -2142,29 +754,6 @@ prof_thread_name_get(tsd_t *tsd) { return (tdata->thread_name != NULL ? 
tdata->thread_name : ""); } -static char * -prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { - char *ret; - size_t size; - - if (thread_name == NULL) { - return NULL; - } - - size = strlen(thread_name) + 1; - if (size == 1) { - return ""; - } - - ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - if (ret == NULL) { - return NULL; - } - memcpy(ret, thread_name, size); - return ret; -} - int prof_thread_name_set(tsd_t *tsd, const char *thread_name) { prof_tdata_t *tdata; @@ -2329,16 +918,15 @@ prof_boot2(tsd_t *tsd) { return true; } - if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) { + if (prof_data_init(tsd)) { return true; } + if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { return true; } - tdata_tree_new(&tdatas); if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { return true; diff --git a/src/prof_data.c b/src/prof_data.c new file mode 100644 index 0000000..a4cb749 --- /dev/null +++ b/src/prof_data.c @@ -0,0 +1,1440 @@ +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" + +/* + * This file defines and manages the core profiling data structures. + * + * Conceptually, profiling data can be imagined as a table with three columns: + * thread, stack trace, and current allocation size. (When prof_accum is on, + * there's one additional column which is the cumulative allocation size.) + * + * Implementation wise, each thread maintains a hash recording the stack trace + * to allocation size correspondences, which are basically the individual rows + * in the table. In addition, two global "indices" are built to make data + * aggregation efficient (for dumping): bt2gctx and tdatas, which are basically + * the "grouped by stack trace" and "grouped by thread" views of the same table, + * respectively. Note that the allocation size is only aggregated to the two + * indices at dumping time, so as to optimize for performance. + */ + +/******************************************************************************/ + +/* + * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data + * structure that knows about all backtraces currently captured. + */ +static ckh_t bt2gctx; + +/* + * Tree of all extant prof_tdata_t structures, regardless of state, + * {attached,detached,expired}. + */ +static prof_tdata_tree_t tdatas; + +/* + * This buffer is rather large for stack allocation, so use a single buffer for + * all profile dumps. + */ +static char prof_dump_buf[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PROF_DUMP_BUFSIZE +#else + 1 +#endif +]; +static size_t prof_dump_buf_end; +static int prof_dump_fd; + +/******************************************************************************/ +/* Red-black trees. 
*/ + +static int +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { + uint64_t a_thr_uid = a->thr_uid; + uint64_t b_thr_uid = b->thr_uid; + int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); + if (ret == 0) { + uint64_t a_thr_discrim = a->thr_discrim; + uint64_t b_thr_discrim = b->thr_discrim; + ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < + b_thr_discrim); + if (ret == 0) { + uint64_t a_tctx_uid = a->tctx_uid; + uint64_t b_tctx_uid = b->tctx_uid; + ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < + b_tctx_uid); + } + } + return ret; +} + +rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, + tctx_link, prof_tctx_comp) + +static int +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { + unsigned a_len = a->bt.len; + unsigned b_len = b->bt.len; + unsigned comp_len = (a_len < b_len) ? a_len : b_len; + int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); + if (ret == 0) { + ret = (a_len > b_len) - (a_len < b_len); + } + return ret; +} + +rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, + prof_gctx_comp) + +static int +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { + int ret; + uint64_t a_uid = a->thr_uid; + uint64_t b_uid = b->thr_uid; + + ret = ((a_uid > b_uid) - (a_uid < b_uid)); + if (ret == 0) { + uint64_t a_discrim = a->thr_discrim; + uint64_t b_discrim = b->thr_discrim; + + ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); + } + return ret; +} + +rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, + prof_tdata_comp) + +/******************************************************************************/ + +bool +prof_data_init(tsd_t *tsd) { + tdata_tree_new(&tdatas); + return ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, + prof_bt_hash, prof_bt_keycomp); +} + +static void +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + if (tdata != NULL) { + assert(!tdata->enq); + tdata->enq = true; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); +} + +static void +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); + + if (tdata != NULL) { + bool idump, gdump; + + assert(tdata->enq); + tdata->enq = false; + idump = tdata->enq_idump; + tdata->enq_idump = false; + gdump = tdata->enq_gdump; + tdata->enq_gdump = false; + + if (idump) { + prof_idump(tsd_tsdn(tsd)); + } + if (gdump) { + prof_gdump(tsd_tsdn(tsd)); + } + } +} + +static prof_gctx_t * +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { + /* + * Create a single allocation that has space for vec of length bt->len. + */ + size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); + if (gctx == NULL) { + return NULL; + } + gctx->lock = prof_gctx_mutex_choose(); + /* + * Set nlimbo to 1, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + gctx->nlimbo = 1; + tctx_tree_new(&gctx->tctxs); + /* Duplicate bt. 
*/ + memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); + gctx->bt.vec = gctx->vec; + gctx->bt.len = bt->len; + return gctx; +} + +static void +prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, + prof_tdata_t *tdata) { + cassert(config_prof); + + /* + * Check that gctx is still unused by any thread cache before destroying + * it. prof_lookup() increments gctx->nlimbo in order to avoid a race + * condition with this function, as does prof_tctx_destroy() in order to + * avoid a race between the main body of prof_tctx_destroy() and entry + * into this function. + */ + prof_enter(tsd, tdata_self); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + assert(gctx->nlimbo != 0); + if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { + /* Remove gctx from bt2gctx. */ + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { + not_reached(); + } + prof_leave(tsd, tdata_self); + /* Destroy gctx. */ + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); + } else { + /* + * Compensate for increment in prof_tctx_destroy() or + * prof_lookup(). + */ + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_leave(tsd, tdata_self); + } +} + +static bool +prof_gctx_should_destroy(prof_gctx_t *gctx) { + if (opt_prof_accum) { + return false; + } + if (!tctx_tree_empty(&gctx->tctxs)) { + return false; + } + if (gctx->nlimbo != 0) { + return false; + } + return true; +} + +static bool +prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, + void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { + union { + prof_gctx_t *p; + void *v; + } gctx, tgctx; + union { + prof_bt_t *p; + void *v; + } btkey; + bool new_gctx; + + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + /* bt has never been seen before. Insert it. */ + prof_leave(tsd, tdata); + tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); + if (tgctx.v == NULL) { + return true; + } + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + gctx.p = tgctx.p; + btkey.p = &gctx.p->bt; + if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { + /* OOM. */ + prof_leave(tsd, tdata); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, + true, true); + return true; + } + new_gctx = true; + } else { + new_gctx = false; + } + } else { + tgctx.v = NULL; + new_gctx = false; + } + + if (!new_gctx) { + /* + * Increment nlimbo, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); + gctx.p->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); + new_gctx = false; + + if (tgctx.v != NULL) { + /* Lost race to insert. */ + idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, + true); + } + } + prof_leave(tsd, tdata); + + *p_btkey = btkey.v; + *p_gctx = gctx.p; + *p_new_gctx = new_gctx; + return false; +} + +prof_tctx_t * +prof_lookup(tsd_t *tsd, prof_bt_t *bt) { + union { + prof_tctx_t *p; + void *v; + } ret; + prof_tdata_t *tdata; + bool not_found; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return NULL; + } + + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); + if (!not_found) { /* Note double negative! */ + ret.p->prepared = true; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (not_found) { + void *btkey; + prof_gctx_t *gctx; + bool new_gctx, error; + + /* + * This thread's cache lacks bt. 
Look for it in the global + * cache. + */ + if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, + &new_gctx)) { + return NULL; + } + + /* Link a prof_tctx_t into gctx for this thread. */ + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); + if (ret.p == NULL) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } + return NULL; + } + ret.p->tdata = tdata; + ret.p->thr_uid = tdata->thr_uid; + ret.p->thr_discrim = tdata->thr_discrim; + memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); + ret.p->gctx = gctx; + ret.p->tctx_uid = tdata->tctx_uid_next++; + ret.p->prepared = true; + ret.p->state = prof_tctx_state_initializing; + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (error) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } + idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); + return NULL; + } + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + ret.p->state = prof_tctx_state_nominal; + tctx_tree_insert(&gctx->tctxs, ret.p); + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } + + return ret.p; +} + +#ifdef JEMALLOC_JET +static prof_tdata_t * +prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + size_t *tdata_count = (size_t *)arg; + + (*tdata_count)++; + + return NULL; +} + +size_t +prof_tdata_count(void) { + size_t tdata_count = 0; + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, + (void *)&tdata_count); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + + return tdata_count; +} + +size_t +prof_bt_count(void) { + size_t bt_count; + tsd_t *tsd; + prof_tdata_t *tdata; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return 0; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); + bt_count = ckh_count(&bt2gctx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); + + return bt_count; +} +#endif + +static int +prof_dump_open_impl(bool propagate_err, const char *filename) { + int fd; + + fd = creat(filename, 0644); + if (fd == -1 && !propagate_err) { + malloc_printf(": creat(\"%s\"), 0644) failed\n", + filename); + if (opt_abort) { + abort(); + } + } + + return fd; +} +prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; + +static bool +prof_dump_flush(bool propagate_err) { + bool ret = false; + ssize_t err; + + cassert(config_prof); + + err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); + if (err == -1) { + if (!propagate_err) { + malloc_write(": write() failed during heap " + "profile flush\n"); + if (opt_abort) { + abort(); + } + } + ret = true; + } + prof_dump_buf_end = 0; + + return ret; +} + +static bool +prof_dump_close(bool propagate_err) { + bool ret; + + assert(prof_dump_fd != -1); + ret = prof_dump_flush(propagate_err); + close(prof_dump_fd); + prof_dump_fd = -1; + + return ret; +} + +static bool +prof_dump_write(bool propagate_err, const char *s) { + size_t i, slen, n; + + cassert(config_prof); + + i = 0; + slen = strlen(s); + while (i < slen) { + /* Flush the buffer if it is full. */ + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + if (prof_dump_flush(propagate_err) && propagate_err) { + return true; + } + } + + if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) { + /* Finish writing. 
*/ + n = slen - i; + } else { + /* Write as much of s as will fit. */ + n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; + } + memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); + prof_dump_buf_end += n; + i += n; + } + + return false; +} + +JEMALLOC_FORMAT_PRINTF(2, 3) +static bool +prof_dump_printf(bool propagate_err, const char *format, ...) { + bool ret; + va_list ap; + char buf[PROF_PRINTF_BUFSIZE]; + + va_start(ap, format); + malloc_vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); + ret = prof_dump_write(propagate_err, buf); + + return ret; +} + +static void +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + malloc_mutex_lock(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + return; + case prof_tctx_state_nominal: + tctx->state = prof_tctx_state_dumping; + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + + memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); + + tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + tdata->cnt_summed.accumobjs += + tctx->dump_cnts.accumobjs; + tdata->cnt_summed.accumbytes += + tctx->dump_cnts.accumbytes; + } + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + not_reached(); + } +} + +static void +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { + malloc_mutex_assert_owner(tsdn, gctx->lock); + + gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; + gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; + } +} + +static prof_tctx_t * +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); + break; + default: + not_reached(); + } + + return NULL; +} + +struct prof_tctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_tctx_t * +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { + struct prof_tctx_dump_iter_arg_s *arg = + (struct prof_tctx_dump_iter_arg_s *)opaque; + + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + case prof_tctx_state_nominal: + /* Not captured by this dump. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + if (prof_dump_printf(arg->propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " + "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, + tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, + tctx->dump_cnts.accumbytes)) { + return tctx; + } + break; + default: + not_reached(); + } + return NULL; +} + +static prof_tctx_t * +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + prof_tctx_t *ret; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. 
*/ + break; + case prof_tctx_state_dumping: + tctx->state = prof_tctx_state_nominal; + break; + case prof_tctx_state_purgatory: + ret = tctx; + goto label_return; + default: + not_reached(); + } + + ret = NULL; +label_return: + return ret; +} + +static void +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { + cassert(config_prof); + + malloc_mutex_lock(tsdn, gctx->lock); + + /* + * Increment nlimbo so that gctx won't go away before dump. + * Additionally, link gctx into the dump list so that it is included in + * prof_dump()'s second pass. + */ + gctx->nlimbo++; + gctx_tree_insert(gctxs, gctx); + + memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); + + malloc_mutex_unlock(tsdn, gctx->lock); +} + +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t leak_ngctx; +}; + +static prof_gctx_t * +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + struct prof_gctx_merge_iter_arg_s *arg = + (struct prof_gctx_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); + if (gctx->cnt_summed.curobjs != 0) { + arg->leak_ngctx++; + } + malloc_mutex_unlock(arg->tsdn, gctx->lock); + + return NULL; +} + +static void +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { + prof_tdata_t *tdata = prof_tdata_get(tsd, false); + prof_gctx_t *gctx; + + /* + * Standard tree iteration won't work here, because as soon as we + * decrement gctx->nlimbo and unlock gctx, another thread can + * concurrently destroy it, which will corrupt the tree. Therefore, + * tear down the tree one node at a time during iteration. + */ + while ((gctx = gctx_tree_first(gctxs)) != NULL) { + gctx_tree_remove(gctxs, gctx); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + { + prof_tctx_t *next; + + next = NULL; + do { + prof_tctx_t *to_destroy = + tctx_tree_iter(&gctx->tctxs, next, + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); + if (to_destroy != NULL) { + next = tctx_tree_next(&gctx->tctxs, + to_destroy); + tctx_tree_remove(&gctx->tctxs, + to_destroy); + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, NULL, true, true); + } else { + next = NULL; + } + } while (next != NULL); + } + gctx->nlimbo--; + if (prof_gctx_should_destroy(gctx)) { + gctx->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } + } +} + +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t cnt_all; +}; + +static prof_tdata_t * +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *opaque) { + struct prof_tdata_merge_iter_arg_s *arg = + (struct prof_tdata_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, tdata->lock); + if (!tdata->expired) { + size_t tabind; + union { + prof_tctx_t *p; + void *v; + } tctx; + + tdata->dumping = true; + memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); + for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, + &tctx.v);) { + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); + } + + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; + if (opt_prof_accum) { + arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; + } + } else { + tdata->dumping = false; + } + malloc_mutex_unlock(arg->tsdn, tdata->lock); + + return NULL; +} + +static prof_tdata_t * +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, 
prof_tdata_t *tdata, + void *arg) { + bool propagate_err = *(bool *)arg; + + if (!tdata->dumping) { + return NULL; + } + + if (prof_dump_printf(propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", + tdata->thr_uid, tdata->cnt_summed.curobjs, + tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, + tdata->cnt_summed.accumbytes, + (tdata->thread_name != NULL) ? " " : "", + (tdata->thread_name != NULL) ? tdata->thread_name : "")) { + return tdata; + } + return NULL; +} + +static bool +prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) { + bool ret; + + if (prof_dump_printf(propagate_err, + "heap_v2/%"FMTu64"\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, + cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { + return true; + } + + malloc_mutex_lock(tsdn, &tdatas_mtx); + ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, + (void *)&propagate_err) != NULL); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + return ret; +} +prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; + +static bool +prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { + bool ret; + unsigned i; + struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; + + cassert(config_prof); + malloc_mutex_assert_owner(tsdn, gctx->lock); + + /* Avoid dumping such gctx's that have no useful data. */ + if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || + (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { + assert(gctx->cnt_summed.curobjs == 0); + assert(gctx->cnt_summed.curbytes == 0); + assert(gctx->cnt_summed.accumobjs == 0); + assert(gctx->cnt_summed.accumbytes == 0); + ret = false; + goto label_return; + } + + if (prof_dump_printf(propagate_err, "@")) { + ret = true; + goto label_return; + } + for (i = 0; i < bt->len; i++) { + if (prof_dump_printf(propagate_err, " %#"FMTxPTR, + (uintptr_t)bt->vec[i])) { + ret = true; + goto label_return; + } + } + + if (prof_dump_printf(propagate_err, + "\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, + gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { + ret = true; + goto label_return; + } + + prof_tctx_dump_iter_arg.tsdn = tsdn; + prof_tctx_dump_iter_arg.propagate_err = propagate_err; + if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, + (void *)&prof_tctx_dump_iter_arg) != NULL) { + ret = true; + goto label_return; + } + + ret = false; +label_return: + return ret; +} + +#ifndef _WIN32 +JEMALLOC_FORMAT_PRINTF(1, 2) +static int +prof_open_maps(const char *format, ...) 
{ + int mfd; + va_list ap; + char filename[PATH_MAX + 1]; + + va_start(ap, format); + malloc_vsnprintf(filename, sizeof(filename), format, ap); + va_end(ap); + +#if defined(O_CLOEXEC) + mfd = open(filename, O_RDONLY | O_CLOEXEC); +#else + mfd = open(filename, O_RDONLY); + if (mfd != -1) { + fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); + } +#endif + + return mfd; +} +#endif + +static bool +prof_dump_maps(bool propagate_err) { + bool ret; + int mfd; + + cassert(config_prof); +#ifdef __FreeBSD__ + mfd = prof_open_maps("/proc/curproc/map"); +#elif defined(_WIN32) + mfd = -1; // Not implemented +#else + { + int pid = prof_getpid(); + + mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); + if (mfd == -1) { + mfd = prof_open_maps("/proc/%d/maps", pid); + } + } +#endif + if (mfd != -1) { + ssize_t nread; + + if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && + propagate_err) { + ret = true; + goto label_return; + } + nread = 0; + do { + prof_dump_buf_end += nread; + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + /* Make space in prof_dump_buf before read(). */ + if (prof_dump_flush(propagate_err) && + propagate_err) { + ret = true; + goto label_return; + } + } + nread = malloc_read_fd(mfd, + &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE + - prof_dump_buf_end); + } while (nread > 0); + } else { + ret = true; + goto label_return; + } + + ret = false; +label_return: + if (mfd != -1) { + close(mfd); + } + return ret; +} + +/* + * See prof_sample_threshold_update() comment for why the body of this function + * is conditionally compiled. + */ +static void +prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, + const char *filename) { +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. + */ + if (cnt_all->curbytes != 0) { + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf(": Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? 
"s" : ""); + malloc_printf( + ": Run jeprof on \"%s\" for leak detail\n", + filename); + } +#endif +} + +struct prof_gctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_gctx_t * +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + prof_gctx_t *ret; + struct prof_gctx_dump_iter_arg_s *arg = + (struct prof_gctx_dump_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + + if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, + gctxs)) { + ret = gctx; + goto label_return; + } + + ret = NULL; +label_return: + malloc_mutex_unlock(arg->tsdn, gctx->lock); + return ret; +} + +static void +prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + prof_gctx_tree_t *gctxs) { + size_t tabind; + union { + prof_gctx_t *p; + void *v; + } gctx; + + prof_enter(tsd, tdata); + + /* + * Put gctx's in limbo and clear their counters in preparation for + * summing. + */ + gctx_tree_new(gctxs); + for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); + } + + /* + * Iterate over tdatas, and for the non-expired ones snapshot their tctx + * stats and merge them into the associated gctx's. + */ + prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); + memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + (void *)prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + /* Merge tctx stats into gctx's. */ + prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_merge_iter_arg->leak_ngctx = 0; + gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, + (void *)prof_gctx_merge_iter_arg); + + prof_leave(tsd, tdata); +} + +static bool +prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, + prof_gctx_tree_t *gctxs) { + /* Create dump file. */ + if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { + return true; + } + + /* Dump profile header. */ + if (prof_dump_header(tsd_tsdn(tsd), propagate_err, + &prof_tdata_merge_iter_arg->cnt_all)) { + goto label_write_error; + } + + /* Dump per gctx profile stats. */ + prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_dump_iter_arg->propagate_err = propagate_err; + if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, + (void *)prof_gctx_dump_iter_arg) != NULL) { + goto label_write_error; + } + + /* Dump /proc//maps if possible. 
*/ + if (prof_dump_maps(propagate_err)) { + goto label_write_error; + } + + if (prof_dump_close(propagate_err)) { + return true; + } + + return false; +label_write_error: + prof_dump_close(propagate_err); + return true; +} + +bool +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck) { + cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); + + prof_tdata_t * tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + + prof_gctx_tree_t gctxs; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, + &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, + &prof_gctx_dump_iter_arg, &gctxs); + prof_gctx_finish(tsd, &gctxs); + + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + post_reentrancy(tsd); + + if (err) { + return true; + } + + if (leakcheck) { + prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, + prof_gctx_merge_iter_arg.leak_ngctx, filename); + } + return false; +} + +#ifdef JEMALLOC_JET +void +prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, + uint64_t *accumbytes) { + tsd_t *tsd; + prof_tdata_t *tdata; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + prof_gctx_tree_t gctxs; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + if (curobjs != NULL) { + *curobjs = 0; + } + if (curbytes != NULL) { + *curbytes = 0; + } + if (accumobjs != NULL) { + *accumobjs = 0; + } + if (accumbytes != NULL) { + *accumbytes = 0; + } + return; + } + + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + prof_gctx_finish(tsd, &gctxs); + + if (curobjs != NULL) { + *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; + } + if (curbytes != NULL) { + *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; + } + if (accumobjs != NULL) { + *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; + } + if (accumbytes != NULL) { + *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; + } +} +#endif + +void +prof_bt_hash(const void *key, size_t r_hash[2]) { + prof_bt_t *bt = (prof_bt_t *)key; + + cassert(config_prof); + + hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); +} + +bool +prof_bt_keycomp(const void *k1, const void *k2) { + const prof_bt_t *bt1 = (prof_bt_t *)k1; + const prof_bt_t *bt2 = (prof_bt_t *)k2; + + cassert(config_prof); + + if (bt1->len != bt2->len) { + return false; + } + return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); +} + +prof_tdata_t * +prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, + char *thread_name, bool active) { + prof_tdata_t *tdata; + + cassert(config_prof); + + /* Initialize an empty cache for this thread. 
*/ + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (tdata == NULL) { + return NULL; + } + + tdata->lock = prof_tdata_mutex_choose(thr_uid); + tdata->thr_uid = thr_uid; + tdata->thr_discrim = thr_discrim; + tdata->thread_name = thread_name; + tdata->attached = true; + tdata->expired = false; + tdata->tctx_uid_next = 0; + + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); + return NULL; + } + + tdata->prng_state = (uint64_t)(uintptr_t)tdata; + prof_sample_threshold_update(tdata); + + tdata->enq = false; + tdata->enq_idump = false; + tdata->enq_gdump = false; + + tdata->dumping = false; + tdata->active = active; + + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_insert(&tdatas, tdata); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + return tdata; +} + +static bool +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { + if (tdata->attached && !even_if_attached) { + return false; + } + if (ckh_count(&tdata->bt2tctx) != 0) { + return false; + } + return true; +} + +static bool +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsdn, tdata->lock); + + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); +} + +static void +prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); + + tdata_tree_remove(&tdatas, tdata); + + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + } + ckh_delete(tsd, &tdata->bt2tctx); + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); +} + +static void +prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + prof_tdata_destroy_locked(tsd, tdata, even_if_attached); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); +} + +void +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { + bool destroy_tdata; + + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + if (tdata->attached) { + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); + /* + * Only detach if !destroy_tdata, because detaching would allow + * another thread to win the race to destroy tdata. + */ + if (!destroy_tdata) { + tdata->attached = false; + } + tsd_prof_tdata_set(tsd, NULL); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, true); + } +} + +static bool +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { + bool destroy_tdata; + + malloc_mutex_lock(tsdn, tdata->lock); + if (!tdata->expired) { + tdata->expired = true; + destroy_tdata = tdata->attached ? false : + prof_tdata_should_destroy(tsdn, tdata, false); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsdn, tdata->lock); + + return destroy_tdata; +} + +static prof_tdata_t * +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + return (prof_tdata_expire(tsdn, tdata) ? 
tdata : NULL); +} + +void +prof_reset(tsd_t *tsd, size_t lg_sample) { + prof_tdata_t *next; + + assert(lg_sample < (sizeof(uint64_t) << 3)); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + + lg_prof_sample = lg_sample; + + next = NULL; + do { + prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, + prof_tdata_reset_iter, (void *)tsd); + if (to_destroy != NULL) { + next = tdata_tree_next(&tdatas, to_destroy); + prof_tdata_destroy_locked(tsd, to_destroy, false); + } else { + next = NULL; + } + } while (next != NULL); + + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); +} + +void +prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { + prof_tdata_t *tdata = tctx->tdata; + prof_gctx_t *gctx = tctx->gctx; + bool destroy_tdata, destroy_tctx, destroy_gctx; + + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + assert(tctx->cnts.curobjs == 0); + assert(tctx->cnts.curbytes == 0); + assert(!opt_prof_accum); + assert(tctx->cnts.accumobjs == 0); + assert(tctx->cnts.accumbytes == 0); + + ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + switch (tctx->state) { + case prof_tctx_state_nominal: + tctx_tree_remove(&gctx->tctxs, tctx); + destroy_tctx = true; + if (prof_gctx_should_destroy(gctx)) { + /* + * Increment gctx->nlimbo in order to keep another + * thread from winning the race to destroy gctx while + * this one has gctx->lock dropped. Without this, it + * would be possible for another thread to: + * + * 1) Sample an allocation associated with gctx. + * 2) Deallocate the sampled object. + * 3) Successfully prof_gctx_try_destroy(gctx). + * + * The result would be that gctx no longer exists by the + * time this thread accesses it in + * prof_gctx_try_destroy(). + */ + gctx->nlimbo++; + destroy_gctx = true; + } else { + destroy_gctx = false; + } + break; + case prof_tctx_state_dumping: + /* + * A dumping thread needs tctx to remain valid until dumping + * has finished. Change state such that the dumping thread will + * complete destruction during a late dump iteration phase. + */ + tctx->state = prof_tctx_state_purgatory; + destroy_tctx = false; + destroy_gctx = false; + break; + default: + not_reached(); + destroy_tctx = false; + destroy_gctx = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + if (destroy_gctx) { + prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, + tdata); + } + + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, false); + } + + if (destroy_tctx) { + idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); + } +} + +/******************************************************************************/ -- cgit v0.12 From 1a0503367be5950a8da648996ba7ae2620e39393 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 29 Jul 2019 14:09:20 -0700 Subject: Revert "Refactor profiling" This reverts commit 0b462407ae84a62b3c097f0e9f18df487a47d9a7. 
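The prof_tctx_destroy() and prof_gctx_try_destroy() hunks at the end of the preceding patch describe a "limbo count" handshake: a thread that decides an object should die bumps nlimbo before dropping the object's lock, retakes the locks in a fixed global-then-object order, and only frees the object if no other user or limbo holder appeared in the meantime. Below is a minimal standalone sketch of that idiom using POSIX mutexes; the names (node_t, registry_lock, node_release) are hypothetical, and the sketch is illustrative only, not jemalloc's implementation.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical refcounted object reachable through a global registry. */
typedef struct node_s {
	pthread_mutex_t lock;
	unsigned nusers;	/* live references; destruction is only legal at zero */
	unsigned nlimbo;	/* threads intending to destroy after dropping the lock */
} node_t;

/* Lock order is always registry_lock before node->lock. */
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

static node_t *
node_new(void) {
	node_t *node = malloc(sizeof(*node));
	if (node == NULL) {
		abort();
	}
	pthread_mutex_init(&node->lock, NULL);
	node->nusers = 1;	/* the creator holds the first reference */
	node->nlimbo = 0;
	return node;
}

/* Drop one reference.  Called with node->lock held; returns with it released. */
static void
node_release(node_t *node) {
	bool try_destroy = false;

	if (--node->nusers == 0) {
		/*
		 * Advertise the intent to destroy before dropping node->lock,
		 * so a racing releaser cannot free node while this thread
		 * reacquires the locks in registry order below.
		 */
		node->nlimbo++;
		try_destroy = true;
	}
	pthread_mutex_unlock(&node->lock);
	if (!try_destroy) {
		return;
	}

	/* Retake the locks in the canonical order and re-check the decision. */
	pthread_mutex_lock(&registry_lock);
	pthread_mutex_lock(&node->lock);
	if (node->nusers == 0 && node->nlimbo == 1) {
		/* registry_remove(node); -- hypothetical lookup-table removal */
		pthread_mutex_unlock(&node->lock);
		pthread_mutex_unlock(&registry_lock);
		pthread_mutex_destroy(&node->lock);
		free(node);
	} else {
		/* Lost the race (new user or another limbo holder); back off. */
		node->nlimbo--;
		pthread_mutex_unlock(&node->lock);
		pthread_mutex_unlock(&registry_lock);
	}
}

int
main(void) {
	node_t *node = node_new();
	pthread_mutex_lock(&node->lock);
	node_release(node);	/* last reference: node is torn down here */
	return 0;
}

The extra counter buys a two-phase decision: an optimistic check under the object lock alone, then an authoritative re-check after the locks have been retaken in a deadlock-safe order, which is the role gctx->nlimbo plays in the hunks above.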
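Earlier in the same patch, prof_leakcheck() scales the sampled totals by 1.0 / (1.0 - exp(-ratio)), where ratio is the mean size of the sampled objects divided by the sample period (2^lg_prof_sample bytes). The standalone snippet below is not part of the patch; it just works through that arithmetic for the default lg_prof_sample of 19 and some made-up counts, so the size of the correction is easy to see.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	unsigned lg_prof_sample = 19;		/* default: one sample per ~512 KiB */
	uint64_t curobjs = 2;			/* sampled live objects (made up) */
	uint64_t curbytes = 2 * 1024 * 1024;	/* sampled live bytes (made up) */

	double sample_period = (double)((uint64_t)1 << lg_prof_sample);
	double ratio = ((double)curbytes / (double)curobjs) / sample_period;
	double scale_factor = 1.0 / (1.0 - exp(-ratio));

	/* ratio == 2.0 and scale_factor ~= 1.16 for these inputs. */
	printf("scale_factor: %f\n", scale_factor);
	printf("approx leaked bytes: %.0f\n", (double)curbytes * scale_factor);
	printf("approx leaked objects: %.0f\n", (double)curobjs * scale_factor);
	return 0;
}

Allocations much smaller than the sample period get scaled up heavily, because most of them are never sampled, while allocations much larger than it are reported nearly as-is; per the comment in the patch, this is the same correction jeprof's AdjustSamples() applies, only computed here on the summary values rather than per context.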
--- Makefile.in | 1 - include/jemalloc/internal/prof_externs.h | 14 - msvc/projects/vc2015/jemalloc/jemalloc.vcxproj | 1 - .../vc2015/jemalloc/jemalloc.vcxproj.filters | 3 - msvc/projects/vc2017/jemalloc/jemalloc.vcxproj | 1 - .../vc2017/jemalloc/jemalloc.vcxproj.filters | 3 - src/prof.c | 1606 ++++++++++++++++++-- src/prof_data.c | 1440 ------------------ 8 files changed, 1509 insertions(+), 1560 deletions(-) delete mode 100644 src/prof_data.c diff --git a/Makefile.in b/Makefile.in index 40daf11..1cd973d 100644 --- a/Makefile.in +++ b/Makefile.in @@ -117,7 +117,6 @@ C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/pages.c \ $(srcroot)src/prng.c \ $(srcroot)src/prof.c \ - $(srcroot)src/prof_data.c \ $(srcroot)src/prof_log.c \ $(srcroot)src/rtree.c \ $(srcroot)src/safety_check.c \ diff --git a/include/jemalloc/internal/prof_externs.h b/include/jemalloc/internal/prof_externs.h index 8fc45cf..e94ac3b 100644 --- a/include/jemalloc/internal/prof_externs.h +++ b/include/jemalloc/internal/prof_externs.h @@ -4,11 +4,6 @@ #include "jemalloc/internal/mutex.h" extern malloc_mutex_t bt2gctx_mtx; -extern malloc_mutex_t tdatas_mtx; -extern malloc_mutex_t prof_dump_mtx; - -malloc_mutex_t *prof_gctx_mutex_choose(void); -malloc_mutex_t *prof_tdata_mutex_choose(uint64_t thr_uid); extern bool opt_prof; extern bool opt_prof_active; @@ -115,13 +110,4 @@ bool prof_log_rep_check(void); void prof_log_dummy_set(bool new_value); #endif -/* Functions in prof_data.c only accessed in prof.c */ -bool prof_data_init(tsd_t *tsd); -bool prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, - bool leakcheck); -prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, - uint64_t thr_discrim, char *thread_name, bool active); -void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata); -void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); - #endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */ diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj index 387f14b..d93d909 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj @@ -58,7 +58,6 @@ - diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters index 030d826..7b09d4e 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters @@ -67,9 +67,6 @@ Source Files - - Source Files - Source Files diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj index 1606a3a..28bd3cd 100644 --- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj @@ -58,7 +58,6 @@ - diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters index 622b93f..a66c209 100644 --- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters @@ -67,9 +67,6 @@ Source Files - - Source Files - Source Files diff --git a/src/prof.c b/src/prof.c index 79a0ffc..9d1edb3 100644 --- a/src/prof.c +++ b/src/prof.c @@ -3,14 +3,11 @@ #include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" -/* - * This file implements the profiling "APIs" needed by other parts of 
jemalloc, - * and also manages the relevant "operational" data, mainly options and mutexes; - * the core profiling data structures are encapsulated in prof_data.c. - */ - /******************************************************************************/ #ifdef JEMALLOC_PROF_LIBUNWIND @@ -91,10 +88,20 @@ static atomic_u_t cum_gctxs; /* Atomic counter. */ */ static malloc_mutex_t *tdata_locks; +/* + * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data + * structure that knows about all backtraces currently captured. + */ +static ckh_t bt2gctx; /* Non static to enable profiling. */ malloc_mutex_t bt2gctx_mtx; -malloc_mutex_t tdatas_mtx; +/* + * Tree of all extant prof_tdata_t structures, regardless of state, + * {attached,detached,expired}. + */ +static prof_tdata_tree_t tdatas; +static malloc_mutex_t tdatas_mtx; static uint64_t next_thr_uid; static malloc_mutex_t next_thr_uid_mtx; @@ -105,29 +112,101 @@ static uint64_t prof_dump_iseq; static uint64_t prof_dump_mseq; static uint64_t prof_dump_useq; -malloc_mutex_t prof_dump_mtx; +/* + * This buffer is rather large for stack allocation, so use a single buffer for + * all profile dumps. + */ +static malloc_mutex_t prof_dump_mtx; +static char prof_dump_buf[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PROF_DUMP_BUFSIZE +#else + 1 +#endif +]; +static size_t prof_dump_buf_end; +static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ bool prof_booted = false; /******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ -static bool -prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { - malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); +static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); +static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); +static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached); +static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached); +static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); - if (opt_prof_accum) { - return false; +/******************************************************************************/ +/* Red-black trees. */ + +static int +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { + uint64_t a_thr_uid = a->thr_uid; + uint64_t b_thr_uid = b->thr_uid; + int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); + if (ret == 0) { + uint64_t a_thr_discrim = a->thr_discrim; + uint64_t b_thr_discrim = b->thr_discrim; + ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < + b_thr_discrim); + if (ret == 0) { + uint64_t a_tctx_uid = a->tctx_uid; + uint64_t b_tctx_uid = b->tctx_uid; + ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < + b_tctx_uid); + } } - if (tctx->cnts.curobjs != 0) { - return false; + return ret; +} + +rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, + tctx_link, prof_tctx_comp) + +static int +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { + unsigned a_len = a->bt.len; + unsigned b_len = b->bt.len; + unsigned comp_len = (a_len < b_len) ? 
a_len : b_len; + int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); + if (ret == 0) { + ret = (a_len > b_len) - (a_len < b_len); } - if (tctx->prepared) { - return false; + return ret; +} + +rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, + prof_gctx_comp) + +static int +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { + int ret; + uint64_t a_uid = a->thr_uid; + uint64_t b_uid = b->thr_uid; + + ret = ((a_uid > b_uid) - (a_uid < b_uid)); + if (ret == 0) { + uint64_t a_discrim = a->thr_discrim; + uint64_t b_discrim = b->thr_discrim; + + ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); } - return true; + return ret; } +rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, + prof_tdata_comp) + +/******************************************************************************/ + void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { prof_tdata_t *tdata; @@ -207,6 +286,45 @@ bt_init(prof_bt_t *bt, void **vec) { bt->len = 0; } +static void +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + if (tdata != NULL) { + assert(!tdata->enq); + tdata->enq = true; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); +} + +static void +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); + + if (tdata != NULL) { + bool idump, gdump; + + assert(tdata->enq); + tdata->enq = false; + idump = tdata->enq_idump; + tdata->enq_idump = false; + gdump = tdata->enq_gdump; + tdata->enq_gdump = false; + + if (idump) { + prof_idump(tsd_tsdn(tsd)); + } + if (gdump) { + prof_gdump(tsd_tsdn(tsd)); + } + } +} + #ifdef JEMALLOC_PROF_LIBUNWIND void prof_backtrace(prof_bt_t *bt) { @@ -429,18 +547,324 @@ prof_backtrace(prof_bt_t *bt) { } #endif -malloc_mutex_t * +static malloc_mutex_t * prof_gctx_mutex_choose(void) { unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; } -malloc_mutex_t * +static malloc_mutex_t * prof_tdata_mutex_choose(uint64_t thr_uid) { return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; } +static prof_gctx_t * +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { + /* + * Create a single allocation that has space for vec of length bt->len. + */ + size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); + if (gctx == NULL) { + return NULL; + } + gctx->lock = prof_gctx_mutex_choose(); + /* + * Set nlimbo to 1, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + gctx->nlimbo = 1; + tctx_tree_new(&gctx->tctxs); + /* Duplicate bt. */ + memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); + gctx->bt.vec = gctx->vec; + gctx->bt.len = bt->len; + return gctx; +} + +static void +prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, + prof_tdata_t *tdata) { + cassert(config_prof); + + /* + * Check that gctx is still unused by any thread cache before destroying + * it. prof_lookup() increments gctx->nlimbo in order to avoid a race + * condition with this function, as does prof_tctx_destroy() in order to + * avoid a race between the main body of prof_tctx_destroy() and entry + * into this function. 
+ */ + prof_enter(tsd, tdata_self); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + assert(gctx->nlimbo != 0); + if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { + /* Remove gctx from bt2gctx. */ + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { + not_reached(); + } + prof_leave(tsd, tdata_self); + /* Destroy gctx. */ + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); + } else { + /* + * Compensate for increment in prof_tctx_destroy() or + * prof_lookup(). + */ + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_leave(tsd, tdata_self); + } +} + +static bool +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + if (opt_prof_accum) { + return false; + } + if (tctx->cnts.curobjs != 0) { + return false; + } + if (tctx->prepared) { + return false; + } + return true; +} + +static bool +prof_gctx_should_destroy(prof_gctx_t *gctx) { + if (opt_prof_accum) { + return false; + } + if (!tctx_tree_empty(&gctx->tctxs)) { + return false; + } + if (gctx->nlimbo != 0) { + return false; + } + return true; +} + +static void +prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { + prof_tdata_t *tdata = tctx->tdata; + prof_gctx_t *gctx = tctx->gctx; + bool destroy_tdata, destroy_tctx, destroy_gctx; + + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + assert(tctx->cnts.curobjs == 0); + assert(tctx->cnts.curbytes == 0); + assert(!opt_prof_accum); + assert(tctx->cnts.accumobjs == 0); + assert(tctx->cnts.accumbytes == 0); + + ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + switch (tctx->state) { + case prof_tctx_state_nominal: + tctx_tree_remove(&gctx->tctxs, tctx); + destroy_tctx = true; + if (prof_gctx_should_destroy(gctx)) { + /* + * Increment gctx->nlimbo in order to keep another + * thread from winning the race to destroy gctx while + * this one has gctx->lock dropped. Without this, it + * would be possible for another thread to: + * + * 1) Sample an allocation associated with gctx. + * 2) Deallocate the sampled object. + * 3) Successfully prof_gctx_try_destroy(gctx). + * + * The result would be that gctx no longer exists by the + * time this thread accesses it in + * prof_gctx_try_destroy(). + */ + gctx->nlimbo++; + destroy_gctx = true; + } else { + destroy_gctx = false; + } + break; + case prof_tctx_state_dumping: + /* + * A dumping thread needs tctx to remain valid until dumping + * has finished. Change state such that the dumping thread will + * complete destruction during a late dump iteration phase. 
+ */ + tctx->state = prof_tctx_state_purgatory; + destroy_tctx = false; + destroy_gctx = false; + break; + default: + not_reached(); + destroy_tctx = false; + destroy_gctx = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + if (destroy_gctx) { + prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, + tdata); + } + + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, false); + } + + if (destroy_tctx) { + idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); + } +} + +static bool +prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, + void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { + union { + prof_gctx_t *p; + void *v; + } gctx, tgctx; + union { + prof_bt_t *p; + void *v; + } btkey; + bool new_gctx; + + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + /* bt has never been seen before. Insert it. */ + prof_leave(tsd, tdata); + tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); + if (tgctx.v == NULL) { + return true; + } + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + gctx.p = tgctx.p; + btkey.p = &gctx.p->bt; + if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { + /* OOM. */ + prof_leave(tsd, tdata); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, + true, true); + return true; + } + new_gctx = true; + } else { + new_gctx = false; + } + } else { + tgctx.v = NULL; + new_gctx = false; + } + + if (!new_gctx) { + /* + * Increment nlimbo, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); + gctx.p->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); + new_gctx = false; + + if (tgctx.v != NULL) { + /* Lost race to insert. */ + idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, + true); + } + } + prof_leave(tsd, tdata); + + *p_btkey = btkey.v; + *p_gctx = gctx.p; + *p_new_gctx = new_gctx; + return false; +} + +prof_tctx_t * +prof_lookup(tsd_t *tsd, prof_bt_t *bt) { + union { + prof_tctx_t *p; + void *v; + } ret; + prof_tdata_t *tdata; + bool not_found; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return NULL; + } + + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); + if (!not_found) { /* Note double negative! */ + ret.p->prepared = true; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (not_found) { + void *btkey; + prof_gctx_t *gctx; + bool new_gctx, error; + + /* + * This thread's cache lacks bt. Look for it in the global + * cache. + */ + if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, + &new_gctx)) { + return NULL; + } + + /* Link a prof_tctx_t into gctx for this thread. 
*/ + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); + if (ret.p == NULL) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } + return NULL; + } + ret.p->tdata = tdata; + ret.p->thr_uid = tdata->thr_uid; + ret.p->thr_discrim = tdata->thr_discrim; + memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); + ret.p->gctx = gctx; + ret.p->tctx_uid = tdata->tctx_uid_next++; + ret.p->prepared = true; + ret.p->state = prof_tctx_state_initializing; + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (error) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } + idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); + return NULL; + } + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + ret.p->state = prof_tctx_state_nominal; + tctx_tree_insert(&gctx->tctxs, ret.p); + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } + + return ret.p; +} + /* * The bodies of this function and prof_leakcheck() are compiled out unless heap * profiling is enabled, so that it is possible to compile jemalloc with @@ -497,85 +921,884 @@ prof_sample_threshold_update(prof_tdata_t *tdata) { #endif } -int -prof_getpid(void) { -#ifdef _WIN32 - return GetCurrentProcessId(); -#else - return getpid(); -#endif +#ifdef JEMALLOC_JET +static prof_tdata_t * +prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + size_t *tdata_count = (size_t *)arg; + + (*tdata_count)++; + + return NULL; } -#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) -#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) -static void -prof_dump_filename(char *filename, char v, uint64_t vseq) { - cassert(config_prof); +size_t +prof_tdata_count(void) { + size_t tdata_count = 0; + tsdn_t *tsdn; - if (vseq != VSEQ_INVALID) { - /* "...v.heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"FMTu64".%c%"FMTu64".heap", - opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); - } else { - /* "....heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"FMTu64".%c.heap", - opt_prof_prefix, prof_getpid(), prof_dump_seq, v); - } - prof_dump_seq++; + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, + (void *)&tdata_count); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + + return tdata_count; } -static void -prof_fdump(void) { +size_t +prof_bt_count(void) { + size_t bt_count; tsd_t *tsd; - char filename[DUMP_FILENAME_BUFSIZE]; + prof_tdata_t *tdata; - cassert(config_prof); - assert(opt_prof_final); - assert(opt_prof_prefix[0] != '\0'); + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return 0; + } - if (!prof_booted) { - return; + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); + bt_count = ckh_count(&bt2gctx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); + + return bt_count; +} +#endif + +static int +prof_dump_open_impl(bool propagate_err, const char *filename) { + int fd; + + fd = creat(filename, 0644); + if (fd == -1 && !propagate_err) { + malloc_printf(": creat(\"%s\"), 0644) failed\n", + filename); + if (opt_abort) { + abort(); + } } - tsd = tsd_fetch(); - assert(tsd_reentrancy_level_get(tsd) == 0); - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); - prof_dump_filename(filename, 'f', VSEQ_INVALID); - malloc_mutex_unlock(tsd_tsdn(tsd), 
&prof_dump_seq_mtx); - prof_dump(tsd, false, filename, opt_prof_leak); + return fd; } +prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; + +static bool +prof_dump_flush(bool propagate_err) { + bool ret = false; + ssize_t err; -bool -prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { cassert(config_prof); -#ifndef JEMALLOC_ATOMIC_U64 - if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", - WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { - return true; + err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); + if (err == -1) { + if (!propagate_err) { + malloc_write(": write() failed during heap " + "profile flush\n"); + if (opt_abort) { + abort(); + } + } + ret = true; } - prof_accum->accumbytes = 0; -#else - atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); -#endif - return false; + prof_dump_buf_end = 0; + + return ret; } -void -prof_idump(tsdn_t *tsdn) { - tsd_t *tsd; - prof_tdata_t *tdata; +static bool +prof_dump_close(bool propagate_err) { + bool ret; + + assert(prof_dump_fd != -1); + ret = prof_dump_flush(propagate_err); + close(prof_dump_fd); + prof_dump_fd = -1; + + return ret; +} + +static bool +prof_dump_write(bool propagate_err, const char *s) { + size_t i, slen, n; cassert(config_prof); - if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { - return; - } - tsd = tsdn_tsd(tsdn); - if (tsd_reentrancy_level_get(tsd) > 0) { - return; + i = 0; + slen = strlen(s); + while (i < slen) { + /* Flush the buffer if it is full. */ + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + if (prof_dump_flush(propagate_err) && propagate_err) { + return true; + } + } + + if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) { + /* Finish writing. */ + n = slen - i; + } else { + /* Write as much of s as will fit. */ + n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; + } + memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); + prof_dump_buf_end += n; + i += n; + } + + return false; +} + +JEMALLOC_FORMAT_PRINTF(2, 3) +static bool +prof_dump_printf(bool propagate_err, const char *format, ...) 
{ + bool ret; + va_list ap; + char buf[PROF_PRINTF_BUFSIZE]; + + va_start(ap, format); + malloc_vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); + ret = prof_dump_write(propagate_err, buf); + + return ret; +} + +static void +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + malloc_mutex_lock(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + return; + case prof_tctx_state_nominal: + tctx->state = prof_tctx_state_dumping; + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + + memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); + + tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + tdata->cnt_summed.accumobjs += + tctx->dump_cnts.accumobjs; + tdata->cnt_summed.accumbytes += + tctx->dump_cnts.accumbytes; + } + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + not_reached(); + } +} + +static void +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { + malloc_mutex_assert_owner(tsdn, gctx->lock); + + gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; + gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; + } +} + +static prof_tctx_t * +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); + break; + default: + not_reached(); + } + + return NULL; +} + +struct prof_tctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_tctx_t * +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { + struct prof_tctx_dump_iter_arg_s *arg = + (struct prof_tctx_dump_iter_arg_s *)opaque; + + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + case prof_tctx_state_nominal: + /* Not captured by this dump. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + if (prof_dump_printf(arg->propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " + "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, + tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, + tctx->dump_cnts.accumbytes)) { + return tctx; + } + break; + default: + not_reached(); + } + return NULL; +} + +static prof_tctx_t * +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + prof_tctx_t *ret; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. 
*/ + break; + case prof_tctx_state_dumping: + tctx->state = prof_tctx_state_nominal; + break; + case prof_tctx_state_purgatory: + ret = tctx; + goto label_return; + default: + not_reached(); + } + + ret = NULL; +label_return: + return ret; +} + +static void +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { + cassert(config_prof); + + malloc_mutex_lock(tsdn, gctx->lock); + + /* + * Increment nlimbo so that gctx won't go away before dump. + * Additionally, link gctx into the dump list so that it is included in + * prof_dump()'s second pass. + */ + gctx->nlimbo++; + gctx_tree_insert(gctxs, gctx); + + memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); + + malloc_mutex_unlock(tsdn, gctx->lock); +} + +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t leak_ngctx; +}; + +static prof_gctx_t * +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + struct prof_gctx_merge_iter_arg_s *arg = + (struct prof_gctx_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); + if (gctx->cnt_summed.curobjs != 0) { + arg->leak_ngctx++; + } + malloc_mutex_unlock(arg->tsdn, gctx->lock); + + return NULL; +} + +static void +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { + prof_tdata_t *tdata = prof_tdata_get(tsd, false); + prof_gctx_t *gctx; + + /* + * Standard tree iteration won't work here, because as soon as we + * decrement gctx->nlimbo and unlock gctx, another thread can + * concurrently destroy it, which will corrupt the tree. Therefore, + * tear down the tree one node at a time during iteration. + */ + while ((gctx = gctx_tree_first(gctxs)) != NULL) { + gctx_tree_remove(gctxs, gctx); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + { + prof_tctx_t *next; + + next = NULL; + do { + prof_tctx_t *to_destroy = + tctx_tree_iter(&gctx->tctxs, next, + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); + if (to_destroy != NULL) { + next = tctx_tree_next(&gctx->tctxs, + to_destroy); + tctx_tree_remove(&gctx->tctxs, + to_destroy); + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, NULL, true, true); + } else { + next = NULL; + } + } while (next != NULL); + } + gctx->nlimbo--; + if (prof_gctx_should_destroy(gctx)) { + gctx->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } + } +} + +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t cnt_all; +}; + +static prof_tdata_t * +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *opaque) { + struct prof_tdata_merge_iter_arg_s *arg = + (struct prof_tdata_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, tdata->lock); + if (!tdata->expired) { + size_t tabind; + union { + prof_tctx_t *p; + void *v; + } tctx; + + tdata->dumping = true; + memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); + for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, + &tctx.v);) { + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); + } + + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; + if (opt_prof_accum) { + arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; + } + } else { + tdata->dumping = false; + } + malloc_mutex_unlock(arg->tsdn, tdata->lock); + + return NULL; +} + +static prof_tdata_t * +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, 
prof_tdata_t *tdata, + void *arg) { + bool propagate_err = *(bool *)arg; + + if (!tdata->dumping) { + return NULL; + } + + if (prof_dump_printf(propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", + tdata->thr_uid, tdata->cnt_summed.curobjs, + tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, + tdata->cnt_summed.accumbytes, + (tdata->thread_name != NULL) ? " " : "", + (tdata->thread_name != NULL) ? tdata->thread_name : "")) { + return tdata; + } + return NULL; +} + +static bool +prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) { + bool ret; + + if (prof_dump_printf(propagate_err, + "heap_v2/%"FMTu64"\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, + cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { + return true; + } + + malloc_mutex_lock(tsdn, &tdatas_mtx); + ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, + (void *)&propagate_err) != NULL); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + return ret; +} +prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; + +static bool +prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { + bool ret; + unsigned i; + struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; + + cassert(config_prof); + malloc_mutex_assert_owner(tsdn, gctx->lock); + + /* Avoid dumping such gctx's that have no useful data. */ + if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || + (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { + assert(gctx->cnt_summed.curobjs == 0); + assert(gctx->cnt_summed.curbytes == 0); + assert(gctx->cnt_summed.accumobjs == 0); + assert(gctx->cnt_summed.accumbytes == 0); + ret = false; + goto label_return; + } + + if (prof_dump_printf(propagate_err, "@")) { + ret = true; + goto label_return; + } + for (i = 0; i < bt->len; i++) { + if (prof_dump_printf(propagate_err, " %#"FMTxPTR, + (uintptr_t)bt->vec[i])) { + ret = true; + goto label_return; + } + } + + if (prof_dump_printf(propagate_err, + "\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, + gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { + ret = true; + goto label_return; + } + + prof_tctx_dump_iter_arg.tsdn = tsdn; + prof_tctx_dump_iter_arg.propagate_err = propagate_err; + if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, + (void *)&prof_tctx_dump_iter_arg) != NULL) { + ret = true; + goto label_return; + } + + ret = false; +label_return: + return ret; +} + +#ifndef _WIN32 +JEMALLOC_FORMAT_PRINTF(1, 2) +static int +prof_open_maps(const char *format, ...) 
{ + int mfd; + va_list ap; + char filename[PATH_MAX + 1]; + + va_start(ap, format); + malloc_vsnprintf(filename, sizeof(filename), format, ap); + va_end(ap); + +#if defined(O_CLOEXEC) + mfd = open(filename, O_RDONLY | O_CLOEXEC); +#else + mfd = open(filename, O_RDONLY); + if (mfd != -1) { + fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); + } +#endif + + return mfd; +} +#endif + +int +prof_getpid(void) { +#ifdef _WIN32 + return GetCurrentProcessId(); +#else + return getpid(); +#endif +} + +static bool +prof_dump_maps(bool propagate_err) { + bool ret; + int mfd; + + cassert(config_prof); +#ifdef __FreeBSD__ + mfd = prof_open_maps("/proc/curproc/map"); +#elif defined(_WIN32) + mfd = -1; // Not implemented +#else + { + int pid = prof_getpid(); + + mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); + if (mfd == -1) { + mfd = prof_open_maps("/proc/%d/maps", pid); + } + } +#endif + if (mfd != -1) { + ssize_t nread; + + if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && + propagate_err) { + ret = true; + goto label_return; + } + nread = 0; + do { + prof_dump_buf_end += nread; + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + /* Make space in prof_dump_buf before read(). */ + if (prof_dump_flush(propagate_err) && + propagate_err) { + ret = true; + goto label_return; + } + } + nread = malloc_read_fd(mfd, + &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE + - prof_dump_buf_end); + } while (nread > 0); + } else { + ret = true; + goto label_return; + } + + ret = false; +label_return: + if (mfd != -1) { + close(mfd); + } + return ret; +} + +/* + * See prof_sample_threshold_update() comment for why the body of this function + * is conditionally compiled. + */ +static void +prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, + const char *filename) { +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. + */ + if (cnt_all->curbytes != 0) { + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf(": Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? 
"s" : ""); + malloc_printf( + ": Run jeprof on \"%s\" for leak detail\n", + filename); + } +#endif +} + +struct prof_gctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_gctx_t * +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + prof_gctx_t *ret; + struct prof_gctx_dump_iter_arg_s *arg = + (struct prof_gctx_dump_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + + if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, + gctxs)) { + ret = gctx; + goto label_return; + } + + ret = NULL; +label_return: + malloc_mutex_unlock(arg->tsdn, gctx->lock); + return ret; +} + +static void +prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + prof_gctx_tree_t *gctxs) { + size_t tabind; + union { + prof_gctx_t *p; + void *v; + } gctx; + + prof_enter(tsd, tdata); + + /* + * Put gctx's in limbo and clear their counters in preparation for + * summing. + */ + gctx_tree_new(gctxs); + for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); + } + + /* + * Iterate over tdatas, and for the non-expired ones snapshot their tctx + * stats and merge them into the associated gctx's. + */ + prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); + memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + (void *)prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + /* Merge tctx stats into gctx's. */ + prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_merge_iter_arg->leak_ngctx = 0; + gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, + (void *)prof_gctx_merge_iter_arg); + + prof_leave(tsd, tdata); +} + +static bool +prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, + prof_gctx_tree_t *gctxs) { + /* Create dump file. */ + if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { + return true; + } + + /* Dump profile header. */ + if (prof_dump_header(tsd_tsdn(tsd), propagate_err, + &prof_tdata_merge_iter_arg->cnt_all)) { + goto label_write_error; + } + + /* Dump per gctx profile stats. */ + prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_dump_iter_arg->propagate_err = propagate_err; + if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, + (void *)prof_gctx_dump_iter_arg) != NULL) { + goto label_write_error; + } + + /* Dump /proc//maps if possible. 
*/ + if (prof_dump_maps(propagate_err)) { + goto label_write_error; + } + + if (prof_dump_close(propagate_err)) { + return true; + } + + return false; +label_write_error: + prof_dump_close(propagate_err); + return true; +} + +static bool +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck) { + cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); + + prof_tdata_t * tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + + prof_gctx_tree_t gctxs; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, + &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, + &prof_gctx_dump_iter_arg, &gctxs); + prof_gctx_finish(tsd, &gctxs); + + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + post_reentrancy(tsd); + + if (err) { + return true; + } + + if (leakcheck) { + prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, + prof_gctx_merge_iter_arg.leak_ngctx, filename); + } + return false; +} + +#ifdef JEMALLOC_JET +void +prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, + uint64_t *accumbytes) { + tsd_t *tsd; + prof_tdata_t *tdata; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + prof_gctx_tree_t gctxs; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + if (curobjs != NULL) { + *curobjs = 0; + } + if (curbytes != NULL) { + *curbytes = 0; + } + if (accumobjs != NULL) { + *accumobjs = 0; + } + if (accumbytes != NULL) { + *accumbytes = 0; + } + return; + } + + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + prof_gctx_finish(tsd, &gctxs); + + if (curobjs != NULL) { + *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; + } + if (curbytes != NULL) { + *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; + } + if (accumobjs != NULL) { + *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; + } + if (accumbytes != NULL) { + *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; + } +} +#endif + +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) +static void +prof_dump_filename(char *filename, char v, uint64_t vseq) { + cassert(config_prof); + + if (vseq != VSEQ_INVALID) { + /* "...v.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c%"FMTu64".heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); + } else { + /* "....heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c.heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v); + } + prof_dump_seq++; +} + +static void +prof_fdump(void) { + tsd_t *tsd; + char filename[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); + assert(opt_prof_final); + assert(opt_prof_prefix[0] != '\0'); + + if (!prof_booted) { + return; + } + tsd = tsd_fetch(); + assert(tsd_reentrancy_level_get(tsd) == 0); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump_filename(filename, 'f', VSEQ_INVALID); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, opt_prof_leak); +} + +bool 
+prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { + cassert(config_prof); + +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", + WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { + return true; + } + prof_accum->accumbytes = 0; +#else + atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); +#endif + return false; +} + +void +prof_idump(tsdn_t *tsdn) { + tsd_t *tsd; + prof_tdata_t *tdata; + + cassert(config_prof); + + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { + return; + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { + return; } tdata = prof_tdata_get(tsd, false); @@ -654,6 +1877,28 @@ prof_gdump(tsdn_t *tsdn) { } } +void +prof_bt_hash(const void *key, size_t r_hash[2]) { + prof_bt_t *bt = (prof_bt_t *)key; + + cassert(config_prof); + + hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); +} + +bool +prof_bt_keycomp(const void *k1, const void *k2) { + const prof_bt_t *bt1 = (prof_bt_t *)k1; + const prof_bt_t *bt2 = (prof_bt_t *)k2; + + cassert(config_prof); + + if (bt1->len != bt2->len) { + return false; + } + return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); +} + static uint64_t prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; @@ -666,33 +1911,124 @@ prof_thr_uid_alloc(tsdn_t *tsdn) { return thr_uid; } +static prof_tdata_t * +prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, + char *thread_name, bool active) { + prof_tdata_t *tdata; + + cassert(config_prof); + + /* Initialize an empty cache for this thread. */ + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (tdata == NULL) { + return NULL; + } + + tdata->lock = prof_tdata_mutex_choose(thr_uid); + tdata->thr_uid = thr_uid; + tdata->thr_discrim = thr_discrim; + tdata->thread_name = thread_name; + tdata->attached = true; + tdata->expired = false; + tdata->tctx_uid_next = 0; + + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); + return NULL; + } + + tdata->prng_state = (uint64_t)(uintptr_t)tdata; + prof_sample_threshold_update(tdata); + + tdata->enq = false; + tdata->enq_idump = false; + tdata->enq_gdump = false; + + tdata->dumping = false; + tdata->active = active; + + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_insert(&tdatas, tdata); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + return tdata; +} + prof_tdata_t * prof_tdata_init(tsd_t *tsd) { return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); } -static char * -prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { - char *ret; - size_t size; - - if (thread_name == NULL) { - return NULL; +static bool +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { + if (tdata->attached && !even_if_attached) { + return false; + } + if (ckh_count(&tdata->bt2tctx) != 0) { + return false; } + return true; +} - size = strlen(thread_name) + 1; - if (size == 1) { - return ""; +static bool +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsdn, tdata->lock); + + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); +} + +static void +prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached) { 
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); + + tdata_tree_remove(&tdatas, tdata); + + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); } + ckh_delete(tsd, &tdata->bt2tctx); + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); +} - ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - if (ret == NULL) { - return NULL; +static void +prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + prof_tdata_destroy_locked(tsd, tdata, even_if_attached); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); +} + +static void +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { + bool destroy_tdata; + + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + if (tdata->attached) { + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); + /* + * Only detach if !destroy_tdata, because detaching would allow + * another thread to win the race to destroy tdata. + */ + if (!destroy_tdata) { + tdata->attached = false; + } + tsd_prof_tdata_set(tsd, NULL); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, true); } - memcpy(ret, thread_name, size); - return ret; } prof_tdata_t * @@ -708,6 +2044,58 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { active); } +static bool +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { + bool destroy_tdata; + + malloc_mutex_lock(tsdn, tdata->lock); + if (!tdata->expired) { + tdata->expired = true; + destroy_tdata = tdata->attached ? false : + prof_tdata_should_destroy(tsdn, tdata, false); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsdn, tdata->lock); + + return destroy_tdata; +} + +static prof_tdata_t * +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); +} + +void +prof_reset(tsd_t *tsd, size_t lg_sample) { + prof_tdata_t *next; + + assert(lg_sample < (sizeof(uint64_t) << 3)); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + + lg_prof_sample = lg_sample; + + next = NULL; + do { + prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, + prof_tdata_reset_iter, (void *)tsd); + if (to_destroy != NULL) { + next = tdata_tree_next(&tdatas, to_destroy); + prof_tdata_destroy_locked(tsd, to_destroy, false); + } else { + next = NULL; + } + } while (next != NULL); + + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); +} + void prof_tdata_cleanup(tsd_t *tsd) { prof_tdata_t *tdata; @@ -754,6 +2142,29 @@ prof_thread_name_get(tsd_t *tsd) { return (tdata->thread_name != NULL ? 
tdata->thread_name : ""); } +static char * +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { + char *ret; + size_t size; + + if (thread_name == NULL) { + return NULL; + } + + size = strlen(thread_name) + 1; + if (size == 1) { + return ""; + } + + ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (ret == NULL) { + return NULL; + } + memcpy(ret, thread_name, size); + return ret; +} + int prof_thread_name_set(tsd_t *tsd, const char *thread_name) { prof_tdata_t *tdata; @@ -918,15 +2329,16 @@ prof_boot2(tsd_t *tsd) { return true; } - if (prof_data_init(tsd)) { + if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { return true; } - if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { return true; } + tdata_tree_new(&tdatas); if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { return true; diff --git a/src/prof_data.c b/src/prof_data.c deleted file mode 100644 index a4cb749..0000000 --- a/src/prof_data.c +++ /dev/null @@ -1,1440 +0,0 @@ -#define JEMALLOC_PROF_C_ -#include "jemalloc/internal/jemalloc_preamble.h" -#include "jemalloc/internal/jemalloc_internal_includes.h" - -#include "jemalloc/internal/assert.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/malloc_io.h" - -/* - * This file defines and manages the core profiling data structures. - * - * Conceptually, profiling data can be imagined as a table with three columns: - * thread, stack trace, and current allocation size. (When prof_accum is on, - * there's one additional column which is the cumulative allocation size.) - * - * Implementation wise, each thread maintains a hash recording the stack trace - * to allocation size correspondences, which are basically the individual rows - * in the table. In addition, two global "indices" are built to make data - * aggregation efficient (for dumping): bt2gctx and tdatas, which are basically - * the "grouped by stack trace" and "grouped by thread" views of the same table, - * respectively. Note that the allocation size is only aggregated to the two - * indices at dumping time, so as to optimize for performance. - */ - -/******************************************************************************/ - -/* - * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data - * structure that knows about all backtraces currently captured. - */ -static ckh_t bt2gctx; - -/* - * Tree of all extant prof_tdata_t structures, regardless of state, - * {attached,detached,expired}. - */ -static prof_tdata_tree_t tdatas; - -/* - * This buffer is rather large for stack allocation, so use a single buffer for - * all profile dumps. - */ -static char prof_dump_buf[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PROF_DUMP_BUFSIZE -#else - 1 -#endif -]; -static size_t prof_dump_buf_end; -static int prof_dump_fd; - -/******************************************************************************/ -/* Red-black trees. 
*/ - -static int -prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { - uint64_t a_thr_uid = a->thr_uid; - uint64_t b_thr_uid = b->thr_uid; - int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); - if (ret == 0) { - uint64_t a_thr_discrim = a->thr_discrim; - uint64_t b_thr_discrim = b->thr_discrim; - ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < - b_thr_discrim); - if (ret == 0) { - uint64_t a_tctx_uid = a->tctx_uid; - uint64_t b_tctx_uid = b->tctx_uid; - ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < - b_tctx_uid); - } - } - return ret; -} - -rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, - tctx_link, prof_tctx_comp) - -static int -prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { - unsigned a_len = a->bt.len; - unsigned b_len = b->bt.len; - unsigned comp_len = (a_len < b_len) ? a_len : b_len; - int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); - if (ret == 0) { - ret = (a_len > b_len) - (a_len < b_len); - } - return ret; -} - -rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, - prof_gctx_comp) - -static int -prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { - int ret; - uint64_t a_uid = a->thr_uid; - uint64_t b_uid = b->thr_uid; - - ret = ((a_uid > b_uid) - (a_uid < b_uid)); - if (ret == 0) { - uint64_t a_discrim = a->thr_discrim; - uint64_t b_discrim = b->thr_discrim; - - ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); - } - return ret; -} - -rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, - prof_tdata_comp) - -/******************************************************************************/ - -bool -prof_data_init(tsd_t *tsd) { - tdata_tree_new(&tdatas); - return ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, - prof_bt_hash, prof_bt_keycomp); -} - -static void -prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { - cassert(config_prof); - assert(tdata == prof_tdata_get(tsd, false)); - - if (tdata != NULL) { - assert(!tdata->enq); - tdata->enq = true; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); -} - -static void -prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { - cassert(config_prof); - assert(tdata == prof_tdata_get(tsd, false)); - - malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - - if (tdata != NULL) { - bool idump, gdump; - - assert(tdata->enq); - tdata->enq = false; - idump = tdata->enq_idump; - tdata->enq_idump = false; - gdump = tdata->enq_gdump; - tdata->enq_gdump = false; - - if (idump) { - prof_idump(tsd_tsdn(tsd)); - } - if (gdump) { - prof_gdump(tsd_tsdn(tsd)); - } - } -} - -static prof_gctx_t * -prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { - /* - * Create a single allocation that has space for vec of length bt->len. - */ - size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); - prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, - sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), - true); - if (gctx == NULL) { - return NULL; - } - gctx->lock = prof_gctx_mutex_choose(); - /* - * Set nlimbo to 1, in order to avoid a race condition with - * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ - gctx->nlimbo = 1; - tctx_tree_new(&gctx->tctxs); - /* Duplicate bt. 
*/ - memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); - gctx->bt.vec = gctx->vec; - gctx->bt.len = bt->len; - return gctx; -} - -static void -prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, - prof_tdata_t *tdata) { - cassert(config_prof); - - /* - * Check that gctx is still unused by any thread cache before destroying - * it. prof_lookup() increments gctx->nlimbo in order to avoid a race - * condition with this function, as does prof_tctx_destroy() in order to - * avoid a race between the main body of prof_tctx_destroy() and entry - * into this function. - */ - prof_enter(tsd, tdata_self); - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - assert(gctx->nlimbo != 0); - if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { - /* Remove gctx from bt2gctx. */ - if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { - not_reached(); - } - prof_leave(tsd, tdata_self); - /* Destroy gctx. */ - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); - } else { - /* - * Compensate for increment in prof_tctx_destroy() or - * prof_lookup(). - */ - gctx->nlimbo--; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - prof_leave(tsd, tdata_self); - } -} - -static bool -prof_gctx_should_destroy(prof_gctx_t *gctx) { - if (opt_prof_accum) { - return false; - } - if (!tctx_tree_empty(&gctx->tctxs)) { - return false; - } - if (gctx->nlimbo != 0) { - return false; - } - return true; -} - -static bool -prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, - void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { - union { - prof_gctx_t *p; - void *v; - } gctx, tgctx; - union { - prof_bt_t *p; - void *v; - } btkey; - bool new_gctx; - - prof_enter(tsd, tdata); - if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { - /* bt has never been seen before. Insert it. */ - prof_leave(tsd, tdata); - tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); - if (tgctx.v == NULL) { - return true; - } - prof_enter(tsd, tdata); - if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { - gctx.p = tgctx.p; - btkey.p = &gctx.p->bt; - if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { - /* OOM. */ - prof_leave(tsd, tdata); - idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, - true, true); - return true; - } - new_gctx = true; - } else { - new_gctx = false; - } - } else { - tgctx.v = NULL; - new_gctx = false; - } - - if (!new_gctx) { - /* - * Increment nlimbo, in order to avoid a race condition with - * prof_tctx_destroy()/prof_gctx_try_destroy(). - */ - malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); - gctx.p->nlimbo++; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); - new_gctx = false; - - if (tgctx.v != NULL) { - /* Lost race to insert. */ - idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, - true); - } - } - prof_leave(tsd, tdata); - - *p_btkey = btkey.v; - *p_gctx = gctx.p; - *p_new_gctx = new_gctx; - return false; -} - -prof_tctx_t * -prof_lookup(tsd_t *tsd, prof_bt_t *bt) { - union { - prof_tctx_t *p; - void *v; - } ret; - prof_tdata_t *tdata; - bool not_found; - - cassert(config_prof); - - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - return NULL; - } - - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); - if (!not_found) { /* Note double negative! */ - ret.p->prepared = true; - } - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (not_found) { - void *btkey; - prof_gctx_t *gctx; - bool new_gctx, error; - - /* - * This thread's cache lacks bt. 
Look for it in the global - * cache. - */ - if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, - &new_gctx)) { - return NULL; - } - - /* Link a prof_tctx_t into gctx for this thread. */ - ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), - sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, - arena_ichoose(tsd, NULL), true); - if (ret.p == NULL) { - if (new_gctx) { - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } - return NULL; - } - ret.p->tdata = tdata; - ret.p->thr_uid = tdata->thr_uid; - ret.p->thr_discrim = tdata->thr_discrim; - memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); - ret.p->gctx = gctx; - ret.p->tctx_uid = tdata->tctx_uid_next++; - ret.p->prepared = true; - ret.p->state = prof_tctx_state_initializing; - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (error) { - if (new_gctx) { - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } - idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); - return NULL; - } - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - ret.p->state = prof_tctx_state_nominal; - tctx_tree_insert(&gctx->tctxs, ret.p); - gctx->nlimbo--; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - } - - return ret.p; -} - -#ifdef JEMALLOC_JET -static prof_tdata_t * -prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *arg) { - size_t *tdata_count = (size_t *)arg; - - (*tdata_count)++; - - return NULL; -} - -size_t -prof_tdata_count(void) { - size_t tdata_count = 0; - tsdn_t *tsdn; - - tsdn = tsdn_fetch(); - malloc_mutex_lock(tsdn, &tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, - (void *)&tdata_count); - malloc_mutex_unlock(tsdn, &tdatas_mtx); - - return tdata_count; -} - -size_t -prof_bt_count(void) { - size_t bt_count; - tsd_t *tsd; - prof_tdata_t *tdata; - - tsd = tsd_fetch(); - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - return 0; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); - bt_count = ckh_count(&bt2gctx); - malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - - return bt_count; -} -#endif - -static int -prof_dump_open_impl(bool propagate_err, const char *filename) { - int fd; - - fd = creat(filename, 0644); - if (fd == -1 && !propagate_err) { - malloc_printf(": creat(\"%s\"), 0644) failed\n", - filename); - if (opt_abort) { - abort(); - } - } - - return fd; -} -prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; - -static bool -prof_dump_flush(bool propagate_err) { - bool ret = false; - ssize_t err; - - cassert(config_prof); - - err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); - if (err == -1) { - if (!propagate_err) { - malloc_write(": write() failed during heap " - "profile flush\n"); - if (opt_abort) { - abort(); - } - } - ret = true; - } - prof_dump_buf_end = 0; - - return ret; -} - -static bool -prof_dump_close(bool propagate_err) { - bool ret; - - assert(prof_dump_fd != -1); - ret = prof_dump_flush(propagate_err); - close(prof_dump_fd); - prof_dump_fd = -1; - - return ret; -} - -static bool -prof_dump_write(bool propagate_err, const char *s) { - size_t i, slen, n; - - cassert(config_prof); - - i = 0; - slen = strlen(s); - while (i < slen) { - /* Flush the buffer if it is full. */ - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { - if (prof_dump_flush(propagate_err) && propagate_err) { - return true; - } - } - - if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) { - /* Finish writing. 
*/ - n = slen - i; - } else { - /* Write as much of s as will fit. */ - n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; - } - memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); - prof_dump_buf_end += n; - i += n; - } - - return false; -} - -JEMALLOC_FORMAT_PRINTF(2, 3) -static bool -prof_dump_printf(bool propagate_err, const char *format, ...) { - bool ret; - va_list ap; - char buf[PROF_PRINTF_BUFSIZE]; - - va_start(ap, format); - malloc_vsnprintf(buf, sizeof(buf), format, ap); - va_end(ap); - ret = prof_dump_write(propagate_err, buf); - - return ret; -} - -static void -prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { - malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - - malloc_mutex_lock(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_initializing: - malloc_mutex_unlock(tsdn, tctx->gctx->lock); - return; - case prof_tctx_state_nominal: - tctx->state = prof_tctx_state_dumping; - malloc_mutex_unlock(tsdn, tctx->gctx->lock); - - memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); - - tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; - tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; - if (opt_prof_accum) { - tdata->cnt_summed.accumobjs += - tctx->dump_cnts.accumobjs; - tdata->cnt_summed.accumbytes += - tctx->dump_cnts.accumbytes; - } - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - not_reached(); - } -} - -static void -prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { - malloc_mutex_assert_owner(tsdn, gctx->lock); - - gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; - gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; - if (opt_prof_accum) { - gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; - gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; - } -} - -static prof_tctx_t * -prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { - tsdn_t *tsdn = (tsdn_t *)arg; - - malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_nominal: - /* New since dumping started; ignore. */ - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); - break; - default: - not_reached(); - } - - return NULL; -} - -struct prof_tctx_dump_iter_arg_s { - tsdn_t *tsdn; - bool propagate_err; -}; - -static prof_tctx_t * -prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { - struct prof_tctx_dump_iter_arg_s *arg = - (struct prof_tctx_dump_iter_arg_s *)opaque; - - malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_initializing: - case prof_tctx_state_nominal: - /* Not captured by this dump. */ - break; - case prof_tctx_state_dumping: - case prof_tctx_state_purgatory: - if (prof_dump_printf(arg->propagate_err, - " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " - "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, - tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, - tctx->dump_cnts.accumbytes)) { - return tctx; - } - break; - default: - not_reached(); - } - return NULL; -} - -static prof_tctx_t * -prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { - tsdn_t *tsdn = (tsdn_t *)arg; - prof_tctx_t *ret; - - malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); - - switch (tctx->state) { - case prof_tctx_state_nominal: - /* New since dumping started; ignore. 
*/ - break; - case prof_tctx_state_dumping: - tctx->state = prof_tctx_state_nominal; - break; - case prof_tctx_state_purgatory: - ret = tctx; - goto label_return; - default: - not_reached(); - } - - ret = NULL; -label_return: - return ret; -} - -static void -prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { - cassert(config_prof); - - malloc_mutex_lock(tsdn, gctx->lock); - - /* - * Increment nlimbo so that gctx won't go away before dump. - * Additionally, link gctx into the dump list so that it is included in - * prof_dump()'s second pass. - */ - gctx->nlimbo++; - gctx_tree_insert(gctxs, gctx); - - memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - - malloc_mutex_unlock(tsdn, gctx->lock); -} - -struct prof_gctx_merge_iter_arg_s { - tsdn_t *tsdn; - size_t leak_ngctx; -}; - -static prof_gctx_t * -prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { - struct prof_gctx_merge_iter_arg_s *arg = - (struct prof_gctx_merge_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, gctx->lock); - tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, - (void *)arg->tsdn); - if (gctx->cnt_summed.curobjs != 0) { - arg->leak_ngctx++; - } - malloc_mutex_unlock(arg->tsdn, gctx->lock); - - return NULL; -} - -static void -prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { - prof_tdata_t *tdata = prof_tdata_get(tsd, false); - prof_gctx_t *gctx; - - /* - * Standard tree iteration won't work here, because as soon as we - * decrement gctx->nlimbo and unlock gctx, another thread can - * concurrently destroy it, which will corrupt the tree. Therefore, - * tear down the tree one node at a time during iteration. - */ - while ((gctx = gctx_tree_first(gctxs)) != NULL) { - gctx_tree_remove(gctxs, gctx); - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - { - prof_tctx_t *next; - - next = NULL; - do { - prof_tctx_t *to_destroy = - tctx_tree_iter(&gctx->tctxs, next, - prof_tctx_finish_iter, - (void *)tsd_tsdn(tsd)); - if (to_destroy != NULL) { - next = tctx_tree_next(&gctx->tctxs, - to_destroy); - tctx_tree_remove(&gctx->tctxs, - to_destroy); - idalloctm(tsd_tsdn(tsd), to_destroy, - NULL, NULL, true, true); - } else { - next = NULL; - } - } while (next != NULL); - } - gctx->nlimbo--; - if (prof_gctx_should_destroy(gctx)) { - gctx->nlimbo++; - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } else { - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - } - } -} - -struct prof_tdata_merge_iter_arg_s { - tsdn_t *tsdn; - prof_cnt_t cnt_all; -}; - -static prof_tdata_t * -prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *opaque) { - struct prof_tdata_merge_iter_arg_s *arg = - (struct prof_tdata_merge_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, tdata->lock); - if (!tdata->expired) { - size_t tabind; - union { - prof_tctx_t *p; - void *v; - } tctx; - - tdata->dumping = true; - memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); - for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, - &tctx.v);) { - prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); - } - - arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; - arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; - if (opt_prof_accum) { - arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; - arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; - } - } else { - tdata->dumping = false; - } - malloc_mutex_unlock(arg->tsdn, tdata->lock); - - return NULL; -} - -static prof_tdata_t * -prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, 
prof_tdata_t *tdata, - void *arg) { - bool propagate_err = *(bool *)arg; - - if (!tdata->dumping) { - return NULL; - } - - if (prof_dump_printf(propagate_err, - " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", - tdata->thr_uid, tdata->cnt_summed.curobjs, - tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, - tdata->cnt_summed.accumbytes, - (tdata->thread_name != NULL) ? " " : "", - (tdata->thread_name != NULL) ? tdata->thread_name : "")) { - return tdata; - } - return NULL; -} - -static bool -prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, - const prof_cnt_t *cnt_all) { - bool ret; - - if (prof_dump_printf(propagate_err, - "heap_v2/%"FMTu64"\n" - " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", - ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, - cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { - return true; - } - - malloc_mutex_lock(tsdn, &tdatas_mtx); - ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, - (void *)&propagate_err) != NULL); - malloc_mutex_unlock(tsdn, &tdatas_mtx); - return ret; -} -prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; - -static bool -prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, - const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { - bool ret; - unsigned i; - struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; - - cassert(config_prof); - malloc_mutex_assert_owner(tsdn, gctx->lock); - - /* Avoid dumping such gctx's that have no useful data. */ - if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || - (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { - assert(gctx->cnt_summed.curobjs == 0); - assert(gctx->cnt_summed.curbytes == 0); - assert(gctx->cnt_summed.accumobjs == 0); - assert(gctx->cnt_summed.accumbytes == 0); - ret = false; - goto label_return; - } - - if (prof_dump_printf(propagate_err, "@")) { - ret = true; - goto label_return; - } - for (i = 0; i < bt->len; i++) { - if (prof_dump_printf(propagate_err, " %#"FMTxPTR, - (uintptr_t)bt->vec[i])) { - ret = true; - goto label_return; - } - } - - if (prof_dump_printf(propagate_err, - "\n" - " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", - gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, - gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { - ret = true; - goto label_return; - } - - prof_tctx_dump_iter_arg.tsdn = tsdn; - prof_tctx_dump_iter_arg.propagate_err = propagate_err; - if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, - (void *)&prof_tctx_dump_iter_arg) != NULL) { - ret = true; - goto label_return; - } - - ret = false; -label_return: - return ret; -} - -#ifndef _WIN32 -JEMALLOC_FORMAT_PRINTF(1, 2) -static int -prof_open_maps(const char *format, ...) 
{ - int mfd; - va_list ap; - char filename[PATH_MAX + 1]; - - va_start(ap, format); - malloc_vsnprintf(filename, sizeof(filename), format, ap); - va_end(ap); - -#if defined(O_CLOEXEC) - mfd = open(filename, O_RDONLY | O_CLOEXEC); -#else - mfd = open(filename, O_RDONLY); - if (mfd != -1) { - fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); - } -#endif - - return mfd; -} -#endif - -static bool -prof_dump_maps(bool propagate_err) { - bool ret; - int mfd; - - cassert(config_prof); -#ifdef __FreeBSD__ - mfd = prof_open_maps("/proc/curproc/map"); -#elif defined(_WIN32) - mfd = -1; // Not implemented -#else - { - int pid = prof_getpid(); - - mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); - if (mfd == -1) { - mfd = prof_open_maps("/proc/%d/maps", pid); - } - } -#endif - if (mfd != -1) { - ssize_t nread; - - if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && - propagate_err) { - ret = true; - goto label_return; - } - nread = 0; - do { - prof_dump_buf_end += nread; - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { - /* Make space in prof_dump_buf before read(). */ - if (prof_dump_flush(propagate_err) && - propagate_err) { - ret = true; - goto label_return; - } - } - nread = malloc_read_fd(mfd, - &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE - - prof_dump_buf_end); - } while (nread > 0); - } else { - ret = true; - goto label_return; - } - - ret = false; -label_return: - if (mfd != -1) { - close(mfd); - } - return ret; -} - -/* - * See prof_sample_threshold_update() comment for why the body of this function - * is conditionally compiled. - */ -static void -prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, - const char *filename) { -#ifdef JEMALLOC_PROF - /* - * Scaling is equivalent AdjustSamples() in jeprof, but the result may - * differ slightly from what jeprof reports, because here we scale the - * summary values, whereas jeprof scales each context individually and - * reports the sums of the scaled values. - */ - if (cnt_all->curbytes != 0) { - double sample_period = (double)((uint64_t)1 << lg_prof_sample); - double ratio = (((double)cnt_all->curbytes) / - (double)cnt_all->curobjs) / sample_period; - double scale_factor = 1.0 / (1.0 - exp(-ratio)); - uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) - * scale_factor); - uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * - scale_factor); - - malloc_printf(": Leak approximation summary: ~%"FMTu64 - " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", - curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != - 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? 
"s" : ""); - malloc_printf( - ": Run jeprof on \"%s\" for leak detail\n", - filename); - } -#endif -} - -struct prof_gctx_dump_iter_arg_s { - tsdn_t *tsdn; - bool propagate_err; -}; - -static prof_gctx_t * -prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { - prof_gctx_t *ret; - struct prof_gctx_dump_iter_arg_s *arg = - (struct prof_gctx_dump_iter_arg_s *)opaque; - - malloc_mutex_lock(arg->tsdn, gctx->lock); - - if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, - gctxs)) { - ret = gctx; - goto label_return; - } - - ret = NULL; -label_return: - malloc_mutex_unlock(arg->tsdn, gctx->lock); - return ret; -} - -static void -prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, - struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, - struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, - prof_gctx_tree_t *gctxs) { - size_t tabind; - union { - prof_gctx_t *p; - void *v; - } gctx; - - prof_enter(tsd, tdata); - - /* - * Put gctx's in limbo and clear their counters in preparation for - * summing. - */ - gctx_tree_new(gctxs); - for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { - prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); - } - - /* - * Iterate over tdatas, and for the non-expired ones snapshot their tctx - * stats and merge them into the associated gctx's. - */ - prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); - memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, - (void *)prof_tdata_merge_iter_arg); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - - /* Merge tctx stats into gctx's. */ - prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); - prof_gctx_merge_iter_arg->leak_ngctx = 0; - gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, - (void *)prof_gctx_merge_iter_arg); - - prof_leave(tsd, tdata); -} - -static bool -prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, - bool leakcheck, prof_tdata_t *tdata, - struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, - struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, - struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, - prof_gctx_tree_t *gctxs) { - /* Create dump file. */ - if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { - return true; - } - - /* Dump profile header. */ - if (prof_dump_header(tsd_tsdn(tsd), propagate_err, - &prof_tdata_merge_iter_arg->cnt_all)) { - goto label_write_error; - } - - /* Dump per gctx profile stats. */ - prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); - prof_gctx_dump_iter_arg->propagate_err = propagate_err; - if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, - (void *)prof_gctx_dump_iter_arg) != NULL) { - goto label_write_error; - } - - /* Dump /proc//maps if possible. 
*/ - if (prof_dump_maps(propagate_err)) { - goto label_write_error; - } - - if (prof_dump_close(propagate_err)) { - return true; - } - - return false; -label_write_error: - prof_dump_close(propagate_err); - return true; -} - -bool -prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, - bool leakcheck) { - cassert(config_prof); - assert(tsd_reentrancy_level_get(tsd) == 0); - - prof_tdata_t * tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) { - return true; - } - - pre_reentrancy(tsd, NULL); - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); - - prof_gctx_tree_t gctxs; - struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; - struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; - struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; - prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, - &prof_gctx_merge_iter_arg, &gctxs); - bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, - &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, - &prof_gctx_dump_iter_arg, &gctxs); - prof_gctx_finish(tsd, &gctxs); - - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); - post_reentrancy(tsd); - - if (err) { - return true; - } - - if (leakcheck) { - prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, - prof_gctx_merge_iter_arg.leak_ngctx, filename); - } - return false; -} - -#ifdef JEMALLOC_JET -void -prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, - uint64_t *accumbytes) { - tsd_t *tsd; - prof_tdata_t *tdata; - struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; - struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; - prof_gctx_tree_t gctxs; - - tsd = tsd_fetch(); - tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) { - if (curobjs != NULL) { - *curobjs = 0; - } - if (curbytes != NULL) { - *curbytes = 0; - } - if (accumobjs != NULL) { - *accumobjs = 0; - } - if (accumbytes != NULL) { - *accumbytes = 0; - } - return; - } - - prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, - &prof_gctx_merge_iter_arg, &gctxs); - prof_gctx_finish(tsd, &gctxs); - - if (curobjs != NULL) { - *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; - } - if (curbytes != NULL) { - *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; - } - if (accumobjs != NULL) { - *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; - } - if (accumbytes != NULL) { - *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; - } -} -#endif - -void -prof_bt_hash(const void *key, size_t r_hash[2]) { - prof_bt_t *bt = (prof_bt_t *)key; - - cassert(config_prof); - - hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); -} - -bool -prof_bt_keycomp(const void *k1, const void *k2) { - const prof_bt_t *bt1 = (prof_bt_t *)k1; - const prof_bt_t *bt2 = (prof_bt_t *)k2; - - cassert(config_prof); - - if (bt1->len != bt2->len) { - return false; - } - return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); -} - -prof_tdata_t * -prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, - char *thread_name, bool active) { - prof_tdata_t *tdata; - - cassert(config_prof); - - /* Initialize an empty cache for this thread. 
*/ - tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), - sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - if (tdata == NULL) { - return NULL; - } - - tdata->lock = prof_tdata_mutex_choose(thr_uid); - tdata->thr_uid = thr_uid; - tdata->thr_discrim = thr_discrim; - tdata->thread_name = thread_name; - tdata->attached = true; - tdata->expired = false; - tdata->tctx_uid_next = 0; - - if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) { - idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); - return NULL; - } - - tdata->prng_state = (uint64_t)(uintptr_t)tdata; - prof_sample_threshold_update(tdata); - - tdata->enq = false; - tdata->enq_idump = false; - tdata->enq_gdump = false; - - tdata->dumping = false; - tdata->active = active; - - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - tdata_tree_insert(&tdatas, tdata); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - - return tdata; -} - -static bool -prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { - if (tdata->attached && !even_if_attached) { - return false; - } - if (ckh_count(&tdata->bt2tctx) != 0) { - return false; - } - return true; -} - -static bool -prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, - bool even_if_attached) { - malloc_mutex_assert_owner(tsdn, tdata->lock); - - return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); -} - -static void -prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached) { - malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); - - tdata_tree_remove(&tdatas, tdata); - - assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); - - if (tdata->thread_name != NULL) { - idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, - true); - } - ckh_delete(tsd, &tdata->bt2tctx); - idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); -} - -static void -prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - prof_tdata_destroy_locked(tsd, tdata, even_if_attached); - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); -} - -void -prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { - bool destroy_tdata; - - malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); - if (tdata->attached) { - destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, - true); - /* - * Only detach if !destroy_tdata, because detaching would allow - * another thread to win the race to destroy tdata. - */ - if (!destroy_tdata) { - tdata->attached = false; - } - tsd_prof_tdata_set(tsd, NULL); - } else { - destroy_tdata = false; - } - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (destroy_tdata) { - prof_tdata_destroy(tsd, tdata, true); - } -} - -static bool -prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { - bool destroy_tdata; - - malloc_mutex_lock(tsdn, tdata->lock); - if (!tdata->expired) { - tdata->expired = true; - destroy_tdata = tdata->attached ? false : - prof_tdata_should_destroy(tsdn, tdata, false); - } else { - destroy_tdata = false; - } - malloc_mutex_unlock(tsdn, tdata->lock); - - return destroy_tdata; -} - -static prof_tdata_t * -prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *arg) { - tsdn_t *tsdn = (tsdn_t *)arg; - - return (prof_tdata_expire(tsdn, tdata) ? 
tdata : NULL); -} - -void -prof_reset(tsd_t *tsd, size_t lg_sample) { - prof_tdata_t *next; - - assert(lg_sample < (sizeof(uint64_t) << 3)); - - malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); - malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); - - lg_prof_sample = lg_sample; - - next = NULL; - do { - prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, - prof_tdata_reset_iter, (void *)tsd); - if (to_destroy != NULL) { - next = tdata_tree_next(&tdatas, to_destroy); - prof_tdata_destroy_locked(tsd, to_destroy, false); - } else { - next = NULL; - } - } while (next != NULL); - - malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); -} - -void -prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { - prof_tdata_t *tdata = tctx->tdata; - prof_gctx_t *gctx = tctx->gctx; - bool destroy_tdata, destroy_tctx, destroy_gctx; - - malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - assert(tctx->cnts.curobjs == 0); - assert(tctx->cnts.curbytes == 0); - assert(!opt_prof_accum); - assert(tctx->cnts.accumobjs == 0); - assert(tctx->cnts.accumbytes == 0); - - ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); - destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); - malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - - malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); - switch (tctx->state) { - case prof_tctx_state_nominal: - tctx_tree_remove(&gctx->tctxs, tctx); - destroy_tctx = true; - if (prof_gctx_should_destroy(gctx)) { - /* - * Increment gctx->nlimbo in order to keep another - * thread from winning the race to destroy gctx while - * this one has gctx->lock dropped. Without this, it - * would be possible for another thread to: - * - * 1) Sample an allocation associated with gctx. - * 2) Deallocate the sampled object. - * 3) Successfully prof_gctx_try_destroy(gctx). - * - * The result would be that gctx no longer exists by the - * time this thread accesses it in - * prof_gctx_try_destroy(). - */ - gctx->nlimbo++; - destroy_gctx = true; - } else { - destroy_gctx = false; - } - break; - case prof_tctx_state_dumping: - /* - * A dumping thread needs tctx to remain valid until dumping - * has finished. Change state such that the dumping thread will - * complete destruction during a late dump iteration phase. - */ - tctx->state = prof_tctx_state_purgatory; - destroy_tctx = false; - destroy_gctx = false; - break; - default: - not_reached(); - destroy_tctx = false; - destroy_gctx = false; - } - malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - if (destroy_gctx) { - prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, - tdata); - } - - malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - if (destroy_tdata) { - prof_tdata_destroy(tsd, tdata, false); - } - - if (destroy_tctx) { - idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); - } -} - -/******************************************************************************/ -- cgit v0.12 From 5742473cc87558b4655064ebacfd837119673928 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 29 Jul 2019 14:09:20 -0700 Subject: Revert "Refactor prof log" This reverts commit 7618b0b8e458d9c0db6e4b05ccbe6c6308952890. 
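For orientation before the large diff that follows: the revert folds the allocation-log machinery back into src/prof.c. A minimal, hedged usage sketch of that machinery from application code is given below. It assumes an unprefixed jemalloc built with profiling and run with MALLOC_CONF="prof:true", and it uses the "prof.log_start" / "prof.log_stop" mallctl names to match the prof_log_start/prof_log_stop entry points restored here; treat those exact name strings as an assumption rather than something this patch guarantees.

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    static void
    demo_prof_log(void) {
        /* Start logging sampled allocations to a named JSON file; passing a
         * NULL newp instead would let jemalloc pick a default file name. */
        const char *filename = "prof_log_demo.json";
        mallctl("prof.log_start", NULL, NULL, (void *)&filename,
            sizeof(const char *));

        void *p = malloc(1 << 20);  /* work whose allocations may be sampled */
        free(p);

        /* Flush the collected threads, backtraces and allocations to disk. */
        mallctl("prof.log_stop", NULL, NULL, NULL, 0);
    }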
--- Makefile.in | 1 - include/jemalloc/internal/prof_externs.h | 8 - msvc/projects/vc2015/jemalloc/jemalloc.vcxproj | 1 - .../vc2015/jemalloc/jemalloc.vcxproj.filters | 3 - msvc/projects/vc2017/jemalloc/jemalloc.vcxproj | 1 - .../vc2017/jemalloc/jemalloc.vcxproj.filters | 3 - src/prof.c | 682 +++++++++++++++++++- src/prof_log.c | 698 --------------------- 8 files changed, 677 insertions(+), 720 deletions(-) delete mode 100644 src/prof_log.c diff --git a/Makefile.in b/Makefile.in index 1cd973d..7128b00 100644 --- a/Makefile.in +++ b/Makefile.in @@ -117,7 +117,6 @@ C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/pages.c \ $(srcroot)src/prng.c \ $(srcroot)src/prof.c \ - $(srcroot)src/prof_log.c \ $(srcroot)src/rtree.c \ $(srcroot)src/safety_check.c \ $(srcroot)src/stats.c \ diff --git a/include/jemalloc/internal/prof_externs.h b/include/jemalloc/internal/prof_externs.h index e94ac3b..094f3e1 100644 --- a/include/jemalloc/internal/prof_externs.h +++ b/include/jemalloc/internal/prof_externs.h @@ -43,8 +43,6 @@ extern uint64_t prof_interval; */ extern size_t lg_prof_sample; -extern bool prof_booted; - void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); @@ -66,14 +64,10 @@ extern prof_dump_header_t *JET_MUTABLE prof_dump_header; void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, uint64_t *accumbytes); #endif -int prof_getpid(void); bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum); void prof_idump(tsdn_t *tsdn); bool prof_mdump(tsd_t *tsd, const char *filename); void prof_gdump(tsdn_t *tsdn); - -void prof_bt_hash(const void *key, size_t r_hash[2]); -bool prof_bt_keycomp(const void *k1, const void *k2); prof_tdata_t *prof_tdata_init(tsd_t *tsd); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); void prof_reset(tsd_t *tsd, size_t lg_sample); @@ -97,10 +91,8 @@ void prof_postfork_parent(tsdn_t *tsdn); void prof_postfork_child(tsdn_t *tsdn); void prof_sample_threshold_update(prof_tdata_t *tdata); -void prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx); bool prof_log_start(tsdn_t *tsdn, const char *filename); bool prof_log_stop(tsdn_t *tsdn); -bool prof_log_init(tsd_t *tsdn); #ifdef JEMALLOC_JET size_t prof_log_bt_count(void); size_t prof_log_alloc_count(void); diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj index d93d909..228e8be 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj @@ -58,7 +58,6 @@ - diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters index 7b09d4e..d839515 100644 --- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters @@ -67,9 +67,6 @@ Source Files - - Source Files - Source Files diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj index 28bd3cd..edcceed 100644 --- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj +++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj @@ -58,7 +58,6 @@ - diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters index a66c209..6df7260 100644 --- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters +++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters @@ -67,9 +67,6 @@ 
Source Files - - Source Files - Source Files diff --git a/src/prof.c b/src/prof.c index 9d1edb3..4ebe279 100644 --- a/src/prof.c +++ b/src/prof.c @@ -7,6 +7,7 @@ #include "jemalloc/internal/hash.h" #include "jemalloc/internal/malloc_io.h" #include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/emitter.h" /******************************************************************************/ @@ -38,6 +39,7 @@ bool opt_prof_gdump = false; bool opt_prof_final = false; bool opt_prof_leak = false; bool opt_prof_accum = false; +bool opt_prof_log = false; char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. */ #ifdef JEMALLOC_PROF @@ -70,6 +72,100 @@ uint64_t prof_interval = 0; size_t lg_prof_sample; +typedef enum prof_logging_state_e prof_logging_state_t; +enum prof_logging_state_e { + prof_logging_state_stopped, + prof_logging_state_started, + prof_logging_state_dumping +}; + +/* + * - stopped: log_start never called, or previous log_stop has completed. + * - started: log_start called, log_stop not called yet. Allocations are logged. + * - dumping: log_stop called but not finished; samples are not logged anymore. + */ +prof_logging_state_t prof_logging_state = prof_logging_state_stopped; + +#ifdef JEMALLOC_JET +static bool prof_log_dummy = false; +#endif + +/* Incremented for every log file that is output. */ +static uint64_t log_seq = 0; +static char log_filename[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* Timestamp for most recent call to log_start(). */ +static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER; + +/* Increment these when adding to the log_bt and log_thr linked lists. */ +static size_t log_bt_index = 0; +static size_t log_thr_index = 0; + +/* Linked list node definitions. These are only used in prof.c. */ +typedef struct prof_bt_node_s prof_bt_node_t; + +struct prof_bt_node_s { + prof_bt_node_t *next; + size_t index; + prof_bt_t bt; + /* Variable size backtrace vector pointed to by bt. */ + void *vec[1]; +}; + +typedef struct prof_thr_node_s prof_thr_node_t; + +struct prof_thr_node_s { + prof_thr_node_t *next; + size_t index; + uint64_t thr_uid; + /* Variable size based on thr_name_sz. */ + char name[1]; +}; + +typedef struct prof_alloc_node_s prof_alloc_node_t; + +/* This is output when logging sampled allocations. */ +struct prof_alloc_node_s { + prof_alloc_node_t *next; + /* Indices into an array of thread data. */ + size_t alloc_thr_ind; + size_t free_thr_ind; + + /* Indices into an array of backtraces. */ + size_t alloc_bt_ind; + size_t free_bt_ind; + + uint64_t alloc_time_ns; + uint64_t free_time_ns; + + size_t usize; +}; + +/* + * Created on the first call to prof_log_start and deleted on prof_log_stop. + * These are the backtraces and threads that have already been logged by an + * allocation. + */ +static bool log_tables_initialized = false; +static ckh_t log_bt_node_set; +static ckh_t log_thr_node_set; + +/* Store linked lists for logged data. */ +static prof_bt_node_t *log_bt_first = NULL; +static prof_bt_node_t *log_bt_last = NULL; +static prof_thr_node_t *log_thr_first = NULL; +static prof_thr_node_t *log_thr_last = NULL; +static prof_alloc_node_t *log_alloc_first = NULL; +static prof_alloc_node_t *log_alloc_last = NULL; + +/* Protects the prof_logging_state and any log_{...} variable. */ +static malloc_mutex_t log_mtx; + /* * Table of mutexes that are shared among gctx's. These are leaf locks, so * there is no problem with using them for more than one gctx at the same time. 
@@ -129,7 +225,7 @@ static size_t prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ -bool prof_booted = false; +static bool prof_booted = false; /******************************************************************************/ /* @@ -145,6 +241,12 @@ static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached); static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); +/* Hashtable functions for log_bt_node_set and log_thr_node_set. */ +static void prof_thr_node_hash(const void *key, size_t r_hash[2]); +static bool prof_thr_node_keycomp(const void *k1, const void *k2); +static void prof_bt_node_hash(const void *key, size_t r_hash[2]); +static bool prof_bt_node_keycomp(const void *k1, const void *k2); + /******************************************************************************/ /* Red-black trees. */ @@ -259,6 +361,162 @@ prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, malloc_mutex_unlock(tsdn, tctx->tdata->lock); } +static size_t +prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { + assert(prof_logging_state == prof_logging_state_started); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); + + prof_bt_node_t dummy_node; + dummy_node.bt = *bt; + prof_bt_node_t *node; + + /* See if this backtrace is already cached in the table. */ + if (ckh_search(&log_bt_node_set, (void *)(&dummy_node), + (void **)(&node), NULL)) { + size_t sz = offsetof(prof_bt_node_t, vec) + + (bt->len * sizeof(void *)); + prof_bt_node_t *new_node = (prof_bt_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, + true, arena_get(TSDN_NULL, 0, true), true); + if (log_bt_first == NULL) { + log_bt_first = new_node; + log_bt_last = new_node; + } else { + log_bt_last->next = new_node; + log_bt_last = new_node; + } + + new_node->next = NULL; + new_node->index = log_bt_index; + /* + * Copy the backtrace: bt is inside a tdata or gctx, which + * might die before prof_log_stop is called. + */ + new_node->bt.len = bt->len; + memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *)); + new_node->bt.vec = new_node->vec; + + log_bt_index++; + ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL); + return new_node->index; + } else { + return node->index; + } +} +static size_t +prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { + assert(prof_logging_state == prof_logging_state_started); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); + + prof_thr_node_t dummy_node; + dummy_node.thr_uid = thr_uid; + prof_thr_node_t *node; + + /* See if this thread is already cached in the table. 
*/ + if (ckh_search(&log_thr_node_set, (void *)(&dummy_node), + (void **)(&node), NULL)) { + size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1; + prof_thr_node_t *new_node = (prof_thr_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, + true, arena_get(TSDN_NULL, 0, true), true); + if (log_thr_first == NULL) { + log_thr_first = new_node; + log_thr_last = new_node; + } else { + log_thr_last->next = new_node; + log_thr_last = new_node; + } + + new_node->next = NULL; + new_node->index = log_thr_index; + new_node->thr_uid = thr_uid; + strcpy(new_node->name, name); + + log_thr_index++; + ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL); + return new_node->index; + } else { + return node->index; + } +} + +static void +prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false); + if (cons_tdata == NULL) { + /* + * We decide not to log these allocations. cons_tdata will be + * NULL only when the current thread is in a weird state (e.g. + * it's being destroyed). + */ + return; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); + + if (prof_logging_state != prof_logging_state_started) { + goto label_done; + } + + if (!log_tables_initialized) { + bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, + prof_bt_node_hash, prof_bt_node_keycomp); + bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, + prof_thr_node_hash, prof_thr_node_keycomp); + if (err1 || err2) { + goto label_done; + } + log_tables_initialized = true; + } + + nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr, + (alloc_ctx_t *)NULL); + nstime_t free_time = NSTIME_ZERO_INITIALIZER; + nstime_update(&free_time); + + size_t sz = sizeof(prof_alloc_node_t); + prof_alloc_node_t *new_node = (prof_alloc_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + + const char *prod_thr_name = (tctx->tdata->thread_name == NULL)? + "" : tctx->tdata->thread_name; + const char *cons_thr_name = prof_thread_name_get(tsd); + + prof_bt_t bt; + /* Initialize the backtrace, using the buffer in tdata to store it. */ + bt_init(&bt, cons_tdata->vec); + prof_backtrace(&bt); + prof_bt_t *cons_bt = &bt; + + /* We haven't destroyed tctx yet, so gctx should be good to read. */ + prof_bt_t *prod_bt = &tctx->gctx->bt; + + new_node->next = NULL; + new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid, + prod_thr_name); + new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid, + cons_thr_name); + new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt); + new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt); + new_node->alloc_time_ns = nstime_ns(&alloc_time); + new_node->free_time_ns = nstime_ns(&free_time); + new_node->usize = usize; + + if (log_alloc_first == NULL) { + log_alloc_first = new_node; + log_alloc_last = new_node; + } else { + log_alloc_last->next = new_node; + log_alloc_last = new_node; + } + +label_done: + malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); +} + void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { @@ -1435,7 +1693,7 @@ prof_open_maps(const char *format, ...) 
{ } #endif -int +static int prof_getpid(void) { #ifdef _WIN32 return GetCurrentProcessId(); @@ -1877,7 +2135,7 @@ prof_gdump(tsdn_t *tsdn) { } } -void +static void prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; @@ -1886,7 +2144,7 @@ prof_bt_hash(const void *key, size_t r_hash[2]) { hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); } -bool +static bool prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; @@ -1899,6 +2157,33 @@ prof_bt_keycomp(const void *k1, const void *k2) { return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } +static void +prof_bt_node_hash(const void *key, size_t r_hash[2]) { + const prof_bt_node_t *bt_node = (prof_bt_node_t *)key; + prof_bt_hash((void *)(&bt_node->bt), r_hash); +} + +static bool +prof_bt_node_keycomp(const void *k1, const void *k2) { + const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; + const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2; + return prof_bt_keycomp((void *)(&bt_node1->bt), + (void *)(&bt_node2->bt)); +} + +static void +prof_thr_node_hash(const void *key, size_t r_hash[2]) { + const prof_thr_node_t *thr_node = (prof_thr_node_t *)key; + hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash); +} + +static bool +prof_thr_node_keycomp(const void *k1, const void *k2) { + const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1; + const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2; + return thr_node1->thr_uid == thr_node2->thr_uid; +} + static uint64_t prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; @@ -2131,6 +2416,368 @@ prof_active_set(tsdn_t *tsdn, bool active) { return prof_active_old; } +#ifdef JEMALLOC_JET +size_t +prof_log_bt_count(void) { + size_t cnt = 0; + prof_bt_node_t *node = log_bt_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +size_t +prof_log_alloc_count(void) { + size_t cnt = 0; + prof_alloc_node_t *node = log_alloc_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +size_t +prof_log_thr_count(void) { + size_t cnt = 0; + prof_thr_node_t *node = log_thr_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +bool +prof_log_is_logging(void) { + return prof_logging_state == prof_logging_state_started; +} + +bool +prof_log_rep_check(void) { + if (prof_logging_state == prof_logging_state_stopped + && log_tables_initialized) { + return true; + } + + if (log_bt_last != NULL && log_bt_last->next != NULL) { + return true; + } + if (log_thr_last != NULL && log_thr_last->next != NULL) { + return true; + } + if (log_alloc_last != NULL && log_alloc_last->next != NULL) { + return true; + } + + size_t bt_count = prof_log_bt_count(); + size_t thr_count = prof_log_thr_count(); + size_t alloc_count = prof_log_alloc_count(); + + + if (prof_logging_state == prof_logging_state_stopped) { + if (bt_count != 0 || thr_count != 0 || alloc_count || 0) { + return true; + } + } + + prof_alloc_node_t *node = log_alloc_first; + while (node != NULL) { + if (node->alloc_bt_ind >= bt_count) { + return true; + } + if (node->free_bt_ind >= bt_count) { + return true; + } + if (node->alloc_thr_ind >= thr_count) { + return true; + } + if (node->free_thr_ind >= thr_count) { + return true; + } + if (node->alloc_time_ns > node->free_time_ns) { + return true; + } + node = node->next; + } + + return false; +} + +void +prof_log_dummy_set(bool new_value) { + prof_log_dummy = new_value; +} +#endif + +bool 
+prof_log_start(tsdn_t *tsdn, const char *filename) { + if (!opt_prof || !prof_booted) { + return true; + } + + bool ret = false; + size_t buf_size = PATH_MAX + 1; + + malloc_mutex_lock(tsdn, &log_mtx); + + if (prof_logging_state != prof_logging_state_stopped) { + ret = true; + } else if (filename == NULL) { + /* Make default name. */ + malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json", + opt_prof_prefix, prof_getpid(), log_seq); + log_seq++; + prof_logging_state = prof_logging_state_started; + } else if (strlen(filename) >= buf_size) { + ret = true; + } else { + strcpy(log_filename, filename); + prof_logging_state = prof_logging_state_started; + } + + if (!ret) { + nstime_update(&log_start_timestamp); + } + + malloc_mutex_unlock(tsdn, &log_mtx); + + return ret; +} + +/* Used as an atexit function to stop logging on exit. */ +static void +prof_log_stop_final(void) { + tsd_t *tsd = tsd_fetch(); + prof_log_stop(tsd_tsdn(tsd)); +} + +struct prof_emitter_cb_arg_s { + int fd; + ssize_t ret; +}; + +static void +prof_emitter_write_cb(void *opaque, const char *to_write) { + struct prof_emitter_cb_arg_s *arg = + (struct prof_emitter_cb_arg_s *)opaque; + size_t bytes = strlen(to_write); +#ifdef JEMALLOC_JET + if (prof_log_dummy) { + return; + } +#endif + arg->ret = write(arg->fd, (void *)to_write, bytes); +} + +/* + * prof_log_emit_{...} goes through the appropriate linked list, emitting each + * node to the json and deallocating it. + */ +static void +prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "threads"); + prof_thr_node_t *thr_node = log_thr_first; + prof_thr_node_t *thr_old_node; + while (thr_node != NULL) { + emitter_json_object_begin(emitter); + + emitter_json_kv(emitter, "thr_uid", emitter_type_uint64, + &thr_node->thr_uid); + + char *thr_name = thr_node->name; + + emitter_json_kv(emitter, "thr_name", emitter_type_string, + &thr_name); + + emitter_json_object_end(emitter); + thr_old_node = thr_node; + thr_node = thr_node->next; + idalloc(tsd, thr_old_node); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "stack_traces"); + prof_bt_node_t *bt_node = log_bt_first; + prof_bt_node_t *bt_old_node; + /* + * Calculate how many hex digits we need: twice number of bytes, two for + * "0x", and then one more for terminating '\0'. 
+ */ + char buf[2 * sizeof(intptr_t) + 3]; + size_t buf_sz = sizeof(buf); + while (bt_node != NULL) { + emitter_json_array_begin(emitter); + size_t i; + for (i = 0; i < bt_node->bt.len; i++) { + malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]); + char *trace_str = buf; + emitter_json_value(emitter, emitter_type_string, + &trace_str); + } + emitter_json_array_end(emitter); + + bt_old_node = bt_node; + bt_node = bt_node->next; + idalloc(tsd, bt_old_node); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "allocations"); + prof_alloc_node_t *alloc_node = log_alloc_first; + prof_alloc_node_t *alloc_old_node; + while (alloc_node != NULL) { + emitter_json_object_begin(emitter); + + emitter_json_kv(emitter, "alloc_thread", emitter_type_size, + &alloc_node->alloc_thr_ind); + + emitter_json_kv(emitter, "free_thread", emitter_type_size, + &alloc_node->free_thr_ind); + + emitter_json_kv(emitter, "alloc_trace", emitter_type_size, + &alloc_node->alloc_bt_ind); + + emitter_json_kv(emitter, "free_trace", emitter_type_size, + &alloc_node->free_bt_ind); + + emitter_json_kv(emitter, "alloc_timestamp", + emitter_type_uint64, &alloc_node->alloc_time_ns); + + emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64, + &alloc_node->free_time_ns); + + emitter_json_kv(emitter, "usize", emitter_type_uint64, + &alloc_node->usize); + + emitter_json_object_end(emitter); + + alloc_old_node = alloc_node; + alloc_node = alloc_node->next; + idalloc(tsd, alloc_old_node); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_metadata(emitter_t *emitter) { + emitter_json_object_kv_begin(emitter, "info"); + + nstime_t now = NSTIME_ZERO_INITIALIZER; + + nstime_update(&now); + uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp); + emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns); + + char *vers = JEMALLOC_VERSION; + emitter_json_kv(emitter, "version", + emitter_type_string, &vers); + + emitter_json_kv(emitter, "lg_sample_rate", + emitter_type_int, &lg_prof_sample); + + int pid = prof_getpid(); + emitter_json_kv(emitter, "pid", emitter_type_int, &pid); + + emitter_json_object_end(emitter); +} + + +bool +prof_log_stop(tsdn_t *tsdn) { + if (!opt_prof || !prof_booted) { + return true; + } + + tsd_t *tsd = tsdn_tsd(tsdn); + malloc_mutex_lock(tsdn, &log_mtx); + + if (prof_logging_state != prof_logging_state_started) { + malloc_mutex_unlock(tsdn, &log_mtx); + return true; + } + + /* + * Set the state to dumping. We'll set it to stopped when we're done. + * Since other threads won't be able to start/stop/log when the state is + * dumping, we don't have to hold the lock during the whole method. + */ + prof_logging_state = prof_logging_state_dumping; + malloc_mutex_unlock(tsdn, &log_mtx); + + + emitter_t emitter; + + /* Create a file. */ + + int fd; +#ifdef JEMALLOC_JET + if (prof_log_dummy) { + fd = 0; + } else { + fd = creat(log_filename, 0644); + } +#else + fd = creat(log_filename, 0644); +#endif + + if (fd == -1) { + malloc_printf(": creat() for log file \"%s\" " + " failed with %d\n", log_filename, errno); + if (opt_abort) { + abort(); + } + return true; + } + + /* Emit to json. 
*/ + struct prof_emitter_cb_arg_s arg; + arg.fd = fd; + emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb, + (void *)(&arg)); + + emitter_json_object_begin(&emitter); + prof_log_emit_metadata(&emitter); + prof_log_emit_threads(tsd, &emitter); + prof_log_emit_traces(tsd, &emitter); + prof_log_emit_allocs(tsd, &emitter); + emitter_json_object_end(&emitter); + + /* Reset global state. */ + if (log_tables_initialized) { + ckh_delete(tsd, &log_bt_node_set); + ckh_delete(tsd, &log_thr_node_set); + } + log_tables_initialized = false; + log_bt_index = 0; + log_thr_index = 0; + log_bt_first = NULL; + log_bt_last = NULL; + log_thr_first = NULL; + log_thr_last = NULL; + log_alloc_first = NULL; + log_alloc_last = NULL; + + malloc_mutex_lock(tsdn, &log_mtx); + prof_logging_state = prof_logging_state_stopped; + malloc_mutex_unlock(tsdn, &log_mtx); + +#ifdef JEMALLOC_JET + if (prof_log_dummy) { + return false; + } +#endif + return close(fd); +} + const char * prof_thread_name_get(tsd_t *tsd) { prof_tdata_t *tdata; @@ -2367,10 +3014,35 @@ prof_boot2(tsd_t *tsd) { } } - if (prof_log_init(tsd)) { + if (opt_prof_log) { + prof_log_start(tsd_tsdn(tsd), NULL); + } + + if (atexit(prof_log_stop_final) != 0) { + malloc_write(": Error in atexit() " + "for logging\n"); + if (opt_abort) { + abort(); + } + } + + if (malloc_mutex_init(&log_mtx, "prof_log", + WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) { + return true; + } + + if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, + prof_bt_node_hash, prof_bt_node_keycomp)) { return true; } + if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, + prof_thr_node_hash, prof_thr_node_keycomp)) { + return true; + } + + log_tables_initialized = true; + gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), CACHELINE); diff --git a/src/prof_log.c b/src/prof_log.c deleted file mode 100644 index 25a6abe..0000000 --- a/src/prof_log.c +++ /dev/null @@ -1,698 +0,0 @@ -#define JEMALLOC_PROF_C_ -#include "jemalloc/internal/jemalloc_preamble.h" -#include "jemalloc/internal/jemalloc_internal_includes.h" - -#include "jemalloc/internal/assert.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/malloc_io.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/emitter.h" - -bool opt_prof_log = false; -typedef enum prof_logging_state_e prof_logging_state_t; -enum prof_logging_state_e { - prof_logging_state_stopped, - prof_logging_state_started, - prof_logging_state_dumping -}; - -/* - * - stopped: log_start never called, or previous log_stop has completed. - * - started: log_start called, log_stop not called yet. Allocations are logged. - * - dumping: log_stop called but not finished; samples are not logged anymore. - */ -prof_logging_state_t prof_logging_state = prof_logging_state_stopped; - -#ifdef JEMALLOC_JET -static bool prof_log_dummy = false; -#endif - -/* Incremented for every log file that is output. */ -static uint64_t log_seq = 0; -static char log_filename[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; - -/* Timestamp for most recent call to log_start(). */ -static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER; - -/* Increment these when adding to the log_bt and log_thr linked lists. */ -static size_t log_bt_index = 0; -static size_t log_thr_index = 0; - -/* Linked list node definitions. These are only used in this file. 
*/ -typedef struct prof_bt_node_s prof_bt_node_t; - -struct prof_bt_node_s { - prof_bt_node_t *next; - size_t index; - prof_bt_t bt; - /* Variable size backtrace vector pointed to by bt. */ - void *vec[1]; -}; - -typedef struct prof_thr_node_s prof_thr_node_t; - -struct prof_thr_node_s { - prof_thr_node_t *next; - size_t index; - uint64_t thr_uid; - /* Variable size based on thr_name_sz. */ - char name[1]; -}; - -typedef struct prof_alloc_node_s prof_alloc_node_t; - -/* This is output when logging sampled allocations. */ -struct prof_alloc_node_s { - prof_alloc_node_t *next; - /* Indices into an array of thread data. */ - size_t alloc_thr_ind; - size_t free_thr_ind; - - /* Indices into an array of backtraces. */ - size_t alloc_bt_ind; - size_t free_bt_ind; - - uint64_t alloc_time_ns; - uint64_t free_time_ns; - - size_t usize; -}; - -/* - * Created on the first call to prof_log_start and deleted on prof_log_stop. - * These are the backtraces and threads that have already been logged by an - * allocation. - */ -static bool log_tables_initialized = false; -static ckh_t log_bt_node_set; -static ckh_t log_thr_node_set; - -/* Store linked lists for logged data. */ -static prof_bt_node_t *log_bt_first = NULL; -static prof_bt_node_t *log_bt_last = NULL; -static prof_thr_node_t *log_thr_first = NULL; -static prof_thr_node_t *log_thr_last = NULL; -static prof_alloc_node_t *log_alloc_first = NULL; -static prof_alloc_node_t *log_alloc_last = NULL; - -/* Protects the prof_logging_state and any log_{...} variable. */ -static malloc_mutex_t log_mtx; - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. - */ - -/* Hashtable functions for log_bt_node_set and log_thr_node_set. */ -static void prof_thr_node_hash(const void *key, size_t r_hash[2]); -static bool prof_thr_node_keycomp(const void *k1, const void *k2); -static void prof_bt_node_hash(const void *key, size_t r_hash[2]); -static bool prof_bt_node_keycomp(const void *k1, const void *k2); - -/******************************************************************************/ - -static size_t -prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { - assert(prof_logging_state == prof_logging_state_started); - malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); - - prof_bt_node_t dummy_node; - dummy_node.bt = *bt; - prof_bt_node_t *node; - - /* See if this backtrace is already cached in the table. */ - if (ckh_search(&log_bt_node_set, (void *)(&dummy_node), - (void **)(&node), NULL)) { - size_t sz = offsetof(prof_bt_node_t, vec) + - (bt->len * sizeof(void *)); - prof_bt_node_t *new_node = (prof_bt_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, - true, arena_get(TSDN_NULL, 0, true), true); - if (log_bt_first == NULL) { - log_bt_first = new_node; - log_bt_last = new_node; - } else { - log_bt_last->next = new_node; - log_bt_last = new_node; - } - - new_node->next = NULL; - new_node->index = log_bt_index; - /* - * Copy the backtrace: bt is inside a tdata or gctx, which - * might die before prof_log_stop is called. 
- */ - new_node->bt.len = bt->len; - memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *)); - new_node->bt.vec = new_node->vec; - - log_bt_index++; - ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL); - return new_node->index; - } else { - return node->index; - } -} -static size_t -prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { - assert(prof_logging_state == prof_logging_state_started); - malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); - - prof_thr_node_t dummy_node; - dummy_node.thr_uid = thr_uid; - prof_thr_node_t *node; - - /* See if this thread is already cached in the table. */ - if (ckh_search(&log_thr_node_set, (void *)(&dummy_node), - (void **)(&node), NULL)) { - size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1; - prof_thr_node_t *new_node = (prof_thr_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, - true, arena_get(TSDN_NULL, 0, true), true); - if (log_thr_first == NULL) { - log_thr_first = new_node; - log_thr_last = new_node; - } else { - log_thr_last->next = new_node; - log_thr_last = new_node; - } - - new_node->next = NULL; - new_node->index = log_thr_index; - new_node->thr_uid = thr_uid; - strcpy(new_node->name, name); - - log_thr_index++; - ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL); - return new_node->index; - } else { - return node->index; - } -} - -void -prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { - malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); - - prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false); - if (cons_tdata == NULL) { - /* - * We decide not to log these allocations. cons_tdata will be - * NULL only when the current thread is in a weird state (e.g. - * it's being destroyed). - */ - return; - } - - malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); - - if (prof_logging_state != prof_logging_state_started) { - goto label_done; - } - - if (!log_tables_initialized) { - bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, - prof_bt_node_hash, prof_bt_node_keycomp); - bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, - prof_thr_node_hash, prof_thr_node_keycomp); - if (err1 || err2) { - goto label_done; - } - log_tables_initialized = true; - } - - nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr, - (alloc_ctx_t *)NULL); - nstime_t free_time = NSTIME_ZERO_INITIALIZER; - nstime_update(&free_time); - - size_t sz = sizeof(prof_alloc_node_t); - prof_alloc_node_t *new_node = (prof_alloc_node_t *) - iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, - arena_get(TSDN_NULL, 0, true), true); - - const char *prod_thr_name = (tctx->tdata->thread_name == NULL)? - "" : tctx->tdata->thread_name; - const char *cons_thr_name = prof_thread_name_get(tsd); - - prof_bt_t bt; - /* Initialize the backtrace, using the buffer in tdata to store it. */ - bt_init(&bt, cons_tdata->vec); - prof_backtrace(&bt); - prof_bt_t *cons_bt = &bt; - - /* We haven't destroyed tctx yet, so gctx should be good to read. 
*/ - prof_bt_t *prod_bt = &tctx->gctx->bt; - - new_node->next = NULL; - new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid, - prod_thr_name); - new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid, - cons_thr_name); - new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt); - new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt); - new_node->alloc_time_ns = nstime_ns(&alloc_time); - new_node->free_time_ns = nstime_ns(&free_time); - new_node->usize = usize; - - if (log_alloc_first == NULL) { - log_alloc_first = new_node; - log_alloc_last = new_node; - } else { - log_alloc_last->next = new_node; - log_alloc_last = new_node; - } - -label_done: - malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); -} - -static void -prof_bt_node_hash(const void *key, size_t r_hash[2]) { - const prof_bt_node_t *bt_node = (prof_bt_node_t *)key; - prof_bt_hash((void *)(&bt_node->bt), r_hash); -} - -static bool -prof_bt_node_keycomp(const void *k1, const void *k2) { - const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; - const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2; - return prof_bt_keycomp((void *)(&bt_node1->bt), - (void *)(&bt_node2->bt)); -} - -static void -prof_thr_node_hash(const void *key, size_t r_hash[2]) { - const prof_thr_node_t *thr_node = (prof_thr_node_t *)key; - hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash); -} - -static bool -prof_thr_node_keycomp(const void *k1, const void *k2) { - const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1; - const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2; - return thr_node1->thr_uid == thr_node2->thr_uid; -} - -#ifdef JEMALLOC_JET -size_t -prof_log_bt_count(void) { - size_t cnt = 0; - prof_bt_node_t *node = log_bt_first; - while (node != NULL) { - cnt++; - node = node->next; - } - return cnt; -} - -size_t -prof_log_alloc_count(void) { - size_t cnt = 0; - prof_alloc_node_t *node = log_alloc_first; - while (node != NULL) { - cnt++; - node = node->next; - } - return cnt; -} - -size_t -prof_log_thr_count(void) { - size_t cnt = 0; - prof_thr_node_t *node = log_thr_first; - while (node != NULL) { - cnt++; - node = node->next; - } - return cnt; -} - -bool -prof_log_is_logging(void) { - return prof_logging_state == prof_logging_state_started; -} - -bool -prof_log_rep_check(void) { - if (prof_logging_state == prof_logging_state_stopped - && log_tables_initialized) { - return true; - } - - if (log_bt_last != NULL && log_bt_last->next != NULL) { - return true; - } - if (log_thr_last != NULL && log_thr_last->next != NULL) { - return true; - } - if (log_alloc_last != NULL && log_alloc_last->next != NULL) { - return true; - } - - size_t bt_count = prof_log_bt_count(); - size_t thr_count = prof_log_thr_count(); - size_t alloc_count = prof_log_alloc_count(); - - - if (prof_logging_state == prof_logging_state_stopped) { - if (bt_count != 0 || thr_count != 0 || alloc_count || 0) { - return true; - } - } - - prof_alloc_node_t *node = log_alloc_first; - while (node != NULL) { - if (node->alloc_bt_ind >= bt_count) { - return true; - } - if (node->free_bt_ind >= bt_count) { - return true; - } - if (node->alloc_thr_ind >= thr_count) { - return true; - } - if (node->free_thr_ind >= thr_count) { - return true; - } - if (node->alloc_time_ns > node->free_time_ns) { - return true; - } - node = node->next; - } - - return false; -} - -void -prof_log_dummy_set(bool new_value) { - prof_log_dummy = new_value; -} -#endif - -bool -prof_log_start(tsdn_t *tsdn, const char *filename) { - if (!opt_prof || !prof_booted) { - return 
true; - } - - bool ret = false; - size_t buf_size = PATH_MAX + 1; - - malloc_mutex_lock(tsdn, &log_mtx); - - if (prof_logging_state != prof_logging_state_stopped) { - ret = true; - } else if (filename == NULL) { - /* Make default name. */ - malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json", - opt_prof_prefix, prof_getpid(), log_seq); - log_seq++; - prof_logging_state = prof_logging_state_started; - } else if (strlen(filename) >= buf_size) { - ret = true; - } else { - strcpy(log_filename, filename); - prof_logging_state = prof_logging_state_started; - } - - if (!ret) { - nstime_update(&log_start_timestamp); - } - - malloc_mutex_unlock(tsdn, &log_mtx); - - return ret; -} - -/* Used as an atexit function to stop logging on exit. */ -static void -prof_log_stop_final(void) { - tsd_t *tsd = tsd_fetch(); - prof_log_stop(tsd_tsdn(tsd)); -} - -struct prof_emitter_cb_arg_s { - int fd; - ssize_t ret; -}; - -static void -prof_emitter_write_cb(void *opaque, const char *to_write) { - struct prof_emitter_cb_arg_s *arg = - (struct prof_emitter_cb_arg_s *)opaque; - size_t bytes = strlen(to_write); -#ifdef JEMALLOC_JET - if (prof_log_dummy) { - return; - } -#endif - arg->ret = write(arg->fd, (void *)to_write, bytes); -} - -/* - * prof_log_emit_{...} goes through the appropriate linked list, emitting each - * node to the json and deallocating it. - */ -static void -prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) { - emitter_json_array_kv_begin(emitter, "threads"); - prof_thr_node_t *thr_node = log_thr_first; - prof_thr_node_t *thr_old_node; - while (thr_node != NULL) { - emitter_json_object_begin(emitter); - - emitter_json_kv(emitter, "thr_uid", emitter_type_uint64, - &thr_node->thr_uid); - - char *thr_name = thr_node->name; - - emitter_json_kv(emitter, "thr_name", emitter_type_string, - &thr_name); - - emitter_json_object_end(emitter); - thr_old_node = thr_node; - thr_node = thr_node->next; - idalloc(tsd, thr_old_node); - } - emitter_json_array_end(emitter); -} - -static void -prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { - emitter_json_array_kv_begin(emitter, "stack_traces"); - prof_bt_node_t *bt_node = log_bt_first; - prof_bt_node_t *bt_old_node; - /* - * Calculate how many hex digits we need: twice number of bytes, two for - * "0x", and then one more for terminating '\0'. 
- */ - char buf[2 * sizeof(intptr_t) + 3]; - size_t buf_sz = sizeof(buf); - while (bt_node != NULL) { - emitter_json_array_begin(emitter); - size_t i; - for (i = 0; i < bt_node->bt.len; i++) { - malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]); - char *trace_str = buf; - emitter_json_value(emitter, emitter_type_string, - &trace_str); - } - emitter_json_array_end(emitter); - - bt_old_node = bt_node; - bt_node = bt_node->next; - idalloc(tsd, bt_old_node); - } - emitter_json_array_end(emitter); -} - -static void -prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) { - emitter_json_array_kv_begin(emitter, "allocations"); - prof_alloc_node_t *alloc_node = log_alloc_first; - prof_alloc_node_t *alloc_old_node; - while (alloc_node != NULL) { - emitter_json_object_begin(emitter); - - emitter_json_kv(emitter, "alloc_thread", emitter_type_size, - &alloc_node->alloc_thr_ind); - - emitter_json_kv(emitter, "free_thread", emitter_type_size, - &alloc_node->free_thr_ind); - - emitter_json_kv(emitter, "alloc_trace", emitter_type_size, - &alloc_node->alloc_bt_ind); - - emitter_json_kv(emitter, "free_trace", emitter_type_size, - &alloc_node->free_bt_ind); - - emitter_json_kv(emitter, "alloc_timestamp", - emitter_type_uint64, &alloc_node->alloc_time_ns); - - emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64, - &alloc_node->free_time_ns); - - emitter_json_kv(emitter, "usize", emitter_type_uint64, - &alloc_node->usize); - - emitter_json_object_end(emitter); - - alloc_old_node = alloc_node; - alloc_node = alloc_node->next; - idalloc(tsd, alloc_old_node); - } - emitter_json_array_end(emitter); -} - -static void -prof_log_emit_metadata(emitter_t *emitter) { - emitter_json_object_kv_begin(emitter, "info"); - - nstime_t now = NSTIME_ZERO_INITIALIZER; - - nstime_update(&now); - uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp); - emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns); - - char *vers = JEMALLOC_VERSION; - emitter_json_kv(emitter, "version", - emitter_type_string, &vers); - - emitter_json_kv(emitter, "lg_sample_rate", - emitter_type_int, &lg_prof_sample); - - int pid = prof_getpid(); - emitter_json_kv(emitter, "pid", emitter_type_int, &pid); - - emitter_json_object_end(emitter); -} - - -bool -prof_log_stop(tsdn_t *tsdn) { - if (!opt_prof || !prof_booted) { - return true; - } - - tsd_t *tsd = tsdn_tsd(tsdn); - malloc_mutex_lock(tsdn, &log_mtx); - - if (prof_logging_state != prof_logging_state_started) { - malloc_mutex_unlock(tsdn, &log_mtx); - return true; - } - - /* - * Set the state to dumping. We'll set it to stopped when we're done. - * Since other threads won't be able to start/stop/log when the state is - * dumping, we don't have to hold the lock during the whole method. - */ - prof_logging_state = prof_logging_state_dumping; - malloc_mutex_unlock(tsdn, &log_mtx); - - - emitter_t emitter; - - /* Create a file. */ - - int fd; -#ifdef JEMALLOC_JET - if (prof_log_dummy) { - fd = 0; - } else { - fd = creat(log_filename, 0644); - } -#else - fd = creat(log_filename, 0644); -#endif - - if (fd == -1) { - malloc_printf(": creat() for log file \"%s\" " - " failed with %d\n", log_filename, errno); - if (opt_abort) { - abort(); - } - return true; - } - - /* Emit to json. 
*/ - struct prof_emitter_cb_arg_s arg; - arg.fd = fd; - emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb, - (void *)(&arg)); - - emitter_json_object_begin(&emitter); - prof_log_emit_metadata(&emitter); - prof_log_emit_threads(tsd, &emitter); - prof_log_emit_traces(tsd, &emitter); - prof_log_emit_allocs(tsd, &emitter); - emitter_json_object_end(&emitter); - - /* Reset global state. */ - if (log_tables_initialized) { - ckh_delete(tsd, &log_bt_node_set); - ckh_delete(tsd, &log_thr_node_set); - } - log_tables_initialized = false; - log_bt_index = 0; - log_thr_index = 0; - log_bt_first = NULL; - log_bt_last = NULL; - log_thr_first = NULL; - log_thr_last = NULL; - log_alloc_first = NULL; - log_alloc_last = NULL; - - malloc_mutex_lock(tsdn, &log_mtx); - prof_logging_state = prof_logging_state_stopped; - malloc_mutex_unlock(tsdn, &log_mtx); - -#ifdef JEMALLOC_JET - if (prof_log_dummy) { - return false; - } -#endif - return close(fd); -} - -bool prof_log_init(tsd_t *tsd) { - if (opt_prof_log) { - prof_log_start(tsd_tsdn(tsd), NULL); - } - - if (atexit(prof_log_stop_final) != 0) { - malloc_write(": Error in atexit() " - "for logging\n"); - if (opt_abort) { - abort(); - } - } - - if (malloc_mutex_init(&log_mtx, "prof_log", - WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) { - return true; - } - - if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, - prof_bt_node_hash, prof_bt_node_keycomp)) { - return true; - } - - if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, - prof_thr_node_hash, prof_thr_node_keycomp)) { - return true; - } - - log_tables_initialized = true; - return false; -} - -/******************************************************************************/ -- cgit v0.12 From c9cdc1b27f8aa9c1e81e733e60d470c04be960b3 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 29 Jul 2019 11:43:08 -0700 Subject: Limit to exact fit on Windows with retain off. W/o retain, split and merge are disallowed on Windows. Avoid doing first-fit which needs splitting almost always. Instead, try exact fit only and bail out early. --- src/extent.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/extent.c b/src/extent.c index a2dbde1..9237f90 100644 --- a/src/extent.c +++ b/src/extent.c @@ -445,6 +445,16 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, extent_t *ret = NULL; pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); + + if (!maps_coalesce && !opt_retain) { + /* + * No split / merge allowed (Windows w/o retain). Try exact fit + * only. + */ + return extent_heap_empty(&extents->heaps[pind]) ? NULL : + extent_heap_first(&extents->heaps[pind]); + } + for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, (size_t)pind); i < SC_NPSIZES + 1; -- cgit v0.12 From 9344d25488b626739c9080eb471d1bd15eeb046b Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Fri, 26 Jul 2019 17:00:24 -0700 Subject: Workaround to address g++ unused variable warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit g++ 5.5.0+ complained `parameter ‘expected’ set but not used [-Werror=unused-but-set-parameter]` (despite that `expected` is in fact used). 
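A minimal, self-contained sketch of the pattern behind the warning follows; it is a hypothetical reduction for illustration, not code taken from this patch. The parameter is only ever handed to the __atomic builtin by address, which the affected g++ versions do not count as a use.

    /* Build with: g++ -c -Werror=unused-but-set-parameter repro.cc */
    static inline bool
    cas_weak(int *a, int *expected, int desired) {
        /* 'expected' is both read and updated through the builtin, yet g++
         * 5.5.0+ may still report it as "set but not used". */
        return __atomic_compare_exchange_n(a, expected, desired, true,
            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    }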
--- include/jemalloc/internal/atomic_gcc_atomic.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/jemalloc/internal/atomic_gcc_atomic.h b/include/jemalloc/internal/atomic_gcc_atomic.h index 6b73a14..471515e 100644 --- a/include/jemalloc/internal/atomic_gcc_atomic.h +++ b/include/jemalloc/internal/atomic_gcc_atomic.h @@ -67,7 +67,8 @@ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ - type *expected, type desired, atomic_memory_order_t success_mo, \ + UNUSED type *expected, type desired, \ + atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ return __atomic_compare_exchange(&a->repr, expected, &desired, \ true, atomic_enum_to_builtin(success_mo), \ @@ -76,7 +77,8 @@ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ \ ATOMIC_INLINE bool \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ - type *expected, type desired, atomic_memory_order_t success_mo, \ + UNUSED type *expected, type desired, \ + atomic_memory_order_t success_mo, \ atomic_memory_order_t failure_mo) { \ return __atomic_compare_exchange(&a->repr, expected, &desired, \ false, \ -- cgit v0.12 From 82b8aaaeb68ccb65ca52532f4806a43fbdb26b7a Mon Sep 17 00:00:00 2001 From: Yinan Zhang Date: Tue, 30 Jul 2019 11:26:13 -0700 Subject: Quick fix for prof log printing The emitter APIs used were incorrect, a side effect of which was extra lines being printed. --- src/prof.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/prof.c b/src/prof.c index 4ebe279..fcf9c6f 100644 --- a/src/prof.c +++ b/src/prof.c @@ -2744,12 +2744,12 @@ prof_log_stop(tsdn_t *tsdn) { emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb, (void *)(&arg)); - emitter_json_object_begin(&emitter); + emitter_begin(&emitter); prof_log_emit_metadata(&emitter); prof_log_emit_threads(tsd, &emitter); prof_log_emit_traces(tsd, &emitter); prof_log_emit_allocs(tsd, &emitter); - emitter_json_object_end(&emitter); + emitter_end(&emitter); /* Reset global state. */ if (log_tables_initialized) { -- cgit v0.12 From 8a94ac25d597e439b05b38c013e4cb2d1169c681 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Thu, 1 Aug 2019 16:16:44 -0700 Subject: Sanity check on prof dump buffer size. --- src/prof.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/prof.c b/src/prof.c index fcf9c6f..13334cb 100644 --- a/src/prof.c +++ b/src/prof.c @@ -1303,6 +1303,7 @@ prof_dump_write(bool propagate_err, const char *s) { prof_dump_buf_end += n; i += n; } + assert(i == slen); return false; } -- cgit v0.12 From 0cfa36a58a91b30996b30c948d67e1daf184c663 Mon Sep 17 00:00:00 2001 From: Qi Wang Date: Mon, 29 Jul 2019 11:30:30 -0700 Subject: Update Changelog for 5.2.1. --- ChangeLog | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/ChangeLog b/ChangeLog index 7c73a8f..e55813b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,6 +4,39 @@ brevity. Much more detail can be found in the git revision history: https://github.com/jemalloc/jemalloc +* 5.2.1 (August 5, 2019) + + This release is primarily about Windows. A critical virtual memory leak is + resolved on all Windows platforms. The regression was present in all releases + since 5.0.0. + + Bug fixes: + - Fix a severe virtual memory leak on Windows. This regression was first + released in 5.0.0. 
(@Ignition, @j0t, @frederik-h, @davidtgoldblatt, + @interwq) + - Fix size 0 handling in posix_memalign(). This regression was first released + in 5.2.0. (@interwq) + - Fix the prof_log unit test which may observe unexpected backtraces from + compiler optimizations. The test was first added in 5.2.0. (@marxin, + @gnzlbg, @interwq) + - Fix the declaration of the extent_avail tree. This regression was first + released in 5.1.0. (@zoulasc) + - Fix an incorrect reference in jeprof. This functionality was first released + in 3.0.0. (@prehistoric-penguin) + - Fix an assertion on the deallocation fast-path. This regression was first + released in 5.2.0. (@yinan1048576) + - Fix the TLS_MODEL attribute in headers. This regression was first released + in 5.0.0. (@zoulasc, @interwq) + + Optimizations and refactors: + - Implement opt.retain on Windows and enable by default on 64-bit. (@interwq, + @davidtgoldblatt) + - Optimize away a branch on the operator delete[] path. (@mgrice) + - Add format annotation to the format generator function. (@zoulasc) + - Refactor and improve the size class header generation. (@yinan1048576) + - Remove best fit. (@djwatson) + - Avoid blocking on background thread locks for stats. (@oranagra, @interwq) + * 5.2.0 (April 2, 2019) This release includes a few notable improvements, which are summarized below: -- cgit v0.12
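As a closing note on the release entries above, a short hedged sketch of how a consumer might confirm at run time which jemalloc it is linked against and whether opt.retain is in effect. It assumes an unprefixed jemalloc build and relies only on the long-standing "version" and "opt.retain" mallctl names.

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        /* Read-only string: the full version, e.g. "5.2.1-0-g...". */
        const char *ver;
        size_t len = sizeof(ver);
        if (mallctl("version", (void *)&ver, &len, NULL, 0) == 0) {
            printf("jemalloc version: %s\n", ver);
        }

        /* Read-only bool: whether retained virtual memory is kept mapped. */
        bool retain;
        len = sizeof(retain);
        if (mallctl("opt.retain", (void *)&retain, &len, NULL, 0) == 0) {
            printf("opt.retain: %s\n", retain ? "true" : "false");
        }
        return 0;
    }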