| field | value | date |
|---|---|---|
| author | Jason Evans <jasone@canonware.com> | 2017-06-13 19:49:58 (GMT) |
| committer | Jason Evans <jasone@canonware.com> | 2017-06-13 19:51:09 (GMT) |
| commit | 5018fe3f0979b7f9db9930accdf7ee31071fd703 | |
| tree | 894055b5ff4ccde3d9d782861d45af4664f12ad2 /test | |
| parent | 04380e79f1e2428bd0ad000bbc6e3d2dfc6b66a5 | |
| parent | ba29113e5a58caeb6b4a65b1db6d8efae79cae45 | |
Merge branch 'dev' (tag: 5.0.0)
Diffstat (limited to 'test')
96 files changed, 4727 insertions, 3704 deletions
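Most of the test-header churn in this diff is mechanical: the JEMALLOC_ENABLE_INLINE/JEMALLOC_INLINE machinery is dropped in favor of unconditional `static inline` definitions, parenthesized returns (`return (p);`) become bare returns, and single-statement `if` bodies gain braces. A condensed before/after of that pattern, pieced together from several hunks below (illustrative only; not itself part of the commit):

```c
#include <stdint.h>

/* Before: prototype and body both gated on JEMALLOC_ENABLE_INLINE. */
#ifndef JEMALLOC_ENABLE_INLINE
double to_real1(uint32_t v);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_))
JEMALLOC_INLINE double to_real1(uint32_t v)
{
	return (v * (1.0/4294967295.0)); /* divided by 2^32-1 */
}
#endif

/* After: one unconditional definition, opening brace on the same line. */
static inline double to_real1(uint32_t v) {
	return v * (1.0/4294967295.0); /* divided by 2^32-1 */
}
```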
diff --git a/test/include/test/SFMT-alti.h b/test/include/test/SFMT-alti.h index 0005df6..a1885db 100644 --- a/test/include/test/SFMT-alti.h +++ b/test/include/test/SFMT-alti.h @@ -33,8 +33,8 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/** - * @file SFMT-alti.h +/** + * @file SFMT-alti.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) * pseudorandom number generator @@ -95,7 +95,7 @@ vector unsigned int vec_recursion(vector unsigned int a, * This function fills the internal state array with pseudorandom * integers. */ -JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { +static inline void gen_rand_all(sfmt_t *ctx) { int i; vector unsigned int r, r1, r2; @@ -119,10 +119,10 @@ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { * This function fills the user-specified array with pseudorandom * integers. * - * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pesudorandom numbers to be generated. */ -JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; vector unsigned int r, r1, r2; @@ -173,7 +173,7 @@ JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { * @param array an 128-bit array to be swaped. * @param size size of 128-bit array. */ -JEMALLOC_INLINE void swap(w128_t *array, int size) { +static inline void swap(w128_t *array, int size) { int i; const vector unsigned char perm = ALTI_SWAP; diff --git a/test/include/test/SFMT-sse2.h b/test/include/test/SFMT-sse2.h index 0314a16..169ad55 100644 --- a/test/include/test/SFMT-sse2.h +++ b/test/include/test/SFMT-sse2.h @@ -33,7 +33,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/** +/** * @file SFMT-sse2.h * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2 * @@ -60,10 +60,10 @@ * @param mask 128-bit mask * @return output */ -JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, +JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, __m128i c, __m128i d, __m128i mask) { __m128i v, x, y, z; - + x = _mm_load_si128(a); y = _mm_srli_epi32(*b, SR1); z = _mm_srli_si128(c, SR2); @@ -81,7 +81,7 @@ JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, * This function fills the internal state array with pseudorandom * integers. */ -JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { +static inline void gen_rand_all(sfmt_t *ctx) { int i; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); @@ -108,10 +108,10 @@ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { * This function fills the user-specified array with pseudorandom * integers. * - * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pesudorandom numbers to be generated. 
*/ -JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); diff --git a/test/include/test/SFMT.h b/test/include/test/SFMT.h index 09c1607..863fc55 100644 --- a/test/include/test/SFMT.h +++ b/test/include/test/SFMT.h @@ -81,91 +81,66 @@ const char *get_idstring(void); int get_min_array_size32(void); int get_min_array_size64(void); -#ifndef JEMALLOC_ENABLE_INLINE -double to_real1(uint32_t v); -double genrand_real1(sfmt_t *ctx); -double to_real2(uint32_t v); -double genrand_real2(sfmt_t *ctx); -double to_real3(uint32_t v); -double genrand_real3(sfmt_t *ctx); -double to_res53(uint64_t v); -double to_res53_mix(uint32_t x, uint32_t y); -double genrand_res53(sfmt_t *ctx); -double genrand_res53_mix(sfmt_t *ctx); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_)) /* These real versions are due to Isaku Wada */ /** generates a random number on [0,1]-real-interval */ -JEMALLOC_INLINE double to_real1(uint32_t v) -{ +static inline double to_real1(uint32_t v) { return v * (1.0/4294967295.0); /* divided by 2^32-1 */ } /** generates a random number on [0,1]-real-interval */ -JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx) -{ +static inline double genrand_real1(sfmt_t *ctx) { return to_real1(gen_rand32(ctx)); } /** generates a random number on [0,1)-real-interval */ -JEMALLOC_INLINE double to_real2(uint32_t v) -{ +static inline double to_real2(uint32_t v) { return v * (1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on [0,1)-real-interval */ -JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx) -{ +static inline double genrand_real2(sfmt_t *ctx) { return to_real2(gen_rand32(ctx)); } /** generates a random number on (0,1)-real-interval */ -JEMALLOC_INLINE double to_real3(uint32_t v) -{ +static inline double to_real3(uint32_t v) { return (((double)v) + 0.5)*(1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on (0,1)-real-interval */ -JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx) -{ +static inline double genrand_real3(sfmt_t *ctx) { return to_real3(gen_rand32(ctx)); } /** These real versions are due to Isaku Wada */ /** generates a random number on [0,1) with 53-bit resolution*/ -JEMALLOC_INLINE double to_res53(uint64_t v) -{ +static inline double to_res53(uint64_t v) { return v * (1.0/18446744073709551616.0L); } /** generates a random number on [0,1) with 53-bit resolution from two * 32 bit integers */ -JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) -{ +static inline double to_res53_mix(uint32_t x, uint32_t y) { return to_res53(x | ((uint64_t)y << 32)); } /** generates a random number on [0,1) with 53-bit resolution */ -JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) -{ +static inline double genrand_res53(sfmt_t *ctx) { return to_res53(gen_rand64(ctx)); -} +} /** generates a random number on [0,1) with 53-bit resolution using 32bit integer. */ -JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) -{ +static inline double genrand_res53_mix(sfmt_t *ctx) { uint32_t x, y; x = gen_rand32(ctx); y = gen_rand32(ctx); return to_res53_mix(x, y); -} -#endif +} #endif diff --git a/test/include/test/btalloc.h b/test/include/test/btalloc.h index c3f9d4d..5877ea7 100644 --- a/test/include/test/btalloc.h +++ b/test/include/test/btalloc.h @@ -1,20 +1,19 @@ /* btalloc() provides a mechanism for allocating via permuted backtraces. 
*/ void *btalloc(size_t size, unsigned bits); -#define btalloc_n_proto(n) \ +#define btalloc_n_proto(n) \ void *btalloc_##n(size_t size, unsigned bits); btalloc_n_proto(0) btalloc_n_proto(1) -#define btalloc_n_gen(n) \ +#define btalloc_n_gen(n) \ void * \ -btalloc_##n(size_t size, unsigned bits) \ -{ \ +btalloc_##n(size_t size, unsigned bits) { \ void *p; \ \ - if (bits == 0) \ + if (bits == 0) { \ p = mallocx(size, 0); \ - else { \ + } else { \ switch (bits & 0x1U) { \ case 0: \ p = (btalloc_0(size, bits >> 1)); \ @@ -27,5 +26,5 @@ btalloc_##n(size_t size, unsigned bits) \ } \ /* Intentionally sabotage tail call optimization. */ \ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ - return (p); \ + return p; \ } diff --git a/test/include/test/extent_hooks.h b/test/include/test/extent_hooks.h new file mode 100644 index 0000000..ea01285 --- /dev/null +++ b/test/include/test/extent_hooks.h @@ -0,0 +1,287 @@ +/* + * Boilerplate code used for testing extent hooks via interception and + * passthrough. + */ + +static void *extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit, + unsigned arena_ind); +static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static bool extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t size_a, size_t size_b, bool committed, + unsigned arena_ind); +static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, + size_t size_a, void *addr_b, size_t size_b, bool committed, + unsigned arena_ind); + +static extent_hooks_t *default_hooks; +static extent_hooks_t hooks = { + extent_alloc_hook, + extent_dalloc_hook, + extent_destroy_hook, + extent_commit_hook, + extent_decommit_hook, + extent_purge_lazy_hook, + extent_purge_forced_hook, + extent_split_hook, + extent_merge_hook +}; + +/* Control whether hook functions pass calls through to default hooks. */ +static bool try_alloc = true; +static bool try_dalloc = true; +static bool try_destroy = true; +static bool try_commit = true; +static bool try_decommit = true; +static bool try_purge_lazy = true; +static bool try_purge_forced = true; +static bool try_split = true; +static bool try_merge = true; + +/* Set to false prior to operations, then introspect after operations. */ +static bool called_alloc; +static bool called_dalloc; +static bool called_destroy; +static bool called_commit; +static bool called_decommit; +static bool called_purge_lazy; +static bool called_purge_forced; +static bool called_split; +static bool called_merge; + +/* Set to false prior to operations, then introspect after operations. 
*/ +static bool did_alloc; +static bool did_dalloc; +static bool did_destroy; +static bool did_commit; +static bool did_decommit; +static bool did_purge_lazy; +static bool did_purge_forced; +static bool did_split; +static bool did_merge; + +#if 0 +# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) +#else +# define TRACE_HOOK(fmt, ...) +#endif + +static void * +extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { + void *ret; + + TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, " + "*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks, + new_addr, size, alignment, *zero ? "true" : "false", *commit ? + "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->alloc, extent_alloc_hook, + "Wrong hook function"); + called_alloc = true; + if (!try_alloc) { + return NULL; + } + ret = default_hooks->alloc(default_hooks, new_addr, size, alignment, + zero, commit, 0); + did_alloc = (ret != NULL); + return ret; +} + +static bool +extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? + "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook, + "Wrong hook function"); + called_dalloc = true; + if (!try_dalloc) { + return true; + } + err = default_hooks->dalloc(default_hooks, addr, size, committed, 0); + did_dalloc = !err; + return err; +} + +static void +extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? 
+ "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->destroy, extent_destroy_hook, + "Wrong hook function"); + called_destroy = true; + if (!try_destroy) { + return; + } + default_hooks->destroy(default_hooks, addr, size, committed, 0); + did_destroy = true; +} + +static bool +extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->commit, extent_commit_hook, + "Wrong hook function"); + called_commit = true; + if (!try_commit) { + return true; + } + err = default_hooks->commit(default_hooks, addr, size, offset, length, + 0); + did_commit = !err; + return err; +} + +static bool +extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->decommit, extent_decommit_hook, + "Wrong hook function"); + called_decommit = true; + if (!try_decommit) { + return true; + } + err = default_hooks->decommit(default_hooks, addr, size, offset, length, + 0); + did_decommit = !err; + return err; +} + +static bool +extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook, + "Wrong hook function"); + called_purge_lazy = true; + if (!try_purge_lazy) { + return true; + } + err = default_hooks->purge_lazy == NULL || + default_hooks->purge_lazy(default_hooks, addr, size, offset, length, + 0); + did_purge_lazy = !err; + return err; +} + +static bool +extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook, + "Wrong hook function"); + called_purge_forced = true; + if (!try_purge_forced) { + return true; + } + err = default_hooks->purge_forced == NULL || + default_hooks->purge_forced(default_hooks, addr, size, offset, + length, 0); + did_purge_forced = !err; + return err; +} + +static bool +extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, " + "size_b=%zu, 
committed=%s, arena_ind=%u)\n", __func__, extent_hooks, + addr, size, size_a, size_b, committed ? "true" : "false", + arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->split, extent_split_hook, + "Wrong hook function"); + called_split = true; + if (!try_split) { + return true; + } + err = (default_hooks->split == NULL || + default_hooks->split(default_hooks, addr, size, size_a, size_b, + committed, 0)); + did_split = !err; + return err; +} + +static bool +extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p " + "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks, + addr_a, size_a, addr_b, size_b, committed ? "true" : "false", + arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->merge, extent_merge_hook, + "Wrong hook function"); + called_merge = true; + if (!try_merge) { + return true; + } + err = (default_hooks->merge == NULL || + default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b, + committed, 0)); + did_merge = !err; + return err; +} + +static void +extent_hooks_prep(void) { + size_t sz; + + sz = sizeof(default_hooks); + assert_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz, + NULL, 0), 0, "Unexpected mallctl() error"); +} diff --git a/test/include/test/jemalloc_test.h.in b/test/include/test/jemalloc_test.h.in index 1f36e46..67caa86 100644 --- a/test/include/test/jemalloc_test.h.in +++ b/test/include/test/jemalloc_test.h.in @@ -1,3 +1,7 @@ +#ifdef __cplusplus +extern "C" { +#endif + #include <limits.h> #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX @@ -39,7 +43,8 @@ #ifdef JEMALLOC_UNIT_TEST # define JEMALLOC_JET # define JEMALLOC_MANGLE -# include "jemalloc/internal/jemalloc_internal.h" +# include "jemalloc/internal/jemalloc_preamble.h" +# include "jemalloc/internal/jemalloc_internal_includes.h" /******************************************************************************/ /* @@ -47,7 +52,8 @@ * expose the minimum necessary internal utility code (to avoid re-implementing * essentially identical code within the test infrastructure). */ -#elif defined(JEMALLOC_INTEGRATION_TEST) +#elif defined(JEMALLOC_INTEGRATION_TEST) || \ + defined(JEMALLOC_INTEGRATION_CPP_TEST) # define JEMALLOC_MANGLE # include "jemalloc/jemalloc@install_suffix@.h" # include "jemalloc/internal/jemalloc_internal_defs.h" @@ -63,19 +69,17 @@ static const bool config_debug = # define JEMALLOC_N(n) @private_namespace@##n # include "jemalloc/internal/private_namespace.h" +# include "jemalloc/internal/hooks.h" -# define JEMALLOC_H_TYPES -# define JEMALLOC_H_STRUCTS -# define JEMALLOC_H_EXTERNS -# define JEMALLOC_H_INLINES +/* Hermetic headers. */ +# include "jemalloc/internal/assert.h" +# include "jemalloc/internal/malloc_io.h" # include "jemalloc/internal/nstime.h" # include "jemalloc/internal/util.h" + +/* Non-hermetic headers. 
*/ # include "jemalloc/internal/qr.h" # include "jemalloc/internal/ql.h" -# undef JEMALLOC_H_TYPES -# undef JEMALLOC_H_STRUCTS -# undef JEMALLOC_H_EXTERNS -# undef JEMALLOC_H_INLINES /******************************************************************************/ /* @@ -90,7 +94,8 @@ static const bool config_debug = # include "jemalloc/jemalloc_protos_jet.h" # define JEMALLOC_JET -# include "jemalloc/internal/jemalloc_internal.h" +# include "jemalloc/internal/jemalloc_preamble.h" +# include "jemalloc/internal/jemalloc_internal_includes.h" # include "jemalloc/internal/public_unnamespace.h" # undef JEMALLOC_JET @@ -122,7 +127,7 @@ static const bool config_debug = #include "test/test.h" #include "test/timer.h" #include "test/thd.h" -#define MEXP 19937 +#define MEXP 19937 #include "test/SFMT.h" /******************************************************************************/ @@ -135,7 +140,7 @@ static const bool config_debug = #undef not_implemented #undef assert_not_implemented -#define assert(e) do { \ +#define assert(e) do { \ if (!(e)) { \ malloc_printf( \ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ @@ -144,20 +149,25 @@ static const bool config_debug = } \ } while (0) -#define not_reached() do { \ +#define not_reached() do { \ malloc_printf( \ "<jemalloc>: %s:%d: Unreachable code reached\n", \ __FILE__, __LINE__); \ abort(); \ } while (0) -#define not_implemented() do { \ +#define not_implemented() do { \ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ __FILE__, __LINE__); \ abort(); \ } while (0) -#define assert_not_implemented(e) do { \ - if (!(e)) \ +#define assert_not_implemented(e) do { \ + if (!(e)) { \ not_implemented(); \ + } \ } while (0) + +#ifdef __cplusplus +} +#endif diff --git a/test/include/test/math.h b/test/include/test/math.h index b057b29..efba086 100644 --- a/test/include/test/math.h +++ b/test/include/test/math.h @@ -1,12 +1,3 @@ -#ifndef JEMALLOC_ENABLE_INLINE -double ln_gamma(double x); -double i_gamma(double x, double p, double ln_gamma_p); -double pt_norm(double p); -double pt_chi2(double p, double df, double ln_gamma_df_2); -double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_)) /* * Compute the natural log of Gamma(x), accurate to 10 decimal places. * @@ -15,9 +6,8 @@ double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function * [S14]. Communications of the ACM 9(9):684. */ -JEMALLOC_INLINE double -ln_gamma(double x) -{ +static inline double +ln_gamma(double x) { double f, z; assert(x > 0.0); @@ -31,14 +21,15 @@ ln_gamma(double x) } x = z; f = -log(f); - } else + } else { f = 0.0; + } z = 1.0 / (x * x); - return (f + (x-0.5) * log(x) - x + 0.918938533204673 + + return f + (x-0.5) * log(x) - x + 0.918938533204673 + (((-0.000595238095238 * z + 0.000793650793651) * z - - 0.002777777777778) * z + 0.083333333333333) / x); + 0.002777777777778) * z + 0.083333333333333) / x; } /* @@ -50,9 +41,8 @@ ln_gamma(double x) * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. * Applied Statistics 19:285-287. 
*/ -JEMALLOC_INLINE double -i_gamma(double x, double p, double ln_gamma_p) -{ +static inline double +i_gamma(double x, double p, double ln_gamma_p) { double acu, factor, oflo, gin, term, rn, a, b, an, dif; double pn[6]; unsigned i; @@ -60,8 +50,9 @@ i_gamma(double x, double p, double ln_gamma_p) assert(p > 0.0); assert(x >= 0.0); - if (x == 0.0) - return (0.0); + if (x == 0.0) { + return 0.0; + } acu = 1.0e-10; oflo = 1.0e30; @@ -80,7 +71,7 @@ i_gamma(double x, double p, double ln_gamma_p) gin += term; if (term <= acu) { gin *= factor / p; - return (gin); + return gin; } } } else { @@ -99,23 +90,26 @@ i_gamma(double x, double p, double ln_gamma_p) b += 2.0; term += 1.0; an = a * term; - for (i = 0; i < 2; i++) + for (i = 0; i < 2; i++) { pn[i+4] = b * pn[i+2] - an * pn[i]; + } if (pn[5] != 0.0) { rn = pn[4] / pn[5]; dif = fabs(gin - rn); if (dif <= acu && dif <= acu * rn) { gin = 1.0 - factor * gin; - return (gin); + return gin; } gin = rn; } - for (i = 0; i < 4; i++) + for (i = 0; i < 4; i++) { pn[i] = pn[i+2]; + } if (fabs(pn[4]) >= oflo) { - for (i = 0; i < 4; i++) + for (i = 0; i < 4; i++) { pn[i] /= oflo; + } } } } @@ -131,9 +125,8 @@ i_gamma(double x, double p, double ln_gamma_p) * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal * distribution. Applied Statistics 37(3):477-484. */ -JEMALLOC_INLINE double -pt_norm(double p) -{ +static inline double +pt_norm(double p) { double q, r, ret; assert(p > 0.0 && p < 1.0); @@ -142,7 +135,7 @@ pt_norm(double p) if (fabs(q) <= 0.425) { /* p close to 1/2. */ r = 0.180625 - q * q; - return (q * (((((((2.5090809287301226727e3 * r + + return q * (((((((2.5090809287301226727e3 * r + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) @@ -151,12 +144,13 @@ pt_norm(double p) 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) - * r + 1.0)); + * r + 1.0); } else { - if (q < 0.0) + if (q < 0.0) { r = p; - else + } else { r = 1.0 - p; + } assert(r > 0.0); r = sqrt(-log(r)); @@ -198,9 +192,10 @@ pt_norm(double p) 5.99832206555887937690e-1) * r + 1.0)); } - if (q < 0.0) + if (q < 0.0) { ret = -ret; - return (ret); + } + return ret; } } @@ -218,9 +213,8 @@ pt_norm(double p) * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. */ -JEMALLOC_INLINE double -pt_chi2(double p, double df, double ln_gamma_df_2) -{ +static inline double +pt_chi2(double p, double df, double ln_gamma_df_2) { double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; unsigned i; @@ -236,8 +230,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2) if (df < -1.24 * log(p)) { /* Starting approximation for small Chi^2. */ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); - if (ch - e < 0.0) - return (ch); + if (ch - e < 0.0) { + return ch; + } } else { if (df > 0.32) { x = pt_norm(p); @@ -263,8 +258,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2) * (13.32 + 3.0 * ch)) / p2; ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + c * aa) * p2 / p1) / t; - if (fabs(q / ch - 1.0) - 0.01 <= 0.0) + if (fabs(q / ch - 1.0) - 0.01 <= 0.0) { break; + } } } } @@ -273,8 +269,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2) /* Calculation of seven-term Taylor series. 
*/ q = ch; p1 = 0.5 * ch; - if (p1 < 0.0) - return (-1.0); + if (p1 < 0.0) { + return -1.0; + } p2 = p - i_gamma(p1, xx, ln_gamma_df_2); t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); b = t / ch; @@ -290,11 +287,12 @@ pt_chi2(double p, double df, double ln_gamma_df_2) s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 - b * (s4 - b * (s5 - b * s6)))))); - if (fabs(q / ch - 1.0) <= e) + if (fabs(q / ch - 1.0) <= e) { break; + } } - return (ch); + return ch; } /* @@ -302,10 +300,7 @@ pt_chi2(double p, double df, double ln_gamma_df_2) * compute the upper limit on the definite integral from [0..z] that satisfies * p. */ -JEMALLOC_INLINE double -pt_gamma(double p, double shape, double scale, double ln_gamma_shape) -{ - - return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale); +static inline double +pt_gamma(double p, double shape, double scale, double ln_gamma_shape) { + return pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale; } -#endif diff --git a/test/include/test/mq.h b/test/include/test/mq.h index 7c4df49..af2c078 100644 --- a/test/include/test/mq.h +++ b/test/include/test/mq.h @@ -26,9 +26,9 @@ void mq_nanosleep(unsigned ns); * does not perform any cleanup of messages, since it knows nothing of their * payloads. */ -#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) +#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) -#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ +#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ typedef struct { \ mtx_t lock; \ ql_head(a_mq_msg_type) msgs; \ @@ -37,31 +37,28 @@ typedef struct { \ a_attr bool \ a_prefix##init(a_mq_type *mq) { \ \ - if (mtx_init(&mq->lock)) \ - return (true); \ + if (mtx_init(&mq->lock)) { \ + return true; \ + } \ ql_new(&mq->msgs); \ mq->count = 0; \ - return (false); \ + return false; \ } \ a_attr void \ -a_prefix##fini(a_mq_type *mq) \ -{ \ - \ +a_prefix##fini(a_mq_type *mq) { \ mtx_fini(&mq->lock); \ } \ a_attr unsigned \ -a_prefix##count(a_mq_type *mq) \ -{ \ +a_prefix##count(a_mq_type *mq) { \ unsigned count; \ \ mtx_lock(&mq->lock); \ count = mq->count; \ mtx_unlock(&mq->lock); \ - return (count); \ + return count; \ } \ a_attr a_mq_msg_type * \ -a_prefix##tryget(a_mq_type *mq) \ -{ \ +a_prefix##tryget(a_mq_type *mq) { \ a_mq_msg_type *msg; \ \ mtx_lock(&mq->lock); \ @@ -71,35 +68,36 @@ a_prefix##tryget(a_mq_type *mq) \ mq->count--; \ } \ mtx_unlock(&mq->lock); \ - return (msg); \ + return msg; \ } \ a_attr a_mq_msg_type * \ -a_prefix##get(a_mq_type *mq) \ -{ \ +a_prefix##get(a_mq_type *mq) { \ a_mq_msg_type *msg; \ unsigned ns; \ \ msg = a_prefix##tryget(mq); \ - if (msg != NULL) \ - return (msg); \ + if (msg != NULL) { \ + return msg; \ + } \ \ ns = 1; \ while (true) { \ mq_nanosleep(ns); \ msg = a_prefix##tryget(mq); \ - if (msg != NULL) \ - return (msg); \ + if (msg != NULL) { \ + return msg; \ + } \ if (ns < 1000*1000*1000) { \ /* Double sleep time, up to max 1 second. 
*/ \ ns <<= 1; \ - if (ns > 1000*1000*1000) \ + if (ns > 1000*1000*1000) { \ ns = 1000*1000*1000; \ + } \ } \ } \ } \ a_attr void \ -a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \ -{ \ +a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \ \ mtx_lock(&mq->lock); \ ql_elm_new(msg, a_field); \ diff --git a/test/include/test/test.h b/test/include/test/test.h index c8112eb..fd0e526 100644 --- a/test/include/test/test.h +++ b/test/include/test/test.h @@ -1,6 +1,6 @@ -#define ASSERT_BUFSIZE 256 +#define ASSERT_BUFSIZE 256 -#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ +#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ t a_ = (a); \ t b_ = (b); \ if (!(a_ cmp b_)) { \ @@ -8,8 +8,8 @@ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ - "(%s) "#cmp" (%s) --> " \ - "%"pri" "#neg_cmp" %"pri": ", \ + "(%s) " #cmp " (%s) --> " \ + "%" pri " " #neg_cmp " %" pri ": ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_, b_); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ @@ -17,200 +17,200 @@ } \ } while (0) -#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ +#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ !=, "p", __VA_ARGS__) -#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ +#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ ==, "p", __VA_ARGS__) -#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ +#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ !=, "p", __VA_ARGS__) -#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ +#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ ==, "p", __VA_ARGS__) -#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) -#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) -#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) -#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) -#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) -#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) - -#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) -#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) -#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) -#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) -#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) -#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) - -#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) -#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) -#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) -#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) -#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) -#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) - -#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) -#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) -#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) -#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) -#define assert_u_ge(a, b, ...) 
assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) -#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) - -#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ +#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) +#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) +#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) +#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) +#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) +#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) + +#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) +#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) +#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) +#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) +#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) +#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) + +#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) +#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) +#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) +#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) +#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) +#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) + +#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) +#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) +#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) +#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) +#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) +#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) + +#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ !=, "ld", __VA_ARGS__) -#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ +#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ ==, "ld", __VA_ARGS__) -#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ +#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ >=, "ld", __VA_ARGS__) -#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ +#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ >, "ld", __VA_ARGS__) -#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ +#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ <, "ld", __VA_ARGS__) -#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ +#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ <=, "ld", __VA_ARGS__) -#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ +#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ a, b, ==, !=, "lu", __VA_ARGS__) -#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ +#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ a, b, !=, ==, "lu", __VA_ARGS__) -#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ +#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ a, b, <, >=, "lu", __VA_ARGS__) -#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ +#define assert_lu_le(a, b, ...) 
assert_cmp(unsigned long, \ a, b, <=, >, "lu", __VA_ARGS__) -#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ +#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ a, b, >=, <, "lu", __VA_ARGS__) -#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ +#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ a, b, >, <=, "lu", __VA_ARGS__) -#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ +#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ !=, "qd", __VA_ARGS__) -#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ +#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ ==, "qd", __VA_ARGS__) -#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ +#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ >=, "qd", __VA_ARGS__) -#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ +#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ >, "qd", __VA_ARGS__) -#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ +#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ <, "qd", __VA_ARGS__) -#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ +#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ <=, "qd", __VA_ARGS__) -#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ +#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ a, b, ==, !=, "qu", __VA_ARGS__) -#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ +#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ a, b, !=, ==, "qu", __VA_ARGS__) -#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ +#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ a, b, <, >=, "qu", __VA_ARGS__) -#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ +#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ a, b, <=, >, "qu", __VA_ARGS__) -#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ +#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ a, b, >=, <, "qu", __VA_ARGS__) -#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ +#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ a, b, >, <=, "qu", __VA_ARGS__) -#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ +#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ !=, "jd", __VA_ARGS__) -#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ +#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ ==, "jd", __VA_ARGS__) -#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ +#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ >=, "jd", __VA_ARGS__) -#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ +#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ >, "jd", __VA_ARGS__) -#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ +#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ <, "jd", __VA_ARGS__) -#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ +#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ <=, "jd", __VA_ARGS__) -#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ +#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ !=, "ju", __VA_ARGS__) -#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ +#define assert_ju_ne(a, b, ...) 
assert_cmp(uintmax_t, a, b, !=, \ ==, "ju", __VA_ARGS__) -#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ +#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ >=, "ju", __VA_ARGS__) -#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ +#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ >, "ju", __VA_ARGS__) -#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ +#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ <, "ju", __VA_ARGS__) -#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ +#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ <=, "ju", __VA_ARGS__) -#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ +#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ !=, "zd", __VA_ARGS__) -#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ +#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ ==, "zd", __VA_ARGS__) -#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ +#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ >=, "zd", __VA_ARGS__) -#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ +#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ >, "zd", __VA_ARGS__) -#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ +#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ <, "zd", __VA_ARGS__) -#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ +#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ <=, "zd", __VA_ARGS__) -#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ +#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ !=, "zu", __VA_ARGS__) -#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ +#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ ==, "zu", __VA_ARGS__) -#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ +#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ >=, "zu", __VA_ARGS__) -#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ +#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ >, "zu", __VA_ARGS__) -#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ +#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ <, "zu", __VA_ARGS__) -#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ +#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ <=, "zu", __VA_ARGS__) -#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ +#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ !=, FMTd32, __VA_ARGS__) -#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ +#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ ==, FMTd32, __VA_ARGS__) -#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ +#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ >=, FMTd32, __VA_ARGS__) -#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ +#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ >, FMTd32, __VA_ARGS__) -#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ +#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ <, FMTd32, __VA_ARGS__) -#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ +#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ <=, FMTd32, __VA_ARGS__) -#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ +#define assert_u32_eq(a, b, ...) 
assert_cmp(uint32_t, a, b, ==, \ !=, FMTu32, __VA_ARGS__) -#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ +#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ ==, FMTu32, __VA_ARGS__) -#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ +#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ >=, FMTu32, __VA_ARGS__) -#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ +#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ >, FMTu32, __VA_ARGS__) -#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ +#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ <, FMTu32, __VA_ARGS__) -#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ +#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ <=, FMTu32, __VA_ARGS__) -#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ +#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ !=, FMTd64, __VA_ARGS__) -#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ +#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ ==, FMTd64, __VA_ARGS__) -#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ +#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ >=, FMTd64, __VA_ARGS__) -#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ +#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ >, FMTd64, __VA_ARGS__) -#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ +#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ <, FMTd64, __VA_ARGS__) -#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ +#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ <=, FMTd64, __VA_ARGS__) -#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ +#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ !=, FMTu64, __VA_ARGS__) -#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ +#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ ==, FMTu64, __VA_ARGS__) -#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ +#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ >=, FMTu64, __VA_ARGS__) -#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ +#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ >, FMTu64, __VA_ARGS__) -#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ +#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ <, FMTu64, __VA_ARGS__) -#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ +#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ <=, FMTu64, __VA_ARGS__) -#define assert_b_eq(a, b, ...) do { \ +#define assert_b_eq(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ == b_)) { \ @@ -226,7 +226,7 @@ p_test_fail(prefix, message); \ } \ } while (0) -#define assert_b_ne(a, b, ...) do { \ +#define assert_b_ne(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ != b_)) { \ @@ -242,10 +242,10 @@ p_test_fail(prefix, message); \ } \ } while (0) -#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) -#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) +#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) +#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) -#define assert_str_eq(a, b, ...) do { \ +#define assert_str_eq(a, b, ...) 
do { \ if (strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ @@ -258,7 +258,7 @@ p_test_fail(prefix, message); \ } \ } while (0) -#define assert_str_ne(a, b, ...) do { \ +#define assert_str_ne(a, b, ...) do { \ if (!strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ @@ -272,7 +272,7 @@ } \ } while (0) -#define assert_not_reached(...) do { \ +#define assert_not_reached(...) do { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ @@ -296,25 +296,27 @@ typedef enum { typedef void (test_t)(void); -#define TEST_BEGIN(f) \ +#define TEST_BEGIN(f) \ static void \ -f(void) \ -{ \ +f(void) { \ p_test_init(#f); -#define TEST_END \ +#define TEST_END \ goto label_test_end; \ label_test_end: \ p_test_fini(); \ } -#define test(...) \ +#define test(...) \ p_test(__VA_ARGS__, NULL) -#define test_no_malloc_init(...) \ +#define test_no_reentrancy(...) \ + p_test_no_reentrancy(__VA_ARGS__, NULL) + +#define test_no_malloc_init(...) \ p_test_no_malloc_init(__VA_ARGS__, NULL) -#define test_skip_if(e) do { \ +#define test_skip_if(e) do { \ if (e) { \ test_skip("%s:%s:%d: Test skipped: (%s)", \ __func__, __FILE__, __LINE__, #e); \ @@ -322,11 +324,14 @@ label_test_end: \ } \ } while (0) +bool test_is_reentrant(); + void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); /* For private use by macros. */ test_status_t p_test(test_t *t, ...); +test_status_t p_test_no_reentrancy(test_t *t, ...); test_status_t p_test_no_malloc_init(test_t *t, ...); void p_test_init(const char *name); void p_test_fini(void); diff --git a/test/integration/MALLOCX_ARENA.c b/test/integration/MALLOCX_ARENA.c index 910a096..222164d 100644 --- a/test/integration/MALLOCX_ARENA.c +++ b/test/integration/MALLOCX_ARENA.c @@ -1,6 +1,6 @@ #include "test/jemalloc_test.h" -#define NTHREADS 10 +#define NTHREADS 10 static bool have_dss = #ifdef JEMALLOC_DSS @@ -11,16 +11,15 @@ static bool have_dss = ; void * -thd_start(void *arg) -{ +thd_start(void *arg) { unsigned thread_ind = (unsigned)(uintptr_t)arg; unsigned arena_ind; void *p; size_t sz; sz = sizeof(arena_ind); - assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), - 0, "Error in arenas.extend"); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Error in arenas.create"); if (thread_ind % 4 != 3) { size_t mib[3]; @@ -42,11 +41,10 @@ thd_start(void *arg) assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, 0); - return (NULL); + return NULL; } -TEST_BEGIN(test_MALLOCX_ARENA) -{ +TEST_BEGIN(test_MALLOCX_ARENA) { thd_t thds[NTHREADS]; unsigned i; @@ -55,15 +53,14 @@ TEST_BEGIN(test_MALLOCX_ARENA) (void *)(uintptr_t)i); } - for (i = 0; i < NTHREADS; i++) + for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); + } } TEST_END int -main(void) -{ - - return (test( - test_MALLOCX_ARENA)); +main(void) { + return test( + test_MALLOCX_ARENA); } diff --git a/test/integration/aligned_alloc.c b/test/integration/aligned_alloc.c index 5843842..536b67e 100644 --- a/test/integration/aligned_alloc.c +++ b/test/integration/aligned_alloc.c @@ -1,7 +1,6 @@ #include "test/jemalloc_test.h" -#define CHUNK 0x400000 -#define MAXALIGN (((size_t)1) << 23) +#define MAXALIGN (((size_t)1) << 23) /* * On systems which can't merge extents, tests that call this function generate @@ -9,15 +8,12 @@ * potential OOM on e.g. 32-bit Windows. 
*/ static void -purge(void) -{ - +purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } -TEST_BEGIN(test_alignment_errors) -{ +TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; @@ -38,8 +34,7 @@ TEST_BEGIN(test_alignment_errors) } TEST_END -TEST_BEGIN(test_oom_errors) -{ +TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; @@ -83,15 +78,15 @@ TEST_BEGIN(test_oom_errors) } TEST_END -TEST_BEGIN(test_alignment_and_size) -{ -#define NITER 4 +TEST_BEGIN(test_alignment_and_size) { +#define NITER 4 size_t alignment, size, total; unsigned i; void *ps[NITER]; - for (i = 0; i < NITER; i++) + for (i = 0; i < NITER; i++) { ps[i] = NULL; + } for (alignment = 8; alignment <= MAXALIGN; @@ -112,8 +107,9 @@ TEST_BEGIN(test_alignment_and_size) alignment, size, size, buf); } total += malloc_usable_size(ps[i]); - if (total >= (MAXALIGN << 1)) + if (total >= (MAXALIGN << 1)) { break; + } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { @@ -129,11 +125,9 @@ TEST_BEGIN(test_alignment_and_size) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_alignment_errors, test_oom_errors, - test_alignment_and_size)); + test_alignment_and_size); } diff --git a/test/integration/allocated.c b/test/integration/allocated.c index 6ce145b..1425fd0 100644 --- a/test/integration/allocated.c +++ b/test/integration/allocated.c @@ -9,8 +9,7 @@ static const bool config_stats = ; void * -thd_start(void *arg) -{ +thd_start(void *arg) { int err; void *p; uint64_t a0, a1, d0, d1; @@ -19,15 +18,17 @@ thd_start(void *arg) sz = sizeof(a0); if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) { - if (err == ENOENT) + if (err == ENOENT) { goto label_ENOENT; + } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(ap0); if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) { - if (err == ENOENT) + if (err == ENOENT) { goto label_ENOENT; + } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } @@ -37,16 +38,18 @@ thd_start(void *arg) sz = sizeof(d0); if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) { - if (err == ENOENT) + if (err == ENOENT) { goto label_ENOENT; + } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(dp0); if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL, 0))) { - if (err == ENOENT) + if (err == ENOENT) { goto label_ENOENT; + } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } @@ -88,23 +91,20 @@ thd_start(void *arg) "Deallocated memory counter should increase by at least the amount " "explicitly deallocated"); - return (NULL); + return NULL; label_ENOENT: assert_false(config_stats, "ENOENT should only be returned if stats are disabled"); test_skip("\"thread.allocated\" mallctl not available"); - return (NULL); + return NULL; } -TEST_BEGIN(test_main_thread) -{ - +TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END -TEST_BEGIN(test_subthread) -{ +TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); @@ -113,14 +113,12 @@ TEST_BEGIN(test_subthread) TEST_END int -main(void) -{ - +main(void) { /* Run tests multiple times to check for bad interactions. 
*/ - return (test( + return test( test_main_thread, test_subthread, test_main_thread, test_subthread, - test_main_thread)); + test_main_thread); } diff --git a/test/integration/chunk.c b/test/integration/chunk.c deleted file mode 100644 index 997567a..0000000 --- a/test/integration/chunk.c +++ /dev/null @@ -1,290 +0,0 @@ -#include "test/jemalloc_test.h" - -static chunk_hooks_t orig_hooks; -static chunk_hooks_t old_hooks; - -static bool do_dalloc = true; -static bool do_decommit; - -static bool did_alloc; -static bool did_dalloc; -static bool did_commit; -static bool did_decommit; -static bool did_purge; -static bool did_split; -static bool did_merge; - -#if 0 -# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) -#else -# define TRACE_HOOK(fmt, ...) -#endif - -void * -chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, - bool *commit, unsigned arena_ind) -{ - - TRACE_HOOK("%s(new_addr=%p, size=%zu, alignment=%zu, *zero=%s, " - "*commit=%s, arena_ind=%u)\n", __func__, new_addr, size, alignment, - *zero ? "true" : "false", *commit ? "true" : "false", arena_ind); - did_alloc = true; - return (old_hooks.alloc(new_addr, size, alignment, zero, commit, - arena_ind)); -} - -bool -chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind) -{ - - TRACE_HOOK("%s(chunk=%p, size=%zu, committed=%s, arena_ind=%u)\n", - __func__, chunk, size, committed ? "true" : "false", arena_ind); - did_dalloc = true; - if (!do_dalloc) - return (true); - return (old_hooks.dalloc(chunk, size, committed, arena_ind)); -} - -bool -chunk_commit(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - bool err; - - TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, " - "arena_ind=%u)\n", __func__, chunk, size, offset, length, - arena_ind); - err = old_hooks.commit(chunk, size, offset, length, arena_ind); - did_commit = !err; - return (err); -} - -bool -chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - bool err; - - TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, " - "arena_ind=%u)\n", __func__, chunk, size, offset, length, - arena_ind); - if (!do_decommit) - return (true); - err = old_hooks.decommit(chunk, size, offset, length, arena_ind); - did_decommit = !err; - return (err); -} - -bool -chunk_purge(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - - TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu " - "arena_ind=%u)\n", __func__, chunk, size, offset, length, - arena_ind); - did_purge = true; - return (old_hooks.purge(chunk, size, offset, length, arena_ind)); -} - -bool -chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, - bool committed, unsigned arena_ind) -{ - - TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, " - "committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a, - size_b, committed ? "true" : "false", arena_ind); - did_split = true; - return (old_hooks.split(chunk, size, size_a, size_b, committed, - arena_ind)); -} - -bool -chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, - bool committed, unsigned arena_ind) -{ - - TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, " - "committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b, - size_b, committed ? 
"true" : "false", arena_ind); - did_merge = true; - return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b, - committed, arena_ind)); -} - -TEST_BEGIN(test_chunk) -{ - void *p; - size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz; - unsigned arena_ind; - int flags; - size_t hooks_mib[3], purge_mib[3]; - size_t hooks_miblen, purge_miblen; - chunk_hooks_t new_hooks = { - chunk_alloc, - chunk_dalloc, - chunk_commit, - chunk_decommit, - chunk_purge, - chunk_split, - chunk_merge - }; - bool xallocx_success_a, xallocx_success_b, xallocx_success_c; - - sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), - 0, "Unexpected mallctl() failure"); - flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; - - /* Install custom chunk hooks. */ - hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); - assert_d_eq(mallctlnametomib("arena.0.chunk_hooks", hooks_mib, - &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); - hooks_mib[1] = (size_t)arena_ind; - old_size = sizeof(chunk_hooks_t); - new_size = sizeof(chunk_hooks_t); - assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, - &old_size, (void *)&new_hooks, new_size), 0, - "Unexpected chunk_hooks error"); - orig_hooks = old_hooks; - assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error"); - assert_ptr_ne(old_hooks.dalloc, chunk_dalloc, - "Unexpected dalloc error"); - assert_ptr_ne(old_hooks.commit, chunk_commit, - "Unexpected commit error"); - assert_ptr_ne(old_hooks.decommit, chunk_decommit, - "Unexpected decommit error"); - assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error"); - assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error"); - assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error"); - - /* Get large size classes. */ - sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL, - 0), 0, "Unexpected arenas.lrun.0.size failure"); - assert_d_eq(mallctl("arenas.lrun.1.size", (void *)&large1, &sz, NULL, - 0), 0, "Unexpected arenas.lrun.1.size failure"); - - /* Get huge size classes. */ - assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL, - 0), 0, "Unexpected arenas.hchunk.0.size failure"); - assert_d_eq(mallctl("arenas.hchunk.1.size", (void *)&huge1, &sz, NULL, - 0), 0, "Unexpected arenas.hchunk.1.size failure"); - assert_d_eq(mallctl("arenas.hchunk.2.size", (void *)&huge2, &sz, NULL, - 0), 0, "Unexpected arenas.hchunk.2.size failure"); - - /* Test dalloc/decommit/purge cascade. */ - purge_miblen = sizeof(purge_mib)/sizeof(size_t); - assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen), - 0, "Unexpected mallctlnametomib() failure"); - purge_mib[1] = (size_t)arena_ind; - do_dalloc = false; - do_decommit = false; - p = mallocx(huge0 * 2, flags); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - did_dalloc = false; - did_decommit = false; - did_purge = false; - did_split = false; - xallocx_success_a = (xallocx(p, huge0, 0, flags) == huge0); - assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), - 0, "Unexpected arena.%u.purge error", arena_ind); - if (xallocx_success_a) { - assert_true(did_dalloc, "Expected dalloc"); - assert_false(did_decommit, "Unexpected decommit"); - assert_true(did_purge, "Expected purge"); - } - assert_true(did_split, "Expected split"); - dallocx(p, flags); - do_dalloc = true; - - /* Test decommit/commit and observe split/merge. 
*/ - do_dalloc = false; - do_decommit = true; - p = mallocx(huge0 * 2, flags); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - did_decommit = false; - did_commit = false; - did_split = false; - did_merge = false; - xallocx_success_b = (xallocx(p, huge0, 0, flags) == huge0); - assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), - 0, "Unexpected arena.%u.purge error", arena_ind); - if (xallocx_success_b) - assert_true(did_split, "Expected split"); - xallocx_success_c = (xallocx(p, huge0 * 2, 0, flags) == huge0 * 2); - assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); - if (xallocx_success_b && xallocx_success_c) - assert_true(did_merge, "Expected merge"); - dallocx(p, flags); - do_dalloc = true; - do_decommit = false; - - /* Test purge for partial-chunk huge allocations. */ - if (huge0 * 2 > huge2) { - /* - * There are at least four size classes per doubling, so a - * successful xallocx() from size=huge2 to size=huge1 is - * guaranteed to leave trailing purgeable memory. - */ - p = mallocx(huge2, flags); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - did_purge = false; - assert_zu_eq(xallocx(p, huge1, 0, flags), huge1, - "Unexpected xallocx() failure"); - assert_true(did_purge, "Expected purge"); - dallocx(p, flags); - } - - /* Test decommit for large allocations. */ - do_decommit = true; - p = mallocx(large1, flags); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), - 0, "Unexpected arena.%u.purge error", arena_ind); - did_decommit = false; - assert_zu_eq(xallocx(p, large0, 0, flags), large0, - "Unexpected xallocx() failure"); - assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), - 0, "Unexpected arena.%u.purge error", arena_ind); - did_commit = false; - assert_zu_eq(xallocx(p, large1, 0, flags), large1, - "Unexpected xallocx() failure"); - assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); - dallocx(p, flags); - do_decommit = false; - - /* Make sure non-huge allocation succeeds. */ - p = mallocx(42, flags); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - dallocx(p, flags); - - /* Restore chunk hooks. */ - assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, - (void *)&old_hooks, new_size), 0, "Unexpected chunk_hooks error"); - assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, - &old_size, NULL, 0), 0, "Unexpected chunk_hooks error"); - assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc, - "Unexpected alloc error"); - assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc, - "Unexpected dalloc error"); - assert_ptr_eq(old_hooks.commit, orig_hooks.commit, - "Unexpected commit error"); - assert_ptr_eq(old_hooks.decommit, orig_hooks.decommit, - "Unexpected decommit error"); - assert_ptr_eq(old_hooks.purge, orig_hooks.purge, - "Unexpected purge error"); - assert_ptr_eq(old_hooks.split, orig_hooks.split, - "Unexpected split error"); - assert_ptr_eq(old_hooks.merge, orig_hooks.merge, - "Unexpected merge error"); -} -TEST_END - -int -main(void) -{ - - return (test(test_chunk)); -} diff --git a/test/integration/cpp/basic.cpp b/test/integration/cpp/basic.cpp new file mode 100644 index 0000000..65890ec --- /dev/null +++ b/test/integration/cpp/basic.cpp @@ -0,0 +1,25 @@ +#include <memory> +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_basic) { + auto foo = new long(4); + assert_ptr_not_null(foo, "Unexpected new[] failure"); + delete foo; + // Test nullptr handling. 
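+	// (C++ guarantees that delete on a null pointer is a no-op, so the
+	// replacement operator delete must tolerate nullptr as well.)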
+ foo = nullptr; + delete foo; + + auto bar = new long; + assert_ptr_not_null(bar, "Unexpected new failure"); + delete bar; + // Test nullptr handling. + bar = nullptr; + delete bar; +} +TEST_END + +int +main() { + return test( + test_basic); +} diff --git a/test/integration/extent.c b/test/integration/extent.c new file mode 100644 index 0000000..7262b80 --- /dev/null +++ b/test/integration/extent.c @@ -0,0 +1,190 @@ +#include "test/jemalloc_test.h" + +#include "test/extent_hooks.h" + +static bool +check_background_thread_enabled(void) { + bool enabled; + size_t sz = sizeof(bool); + int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0); + if (ret == ENOENT) { + return false; + } + assert_d_eq(ret, 0, "Unexpected mallctl error"); + return enabled; +} + +static void +test_extent_body(unsigned arena_ind) { + void *p; + size_t large0, large1, large2, sz; + size_t purge_mib[3]; + size_t purge_miblen; + int flags; + bool xallocx_success_a, xallocx_success_b, xallocx_success_c; + + flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + + /* Get large size classes. */ + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected arenas.lextent.0.size failure"); + assert_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL, + 0), 0, "Unexpected arenas.lextent.1.size failure"); + assert_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL, + 0), 0, "Unexpected arenas.lextent.2.size failure"); + + /* Test dalloc/decommit/purge cascade. */ + purge_miblen = sizeof(purge_mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen), + 0, "Unexpected mallctlnametomib() failure"); + purge_mib[1] = (size_t)arena_ind; + try_dalloc = false; + try_decommit = false; + p = mallocx(large0 * 2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + called_dalloc = false; + called_decommit = false; + did_purge_lazy = false; + did_purge_forced = false; + called_split = false; + xallocx_success_a = (xallocx(p, large0, 0, flags) == large0); + assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), + 0, "Unexpected arena.%u.purge error", arena_ind); + if (xallocx_success_a) { + assert_true(called_dalloc, "Expected dalloc call"); + assert_true(called_decommit, "Expected decommit call"); + assert_true(did_purge_lazy || did_purge_forced, + "Expected purge"); + } + assert_true(called_split, "Expected split call"); + dallocx(p, flags); + try_dalloc = true; + + /* Test decommit/commit and observe split/merge. */ + try_dalloc = false; + try_decommit = true; + p = mallocx(large0 * 2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + did_decommit = false; + did_commit = false; + called_split = false; + did_split = false; + did_merge = false; + xallocx_success_b = (xallocx(p, large0, 0, flags) == large0); + assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), + 0, "Unexpected arena.%u.purge error", arena_ind); + if (xallocx_success_b) { + assert_true(did_split, "Expected split"); + } + xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2); + if (did_split) { + assert_b_eq(did_decommit, did_commit, + "Expected decommit/commit match"); + } + if (xallocx_success_b && xallocx_success_c) { + assert_true(did_merge, "Expected merge"); + } + dallocx(p, flags); + try_dalloc = true; + try_decommit = false; + + /* Make sure non-large allocation succeeds. 
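+	 * (Even a small request is presumably backed, ultimately, by an extent
+	 * obtained through the installed hooks, so this also sanity-checks the
+	 * small-allocation path.)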
*/ + p = mallocx(42, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, flags); +} + +TEST_BEGIN(test_extent_manual_hook) { + unsigned arena_ind; + size_t old_size, new_size, sz; + size_t hooks_mib[3]; + size_t hooks_miblen; + extent_hooks_t *new_hooks, *old_hooks; + + extent_hooks_prep(); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + + /* Install custom extent hooks. */ + hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, + &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); + hooks_mib[1] = (size_t)arena_ind; + old_size = sizeof(extent_hooks_t *); + new_hooks = &hooks; + new_size = sizeof(extent_hooks_t *); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, + &old_size, (void *)&new_hooks, new_size), 0, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->alloc, extent_alloc_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->dalloc, extent_dalloc_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->commit, extent_commit_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->decommit, extent_decommit_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->split, extent_split_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->merge, extent_merge_hook, + "Unexpected extent_hooks error"); + + test_skip_if(check_background_thread_enabled()); + test_extent_body(arena_ind); + + /* Restore extent hooks. 
*/ + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, + (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error"); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, + &old_size, NULL, 0), 0, "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->alloc, default_hooks->alloc, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->dalloc, default_hooks->dalloc, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->commit, default_hooks->commit, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->decommit, default_hooks->decommit, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->split, default_hooks->split, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->merge, default_hooks->merge, + "Unexpected extent_hooks error"); +} +TEST_END + +TEST_BEGIN(test_extent_auto_hook) { + unsigned arena_ind; + size_t new_size, sz; + extent_hooks_t *new_hooks; + + extent_hooks_prep(); + + sz = sizeof(unsigned); + new_hooks = &hooks; + new_size = sizeof(extent_hooks_t *); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, + (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure"); + + test_skip_if(check_background_thread_enabled()); + test_extent_body(arena_ind); +} +TEST_END + +int +main(void) { + return test( + test_extent_manual_hook, + test_extent_auto_hook); +} diff --git a/test/integration/chunk.sh b/test/integration/extent.sh index 0cc2187..0cc2187 100644 --- a/test/integration/chunk.sh +++ b/test/integration/extent.sh diff --git a/test/integration/mallocx.c b/test/integration/mallocx.c index 5a9058d..b0b5cda 100644 --- a/test/integration/mallocx.c +++ b/test/integration/mallocx.c @@ -1,8 +1,7 @@ #include "test/jemalloc_test.h" static unsigned -get_nsizes_impl(const char *cmd) -{ +get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; @@ -10,19 +9,16 @@ get_nsizes_impl(const char *cmd) assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); - return (ret); + return ret; } static unsigned -get_nhuge(void) -{ - - return (get_nsizes_impl("arenas.nhchunks")); +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); } static size_t -get_size_impl(const char *cmd, size_t ind) -{ +get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; @@ -36,14 +32,12 @@ get_size_impl(const char *cmd, size_t ind) assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); - return (ret); + return ret; } static size_t -get_huge_size(size_t ind) -{ - - return (get_size_impl("arenas.hchunk.0.size", ind)); +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); } /* @@ -52,21 +46,18 @@ get_huge_size(size_t ind) * potential OOM on e.g. 32-bit Windows. 
*/ static void -purge(void) -{ - +purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } -TEST_BEGIN(test_overflow) -{ - size_t hugemax; +TEST_BEGIN(test_overflow) { + size_t largemax; - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); - assert_ptr_null(mallocx(hugemax+1, 0), - "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1); + assert_ptr_null(mallocx(largemax+1, 0), + "Expected OOM for mallocx(size=%#zx, 0)", largemax+1); assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); @@ -80,9 +71,8 @@ TEST_BEGIN(test_overflow) } TEST_END -TEST_BEGIN(test_oom) -{ - size_t hugemax; +TEST_BEGIN(test_oom) { + size_t largemax; bool oom; void *ptrs[3]; unsigned i; @@ -91,19 +81,21 @@ TEST_BEGIN(test_oom) * It should be impossible to allocate three objects that each consume * nearly half the virtual address space. */ - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); oom = false; for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { - ptrs[i] = mallocx(hugemax, 0); - if (ptrs[i] == NULL) + ptrs[i] = mallocx(largemax, 0); + if (ptrs[i] == NULL) { oom = true; + } } assert_true(oom, "Expected OOM during series of calls to mallocx(size=%zu, 0)", - hugemax); + largemax); for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { - if (ptrs[i] != NULL) + if (ptrs[i] != NULL) { dallocx(ptrs[i], 0); + } } purge(); @@ -121,9 +113,8 @@ TEST_BEGIN(test_oom) } TEST_END -TEST_BEGIN(test_basic) -{ -#define MAXSZ (((size_t)1) << 23) +TEST_BEGIN(test_basic) { +#define MAXSZ (((size_t)1) << 23) size_t sz; for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { @@ -159,16 +150,16 @@ TEST_BEGIN(test_basic) } TEST_END -TEST_BEGIN(test_alignment_and_size) -{ -#define MAXALIGN (((size_t)1) << 23) -#define NITER 4 +TEST_BEGIN(test_alignment_and_size) { +#define MAXALIGN (((size_t)1) << 23) +#define NITER 4 size_t nsz, rsz, sz, alignment, total; unsigned i; void *ps[NITER]; - for (i = 0; i < NITER; i++) + for (i = 0; i < NITER; i++) { ps[i] = NULL; + } for (alignment = 8; alignment <= MAXALIGN; @@ -201,8 +192,9 @@ TEST_BEGIN(test_alignment_and_size) " alignment=%zu, size=%zu", ps[i], alignment, sz); total += rsz; - if (total >= (MAXALIGN << 1)) + if (total >= (MAXALIGN << 1)) { break; + } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { @@ -219,12 +211,10 @@ TEST_BEGIN(test_alignment_and_size) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_overflow, test_oom, test_basic, - test_alignment_and_size)); + test_alignment_and_size); } diff --git a/test/integration/overflow.c b/test/integration/overflow.c index 84a3565..6a9785b 100644 --- a/test/integration/overflow.c +++ b/test/integration/overflow.c @@ -1,20 +1,19 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_overflow) -{ - unsigned nhchunks; +TEST_BEGIN(test_overflow) { + unsigned nlextents; size_t mib[4]; size_t sz, miblen, max_size_class; void *p; sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0), - 0, "Unexpected mallctl() error"); + assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, + 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); - assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); - mib[2] = nhchunks - 1; + mib[2] = nlextents - 1; sz = 
sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, @@ -41,9 +40,7 @@ TEST_BEGIN(test_overflow) TEST_END int -main(void) -{ - - return (test( - test_overflow)); +main(void) { + return test( + test_overflow); } diff --git a/test/integration/posix_memalign.c b/test/integration/posix_memalign.c index e22e102..2c2726d 100644 --- a/test/integration/posix_memalign.c +++ b/test/integration/posix_memalign.c @@ -1,7 +1,6 @@ #include "test/jemalloc_test.h" -#define CHUNK 0x400000 -#define MAXALIGN (((size_t)1) << 23) +#define MAXALIGN (((size_t)1) << 23) /* * On systems which can't merge extents, tests that call this function generate @@ -9,15 +8,12 @@ * potential OOM on e.g. 32-bit Windows. */ static void -purge(void) -{ - +purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } -TEST_BEGIN(test_alignment_errors) -{ +TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; @@ -36,8 +32,7 @@ TEST_BEGIN(test_alignment_errors) } TEST_END -TEST_BEGIN(test_oom_errors) -{ +TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; @@ -75,16 +70,16 @@ TEST_BEGIN(test_oom_errors) } TEST_END -TEST_BEGIN(test_alignment_and_size) -{ -#define NITER 4 +TEST_BEGIN(test_alignment_and_size) { +#define NITER 4 size_t alignment, size, total; unsigned i; int err; void *ps[NITER]; - for (i = 0; i < NITER; i++) + for (i = 0; i < NITER; i++) { ps[i] = NULL; + } for (alignment = 8; alignment <= MAXALIGN; @@ -106,8 +101,9 @@ TEST_BEGIN(test_alignment_and_size) alignment, size, size, buf); } total += malloc_usable_size(ps[i]); - if (total >= (MAXALIGN << 1)) + if (total >= (MAXALIGN << 1)) { break; + } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { @@ -123,11 +119,9 @@ TEST_BEGIN(test_alignment_and_size) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_alignment_errors, test_oom_errors, - test_alignment_and_size)); + test_alignment_and_size); } diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c index 506bf1c..7821ca5 100644 --- a/test/integration/rallocx.c +++ b/test/integration/rallocx.c @@ -1,8 +1,7 @@ #include "test/jemalloc_test.h" static unsigned -get_nsizes_impl(const char *cmd) -{ +get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; @@ -10,19 +9,16 @@ get_nsizes_impl(const char *cmd) assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); - return (ret); + return ret; } static unsigned -get_nhuge(void) -{ - - return (get_nsizes_impl("arenas.nhchunks")); +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); } static size_t -get_size_impl(const char *cmd, size_t ind) -{ +get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; @@ -36,25 +32,22 @@ get_size_impl(const char *cmd, size_t ind) assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); - return (ret); + return ret; } static size_t -get_huge_size(size_t ind) -{ - - return (get_size_impl("arenas.hchunk.0.size", ind)); +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); } -TEST_BEGIN(test_grow_and_shrink) -{ +TEST_BEGIN(test_grow_and_shrink) { void *p, *q; size_t tsz; -#define NCYCLES 3 +#define NCYCLES 3 unsigned i, j; -#define NSZS 2500 +#define NSZS 1024 size_t szs[NSZS]; -#define MAXSZ ZU(12 * 1024 * 1024) +#define MAXSZ ZU(12 * 1024 * 1024) p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); @@ -92,8 +85,7 @@ TEST_BEGIN(test_grow_and_shrink) TEST_END static bool -validate_fill(const void *p, uint8_t c, size_t offset, size_t len) -{ +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { bool ret = false; const uint8_t *buf = (const uint8_t *)p; size_t i; @@ -108,16 +100,15 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len) } } - return (ret); + return ret; } -TEST_BEGIN(test_zero) -{ +TEST_BEGIN(test_zero) { void *p, *q; size_t psz, qsz, i, j; size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; -#define FILL_BYTE 0xaaU -#define RANGE 2048 +#define FILL_BYTE 0xaaU +#define RANGE 2048 for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { size_t start_size = start_sizes[i]; @@ -156,11 +147,10 @@ TEST_BEGIN(test_zero) } TEST_END -TEST_BEGIN(test_align) -{ +TEST_BEGIN(test_align) { void *p, *q; size_t align; -#define MAX_ALIGN (ZU(1) << 25) +#define MAX_ALIGN (ZU(1) << 25) align = ZU(1); p = mallocx(1, MALLOCX_ALIGN(align)); @@ -181,13 +171,12 @@ TEST_BEGIN(test_align) } TEST_END -TEST_BEGIN(test_lg_align_and_zero) -{ +TEST_BEGIN(test_lg_align_and_zero) { void *p, *q; unsigned lg_align; size_t sz; -#define MAX_LG_ALIGN 25 -#define MAX_VALIDATE (ZU(1) << 22) +#define MAX_LG_ALIGN 25 +#define MAX_VALIDATE (ZU(1) << 22) lg_align = 0; p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); @@ -219,18 +208,17 @@ TEST_BEGIN(test_lg_align_and_zero) } TEST_END -TEST_BEGIN(test_overflow) -{ - size_t hugemax; +TEST_BEGIN(test_overflow) { + size_t largemax; void *p; - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); - assert_ptr_null(rallocx(p, hugemax+1, 0), - "Expected OOM for rallocx(p, size=%#zx, 0)", hugemax+1); + assert_ptr_null(rallocx(p, largemax+1, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1); assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); @@ -247,13 +235,11 @@ TEST_BEGIN(test_overflow) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_grow_and_shrink, test_zero, test_align, test_lg_align_and_zero, - test_overflow)); + test_overflow); } diff --git a/test/integration/sdallocx.c b/test/integration/sdallocx.c index f92e058..e7ea1d8 100644 --- a/test/integration/sdallocx.c +++ b/test/integration/sdallocx.c @@ -1,23 +1,22 @@ #include "test/jemalloc_test.h" -#define MAXALIGN (((size_t)1) << 22) -#define NITER 3 +#define MAXALIGN (((size_t)1) << 22) +#define NITER 3 -TEST_BEGIN(test_basic) -{ +TEST_BEGIN(test_basic) { void *ptr = mallocx(64, 0); sdallocx(ptr, 64, 0); } TEST_END -TEST_BEGIN(test_alignment_and_size) -{ +TEST_BEGIN(test_alignment_and_size) { size_t nsz, sz, alignment, total; unsigned i; void *ps[NITER]; - for (i = 0; i < NITER; i++) + for (i = 0; i < NITER; i++) { ps[i] = NULL; + } for (alignment = 8; alignment <= MAXALIGN; @@ 
-32,8 +31,9 @@ TEST_BEGIN(test_alignment_and_size) ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); total += nsz; - if (total >= (MAXALIGN << 1)) + if (total >= (MAXALIGN << 1)) { break; + } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { @@ -48,10 +48,8 @@ TEST_BEGIN(test_alignment_and_size) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_basic, - test_alignment_and_size)); + test_alignment_and_size); } diff --git a/test/integration/thread_arena.c b/test/integration/thread_arena.c index 7a35a63..1e5ec05 100644 --- a/test/integration/thread_arena.c +++ b/test/integration/thread_arena.c @@ -1,10 +1,9 @@ #include "test/jemalloc_test.h" -#define NTHREADS 10 +#define NTHREADS 10 void * -thd_start(void *arg) -{ +thd_start(void *arg) { unsigned main_arena_ind = *(unsigned *)arg; void *p; unsigned arena_ind; @@ -35,14 +34,19 @@ thd_start(void *arg) assert_u_eq(arena_ind, main_arena_ind, "Arena index should be same as for main thread"); - return (NULL); + return NULL; } -TEST_BEGIN(test_thread_arena) -{ +static void +mallctl_failure(int err) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); +} + +TEST_BEGIN(test_thread_arena) { void *p; - unsigned arena_ind; - size_t size; int err; thd_t thds[NTHREADS]; unsigned i; @@ -50,13 +54,15 @@ TEST_BEGIN(test_thread_arena) p = malloc(1); assert_ptr_not_null(p, "Error in malloc()"); - size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, - 0))) { - char buf[BUFERROR_BUF]; + unsigned arena_ind, old_arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Arena creation failure"); - buferror(err, buf, sizeof(buf)); - test_fail("Error in mallctl(): %s", buf); + size_t size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size, + (void *)&arena_ind, sizeof(arena_ind))) != 0) { + mallctl_failure(err); } for (i = 0; i < NTHREADS; i++) { @@ -69,13 +75,12 @@ TEST_BEGIN(test_thread_arena) thd_join(thds[i], (void *)&join_ret); assert_zd_eq(join_ret, 0, "Unexpected thread join error"); } + free(p); } TEST_END int -main(void) -{ - - return (test( - test_thread_arena)); +main(void) { + return test( + test_thread_arena); } diff --git a/test/integration/thread_tcache_enabled.c b/test/integration/thread_tcache_enabled.c index 2c2825e..0c343a6 100644 --- a/test/integration/thread_tcache_enabled.c +++ b/test/integration/thread_tcache_enabled.c @@ -1,30 +1,11 @@ #include "test/jemalloc_test.h" -static const bool config_tcache = -#ifdef JEMALLOC_TCACHE - true -#else - false -#endif - ; - void * -thd_start(void *arg) -{ - int err; - size_t sz; +thd_start(void *arg) { bool e0, e1; - - sz = sizeof(bool); - if ((err = mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, - 0))) { - if (err == ENOENT) { - assert_false(config_tcache, - "ENOENT should only be returned if tcache is " - "disabled"); - } - goto label_ENOENT; - } + size_t sz = sizeof(bool); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); if (e0) { e1 = false; @@ -78,21 +59,17 @@ thd_start(void *arg) assert_false(e0, "tcache should be disabled"); free(malloc(1)); - return (NULL); -label_ENOENT: + return NULL; test_skip("\"thread.tcache.enabled\" mallctl not available"); - return (NULL); + return NULL; } -TEST_BEGIN(test_main_thread) -{ - +TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END 
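The thd_start() hunk above reads the "thread.tcache.enabled" flag and then flips it. The same mallctl() slot accepts an old-value pointer and a new value in a single call; a minimal sketch of that read-modify-write pattern (the test name here is hypothetical):

	#include "test/jemalloc_test.h"

	TEST_BEGIN(test_tcache_toggle_sketch) {
		bool e0, e1 = false;
		size_t sz = sizeof(bool);

		/* Read the previous setting and request the new one in one call. */
		assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
		    (void *)&e1, sz), 0, "Unexpected mallctl() error");
		/* e0 now holds the setting in effect before the write. */
	}
	TEST_END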
-TEST_BEGIN(test_subthread) -{ +TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); @@ -101,14 +78,12 @@ TEST_BEGIN(test_subthread) TEST_END int -main(void) -{ - +main(void) { /* Run tests multiple times to check for bad interactions. */ - return (test( + return test( test_main_thread, test_subthread, test_main_thread, test_subthread, - test_main_thread)); + test_main_thread); } diff --git a/test/integration/xallocx.c b/test/integration/xallocx.c index 2517a81..cd0ca04 100644 --- a/test/integration/xallocx.c +++ b/test/integration/xallocx.c @@ -6,21 +6,19 @@ * xallocx() would ordinarily be able to extend. */ static unsigned -arena_ind(void) -{ +arena_ind(void) { static unsigned ind = 0; if (ind == 0) { size_t sz = sizeof(ind); - assert_d_eq(mallctl("arenas.extend", (void *)&ind, &sz, NULL, + assert_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL, 0), 0, "Unexpected mallctl failure creating arena"); } - return (ind); + return ind; } -TEST_BEGIN(test_same_size) -{ +TEST_BEGIN(test_same_size) { void *p; size_t sz, tsz; @@ -35,8 +33,7 @@ TEST_BEGIN(test_same_size) } TEST_END -TEST_BEGIN(test_extra_no_move) -{ +TEST_BEGIN(test_extra_no_move) { void *p; size_t sz, tsz; @@ -51,8 +48,7 @@ TEST_BEGIN(test_extra_no_move) } TEST_END -TEST_BEGIN(test_no_move_fail) -{ +TEST_BEGIN(test_no_move_fail) { void *p; size_t sz, tsz; @@ -68,8 +64,7 @@ TEST_BEGIN(test_no_move_fail) TEST_END static unsigned -get_nsizes_impl(const char *cmd) -{ +get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; @@ -77,33 +72,21 @@ get_nsizes_impl(const char *cmd) assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); - return (ret); + return ret; } static unsigned -get_nsmall(void) -{ - - return (get_nsizes_impl("arenas.nbins")); -} - -static unsigned -get_nlarge(void) -{ - - return (get_nsizes_impl("arenas.nlruns")); +get_nsmall(void) { + return get_nsizes_impl("arenas.nbins"); } static unsigned -get_nhuge(void) -{ - - return (get_nsizes_impl("arenas.nhchunks")); +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); } static size_t -get_size_impl(const char *cmd, size_t ind) -{ +get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; @@ -117,38 +100,26 @@ get_size_impl(const char *cmd, size_t ind) assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); - return (ret); + return ret; } static size_t -get_small_size(size_t ind) -{ - - return (get_size_impl("arenas.bin.0.size", ind)); +get_small_size(size_t ind) { + return get_size_impl("arenas.bin.0.size", ind); } static size_t -get_large_size(size_t ind) -{ - - return (get_size_impl("arenas.lrun.0.size", ind)); +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); } -static size_t -get_huge_size(size_t ind) -{ - - return (get_size_impl("arenas.hchunk.0.size", ind)); -} - -TEST_BEGIN(test_size) -{ - size_t small0, hugemax; +TEST_BEGIN(test_size) { + size_t small0, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); @@ -158,60 +129,58 @@ TEST_BEGIN(test_size) "Unexpected xallocx() behavior"); /* Test largest supported size. 
*/ - assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax, + assert_zu_le(xallocx(p, largemax, 0, 0), largemax, "Unexpected xallocx() behavior"); /* Test size overflow. */ - assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax, + assert_zu_le(xallocx(p, largemax+1, 0, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax, + assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END -TEST_BEGIN(test_size_extra_overflow) -{ - size_t small0, hugemax; +TEST_BEGIN(test_size_extra_overflow) { + size_t small0, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test overflows that can be resolved by clamping extra. */ - assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax, + assert_zu_le(xallocx(p, largemax-1, 2, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax, + assert_zu_le(xallocx(p, largemax, 1, 0), largemax, "Unexpected xallocx() behavior"); - /* Test overflow such that hugemax-size underflows. */ - assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax, + /* Test overflow such that largemax-size underflows. */ + assert_zu_le(xallocx(p, largemax+1, 2, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax, + assert_zu_le(xallocx(p, largemax+2, 3, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax, + assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax, + assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END -TEST_BEGIN(test_extra_small) -{ - size_t small0, small1, hugemax; +TEST_BEGIN(test_extra_small) { + size_t small0, small1, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); small1 = get_small_size(1); - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); @@ -226,7 +195,7 @@ TEST_BEGIN(test_extra_small) "Unexpected xallocx() behavior"); /* Test size+extra overflow. */ - assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0, + assert_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, "Unexpected xallocx() behavior"); @@ -235,133 +204,69 @@ TEST_BEGIN(test_extra_small) } TEST_END -TEST_BEGIN(test_extra_large) -{ +TEST_BEGIN(test_extra_large) { int flags = MALLOCX_ARENA(arena_ind()); - size_t smallmax, large0, large1, large2, huge0, hugemax; + size_t smallmax, large1, large2, large3, largemax; void *p; /* Get size classes. */ smallmax = get_small_size(get_nsmall()-1); - large0 = get_large_size(0); large1 = get_large_size(1); large2 = get_large_size(2); - huge0 = get_huge_size(0); - hugemax = get_huge_size(get_nhuge()-1); - - p = mallocx(large2, flags); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - - assert_zu_eq(xallocx(p, large2, 0, flags), large2, - "Unexpected xallocx() behavior"); - /* Test size decrease with zero extra. 
*/ - assert_zu_eq(xallocx(p, large0, 0, flags), large0, - "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, smallmax, 0, flags), large0, - "Unexpected xallocx() behavior"); - - assert_zu_eq(xallocx(p, large2, 0, flags), large2, - "Unexpected xallocx() behavior"); - /* Test size decrease with non-zero extra. */ - assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2, - "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2, - "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1, - "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0, - "Unexpected xallocx() behavior"); - - assert_zu_eq(xallocx(p, large0, 0, flags), large0, - "Unexpected xallocx() behavior"); - /* Test size increase with zero extra. */ - assert_zu_eq(xallocx(p, large2, 0, flags), large2, - "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge0, 0, flags), large2, - "Unexpected xallocx() behavior"); - - assert_zu_eq(xallocx(p, large0, 0, flags), large0, - "Unexpected xallocx() behavior"); - /* Test size increase with non-zero extra. */ - assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0, - "Unexpected xallocx() behavior"); - - assert_zu_eq(xallocx(p, large0, 0, flags), large0, - "Unexpected xallocx() behavior"); - /* Test size increase with non-zero extra. */ - assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2, - "Unexpected xallocx() behavior"); - - assert_zu_eq(xallocx(p, large2, 0, flags), large2, - "Unexpected xallocx() behavior"); - /* Test size+extra overflow. */ - assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0, - "Unexpected xallocx() behavior"); - - dallocx(p, flags); -} -TEST_END - -TEST_BEGIN(test_extra_huge) -{ - int flags = MALLOCX_ARENA(arena_ind()); - size_t largemax, huge1, huge2, huge3, hugemax; - void *p; - - /* Get size classes. */ + large3 = get_large_size(3); largemax = get_large_size(get_nlarge()-1); - huge1 = get_huge_size(1); - huge2 = get_huge_size(2); - huge3 = get_huge_size(3); - hugemax = get_huge_size(get_nhuge()-1); - p = mallocx(huge3, flags); + p = mallocx(large3, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); - assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, + assert_zu_eq(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); /* Test size decrease with zero extra. */ - assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, largemax, 0, flags), huge1, + assert_zu_ge(xallocx(p, smallmax, 0, flags), large1, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, - "Unexpected xallocx() behavior"); + if (xallocx(p, large3, 0, flags) != large3) { + p = rallocx(p, large3, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } /* Test size decrease with non-zero extra. 
*/ - assert_zu_eq(xallocx(p, huge1, huge3 - huge1, flags), huge3, + assert_zu_eq(xallocx(p, large1, large3 - large1, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge2, huge3 - huge2, flags), huge3, + assert_zu_eq(xallocx(p, large2, large3 - large2, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2, + assert_zu_ge(xallocx(p, large1, large2 - large1, flags), large2, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, largemax, huge1 - largemax, flags), huge1, + assert_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with zero extra. */ - assert_zu_le(xallocx(p, huge3, 0, flags), huge3, + assert_zu_le(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, hugemax+1, 0, flags), huge3, + assert_zu_le(xallocx(p, largemax+1, 0, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ - assert_zu_le(xallocx(p, huge1, SIZE_T_MAX - huge1, flags), hugemax, + assert_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ - assert_zu_le(xallocx(p, huge1, huge3 - huge1, flags), huge3, + assert_zu_le(xallocx(p, large1, large3 - large1, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, - "Unexpected xallocx() behavior"); + if (xallocx(p, large3, 0, flags) != large3) { + p = rallocx(p, large3, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } /* Test size+extra overflow. */ - assert_zu_le(xallocx(p, huge3, hugemax - huge3 + 1, flags), hugemax, + assert_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax, "Unexpected xallocx() behavior"); dallocx(p, flags); @@ -369,8 +274,7 @@ TEST_BEGIN(test_extra_huge) TEST_END static void -print_filled_extents(const void *p, uint8_t c, size_t len) -{ +print_filled_extents(const void *p, uint8_t c, size_t len) { const uint8_t *pc = (const uint8_t *)p; size_t i, range0; uint8_t c0; @@ -389,30 +293,30 @@ print_filled_extents(const void *p, uint8_t c, size_t len) } static bool -validate_fill(const void *p, uint8_t c, size_t offset, size_t len) -{ +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { const uint8_t *pc = (const uint8_t *)p; bool err; size_t i; for (i = offset, err = false; i < offset+len; i++) { - if (pc[i] != c) + if (pc[i] != c) { err = true; + } } - if (err) + if (err) { print_filled_extents(p, c, offset + len); + } - return (err); + return err; } static void -test_zero(size_t szmin, size_t szmax) -{ +test_zero(size_t szmin, size_t szmax) { int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO; size_t sz, nsz; void *p; -#define FILL_BYTE 0x7aU +#define FILL_BYTE 0x7aU sz = szmax; p = mallocx(sz, flags); @@ -430,15 +334,19 @@ test_zero(size_t szmin, size_t szmax) /* Shrink in place so that we can expect growing in place to succeed. 
*/ sz = szmin; - assert_zu_eq(xallocx(p, sz, 0, flags), sz, - "Unexpected xallocx() error"); + if (xallocx(p, sz, 0, flags) != sz) { + p = rallocx(p, sz, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); for (sz = szmin; sz < szmax; sz = nsz) { nsz = nallocx(sz+1, flags); - assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz, - "Unexpected xallocx() failure"); + if (xallocx(p, sz+1, 0, flags) != nsz) { + p = rallocx(p, sz+1, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); assert_false(validate_fill(p, 0x00, sz, nsz-sz), @@ -451,35 +359,20 @@ test_zero(size_t szmin, size_t szmax) dallocx(p, flags); } -TEST_BEGIN(test_zero_large) -{ - size_t large0, largemax; +TEST_BEGIN(test_zero_large) { + size_t large0, large1; /* Get size classes. */ large0 = get_large_size(0); - largemax = get_large_size(get_nlarge()-1); - - test_zero(large0, largemax); -} -TEST_END - -TEST_BEGIN(test_zero_huge) -{ - size_t huge0, huge1; - - /* Get size classes. */ - huge0 = get_huge_size(0); - huge1 = get_huge_size(1); + large1 = get_large_size(1); - test_zero(huge1, huge0 * 2); + test_zero(large1, large0 * 2); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_same_size, test_extra_no_move, test_no_move_fail, @@ -487,7 +380,5 @@ main(void) test_size_extra_overflow, test_extra_small, test_extra_large, - test_extra_huge, - test_zero_large, - test_zero_huge)); + test_zero_large); } diff --git a/test/src/SFMT.c b/test/src/SFMT.c index 80cabe0..c05e218 100644 --- a/test/src/SFMT.c +++ b/test/src/SFMT.c @@ -33,7 +33,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -/** +/** * @file SFMT.c * @brief SIMD oriented Fast Mersenne Twister(SFMT) * @@ -45,7 +45,7 @@ * * The new BSD License is applied to this software, see LICENSE.txt */ -#define SFMT_C_ +#define SFMT_C_ #include "test/jemalloc_test.h" #include "test/SFMT-params.h" @@ -108,7 +108,7 @@ struct sfmt_s { /*-------------------------------------- FILE GLOBAL VARIABLES - internal state, index counter and flag + internal state, index counter and flag --------------------------------------*/ /** a parity check vector which certifies the period of 2^{MEXP} */ static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; /*---------------- STATIC FUNCTIONS ----------------*/ -JEMALLOC_INLINE_C int idxof(int i); +static inline int idxof(int i); #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) -JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift); -JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift); +static inline void rshift128(w128_t *out, w128_t const *in, int shift); +static inline void lshift128(w128_t *out, w128_t const *in, int shift); #endif -JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx); -JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); -JEMALLOC_INLINE_C uint32_t func1(uint32_t x); -JEMALLOC_INLINE_C uint32_t func2(uint32_t x); +static inline void gen_rand_all(sfmt_t *ctx); +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); +static inline uint32_t func1(uint32_t x); +static inline uint32_t func2(uint32_t x); static void period_certification(sfmt_t *ctx); #if defined(BIG_ENDIAN64) && !defined(ONLY64) -JEMALLOC_INLINE_C void swap(w128_t *array, int size); +static inline void swap(w128_t *array, int size); #endif #if defined(HAVE_ALTIVEC) @@ -138,15 +138,15 @@ JEMALLOC_INLINE_C void swap(w128_t *array, int size); #endif /** - * This function simulate a 64-bit index of LITTLE ENDIAN + * This function simulates a 64-bit index of LITTLE ENDIAN * in BIG ENDIAN machine. 
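 * (Under ONLY64 this reduces to XOR-ing the low bit, so idxof(0) == 1 and
 * idxof(1) == 0, swapping adjacent 32-bit words; otherwise it is the
 * identity.)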
*/ #ifdef ONLY64 -JEMALLOC_INLINE_C int idxof(int i) { +static inline int idxof(int i) { return i ^ 1; } #else -JEMALLOC_INLINE_C int idxof(int i) { +static inline int idxof(int i) { return i; } #endif @@ -160,7 +160,7 @@ JEMALLOC_INLINE_C int idxof(int i) { */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 -JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { +static inline void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); @@ -175,7 +175,7 @@ JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { out->u[3] = (uint32_t)oh; } #else -JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { +static inline void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); @@ -199,7 +199,7 @@ JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { * @param shift the shift value */ #ifdef ONLY64 -JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { +static inline void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); @@ -214,7 +214,7 @@ JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { out->u[3] = (uint32_t)oh; } #else -JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { +static inline void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); @@ -241,37 +241,37 @@ JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 -JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, +static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); - r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] ^ (d->u[0] << SL1); - r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] ^ (d->u[1] << SL1); - r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] ^ (d->u[2] << SL1); - r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] ^ (d->u[3] << SL1); } #else -JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, +static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); - r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] ^ (d->u[0] << SL1); - r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] ^ (d->u[1] << SL1); - r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] ^ (d->u[2] << SL1); - r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] ^ (d->u[3] << SL1); } #endif @@ 
-282,7 +282,7 @@ JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, * This function fills the internal state array with pseudorandom * integers. */ -JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) { +static inline void gen_rand_all(sfmt_t *ctx) { int i; w128_t *r1, *r2; @@ -306,10 +306,10 @@ JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) { * This function fills the user-specified array with pseudorandom * integers. * - * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated. */ -JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; w128_t *r1, *r2; @@ -343,7 +343,7 @@ JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { #endif #if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) -JEMALLOC_INLINE_C void swap(w128_t *array, int size) { +static inline void swap(w128_t *array, int size) { int i; uint32_t x, y; @@ -476,7 +476,7 @@ uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { * This function generates and returns 64-bit pseudorandom number. * init_gen_rand or init_by_array must be called before this function. * The function gen_rand64 should not be called after gen_rand32, - * unless an initialization is again executed. + * unless an initialization is again executed. * @return 64-bit pseudorandom number */ uint64_t gen_rand64(sfmt_t *ctx) { @@ -618,7 +618,7 @@ sfmt_t *init_gen_rand(uint32_t seed) { psfmt32[idxof(0)] = seed; for (i = 1; i < N32; i++) { - psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] + psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] ^ (psfmt32[idxof(i - 1)] >> 30)) + i; } @@ -668,7 +668,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { } else { count = N32; } - r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] + r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] ^ psfmt32[idxof(N32 - 1)]); psfmt32[idxof(mid)] += r; r += key_length; @@ -677,7 +677,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { count--; for (i = 1, j = 0; (j < count) && (j < key_length); j++) { - r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += init_key[j] + i; @@ -686,7 +686,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { i = (i + 1) % N32; } for (; j < count; j++) { - r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += i; @@ -695,7 +695,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { i = (i + 1) % N32; } for (j = 0; j < N32; j++) { - r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] ^= r; r -= i; diff --git a/test/src/btalloc.c b/test/src/btalloc.c index 9a253d9..d570952 100644 --- a/test/src/btalloc.c +++ b/test/src/btalloc.c @@ -1,8 +1,6 @@ #include "test/jemalloc_test.h" void * -btalloc(size_t size, unsigned bits) -{ - - return (btalloc_0(size, bits)); +btalloc(size_t size, unsigned bits) { + return btalloc_0(size, bits); } 
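The SFMT.c changes above are mechanical: JEMALLOC_INLINE_C becomes static inline and trailing whitespace is stripped, while the generator's API is unchanged. A minimal usage sketch against that API; init_gen_rand(), gen_rand64(), and gen_rand32_range() all appear in the diff, while fini_gen_rand() is assumed to be the matching teardown helper:

	#include "test/jemalloc_test.h"

	int
	main(void) {
		sfmt_t *ctx = init_gen_rand(12345); /* seed the generator */

		/*
		 * Per the comment above, gen_rand64() must not be called
		 * after a 32-bit draw unless the generator is reinitialized,
		 * so take the 64-bit value first.
		 */
		uint64_t r64 = gen_rand64(ctx);
		uint32_t r32 = gen_rand32_range(ctx, 100); /* a value below 100 */

		malloc_printf("r64=%"FMTu64" r32=%u\n", r64, r32);
		fini_gen_rand(ctx); /* assumed teardown helper */
		return 0;
	}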
diff --git a/test/src/math.c b/test/src/math.c index 887a363..1758c67 100644 --- a/test/src/math.c +++ b/test/src/math.c @@ -1,2 +1,2 @@ -#define MATH_C_ +#define MATH_C_ #include "test/jemalloc_test.h" diff --git a/test/src/mq.c b/test/src/mq.c index 40b31c1..9b5f672 100644 --- a/test/src/mq.c +++ b/test/src/mq.c @@ -5,9 +5,7 @@ * time is guaranteed. */ void -mq_nanosleep(unsigned ns) -{ - +mq_nanosleep(unsigned ns) { assert(ns <= 1000*1000*1000); #ifdef _WIN32 diff --git a/test/src/mtx.c b/test/src/mtx.c index 8a5dfdd..a393c01 100644 --- a/test/src/mtx.c +++ b/test/src/mtx.c @@ -1,16 +1,16 @@ #include "test/jemalloc_test.h" #ifndef _CRT_SPINCOUNT -#define _CRT_SPINCOUNT 4000 +#define _CRT_SPINCOUNT 4000 #endif bool -mtx_init(mtx_t *mtx) -{ - +mtx_init(mtx_t *mtx) { #ifdef _WIN32 - if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT)) - return (true); + if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, + _CRT_SPINCOUNT)) { + return true; + } #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) mtx->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) @@ -18,22 +18,21 @@ mtx_init(mtx_t *mtx) #else pthread_mutexattr_t attr; - if (pthread_mutexattr_init(&attr) != 0) - return (true); + if (pthread_mutexattr_init(&attr) != 0) { + return true; + } pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); if (pthread_mutex_init(&mtx->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); - return (true); + return true; } pthread_mutexattr_destroy(&attr); #endif - return (false); + return false; } void -mtx_fini(mtx_t *mtx) -{ - +mtx_fini(mtx_t *mtx) { #ifdef _WIN32 #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) #elif (defined(JEMALLOC_OSSPIN)) @@ -43,9 +42,7 @@ mtx_fini(mtx_t *mtx) } void -mtx_lock(mtx_t *mtx) -{ - +mtx_lock(mtx_t *mtx) { #ifdef _WIN32 EnterCriticalSection(&mtx->lock); #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) @@ -58,9 +55,7 @@ mtx_lock(mtx_t *mtx) } void -mtx_unlock(mtx_t *mtx) -{ - +mtx_unlock(mtx_t *mtx) { #ifdef _WIN32 LeaveCriticalSection(&mtx->lock); #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) diff --git a/test/src/test.c b/test/src/test.c index d70cc75..01a4d73 100644 --- a/test/src/test.c +++ b/test/src/test.c @@ -1,14 +1,70 @@ #include "test/jemalloc_test.h" +/* Test status state. */ + static unsigned test_count = 0; static test_status_t test_counts[test_status_count] = {0, 0, 0}; static test_status_t test_status = test_status_pass; static const char * test_name = ""; +/* Reentrancy testing helpers. */ + +#define NUM_REENTRANT_ALLOCS 20 +typedef enum { + non_reentrant = 0, + libc_reentrant = 1, + arena_new_reentrant = 2 +} reentrancy_t; +static reentrancy_t reentrancy; + +static bool libc_hook_ran = false; +static bool arena_new_hook_ran = false; + +static const char * +reentrancy_t_str(reentrancy_t r) { + switch (r) { + case non_reentrant: + return "non-reentrant"; + case libc_reentrant: + return "libc-reentrant"; + case arena_new_reentrant: + return "arena_new-reentrant"; + default: + unreachable(); + } +} + +static void +do_hook(bool *hook_ran, void (**hook)()) { + *hook_ran = true; + *hook = NULL; + + size_t alloc_size = 1; + for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) { + free(malloc(alloc_size)); + alloc_size *= 2; + } +} + +static void +libc_reentrancy_hook() { + do_hook(&libc_hook_ran, &hooks_libc_hook); +} + +static void +arena_new_reentrancy_hook() { + do_hook(&arena_new_hook_ran, &hooks_arena_new_hook); +} + +/* Actual test infrastructure. 
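+ * (The reentrancy hooks above first clear themselves, then drive
+ * NUM_REENTRANT_ALLOCS malloc/free pairs with request sizes doubling from
+ * 1 byte up to 1 << (NUM_REENTRANT_ALLOCS - 1), exercising allocation
+ * from inside the hooked code path.)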
*/ +bool +test_is_reentrant() { + return reentrancy != non_reentrant; +} + JEMALLOC_FORMAT_PRINTF(1, 2) void -test_skip(const char *format, ...) -{ +test_skip(const char *format, ...) { va_list ap; va_start(ap, format); @@ -20,8 +76,7 @@ test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2) void -test_fail(const char *format, ...) -{ +test_fail(const char *format, ...) { va_list ap; va_start(ap, format); @@ -32,9 +87,7 @@ test_fail(const char *format, ...) } static const char * -test_status_string(test_status_t test_status) -{ - +test_status_string(test_status_t test_status) { switch (test_status) { case test_status_pass: return "pass"; case test_status_skip: return "skip"; @@ -44,25 +97,21 @@ test_status_string(test_status_t test_status) } void -p_test_init(const char *name) -{ - +p_test_init(const char *name) { test_count++; test_status = test_status_pass; test_name = name; } void -p_test_fini(void) -{ - +p_test_fini(void) { test_counts[test_status]++; - malloc_printf("%s: %s\n", test_name, test_status_string(test_status)); + malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy), + test_status_string(test_status)); } static test_status_t -p_test_impl(bool do_malloc_init, test_t *t, va_list ap) -{ +p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) { test_status_t ret; if (do_malloc_init) { @@ -74,15 +123,37 @@ p_test_impl(bool do_malloc_init, test_t *t, va_list ap) */ if (nallocx(1, 0) == 0) { malloc_printf("Initialization error"); - return (test_status_fail); + return test_status_fail; } } ret = test_status_pass; for (; t != NULL; t = va_arg(ap, test_t *)) { + /* Non-reentrant run. */ + reentrancy = non_reentrant; + hooks_arena_new_hook = hooks_libc_hook = NULL; t(); - if (test_status > ret) + if (test_status > ret) { ret = test_status; + } + /* Reentrant run. */ + if (do_reentrant) { + reentrancy = libc_reentrant; + hooks_arena_new_hook = NULL; + hooks_libc_hook = &libc_reentrancy_hook; + t(); + if (test_status > ret) { + ret = test_status; + } + + reentrancy = arena_new_reentrant; + hooks_libc_hook = NULL; + hooks_arena_new_hook = &arena_new_reentrancy_hook; + t(); + if (test_status > ret) { + ret = test_status; + } + } } malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", @@ -93,41 +164,54 @@ p_test_impl(bool do_malloc_init, test_t *t, va_list ap) test_status_string(test_status_fail), test_counts[test_status_fail], test_count); - return (ret); + return ret; } test_status_t -p_test(test_t *t, ...) -{ +p_test(test_t *t, ...) { test_status_t ret; va_list ap; ret = test_status_pass; va_start(ap, t); - ret = p_test_impl(true, t, ap); + ret = p_test_impl(true, true, t, ap); va_end(ap); - return (ret); + return ret; } test_status_t -p_test_no_malloc_init(test_t *t, ...) -{ +p_test_no_reentrancy(test_t *t, ...) { test_status_t ret; va_list ap; ret = test_status_pass; va_start(ap, t); - ret = p_test_impl(false, t, ap); + ret = p_test_impl(true, false, t, ap); va_end(ap); - return (ret); + return ret; } -void -p_test_fail(const char *prefix, const char *message) -{ +test_status_t +p_test_no_malloc_init(test_t *t, ...) { + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + /* + * We also omit reentrancy from bootstrapping tests, since we don't + * (yet) care about general reentrancy during bootstrapping. 
+ */ + ret = p_test_impl(false, false, t, ap); + va_end(ap); + + return ret; +} +void +p_test_fail(const char *prefix, const char *message) { malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); test_status = test_status_fail; } diff --git a/test/src/thd.c b/test/src/thd.c index c9d0065..9a15eab 100644 --- a/test/src/thd.c +++ b/test/src/thd.c @@ -2,18 +2,16 @@ #ifdef _WIN32 void -thd_create(thd_t *thd, void *(*proc)(void *), void *arg) -{ +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); - if (*thd == NULL) + if (*thd == NULL) { test_fail("Error in CreateThread()\n"); + } } void -thd_join(thd_t thd, void **ret) -{ - +thd_join(thd_t thd, void **ret) { if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) { DWORD exit_code; GetExitCodeThread(thd, (LPDWORD) &exit_code); @@ -23,17 +21,14 @@ thd_join(thd_t thd, void **ret) #else void -thd_create(thd_t *thd, void *(*proc)(void *), void *arg) -{ - - if (pthread_create(thd, NULL, proc, arg) != 0) +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { + if (pthread_create(thd, NULL, proc, arg) != 0) { test_fail("Error in pthread_create()\n"); + } } void -thd_join(thd_t thd, void **ret) -{ - +thd_join(thd_t thd, void **ret) { pthread_join(thd, ret); } #endif diff --git a/test/src/timer.c b/test/src/timer.c index 3c7e63a..c451c63 100644 --- a/test/src/timer.c +++ b/test/src/timer.c @@ -1,34 +1,28 @@ #include "test/jemalloc_test.h" void -timer_start(timedelta_t *timer) -{ - +timer_start(timedelta_t *timer) { nstime_init(&timer->t0, 0); nstime_update(&timer->t0); } void -timer_stop(timedelta_t *timer) -{ - +timer_stop(timedelta_t *timer) { nstime_copy(&timer->t1, &timer->t0); nstime_update(&timer->t1); } uint64_t -timer_usec(const timedelta_t *timer) -{ +timer_usec(const timedelta_t *timer) { nstime_t delta; nstime_copy(&delta, &timer->t1); nstime_subtract(&delta, &timer->t0); - return (nstime_ns(&delta) / 1000); + return nstime_ns(&delta) / 1000; } void -timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) -{ +timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { uint64_t t0 = timer_usec(a); uint64_t t1 = timer_usec(b); uint64_t mult; @@ -38,11 +32,13 @@ timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) /* Whole. */ n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); i += n; - if (i >= buflen) + if (i >= buflen) { return; + } mult = 1; - for (j = 0; j < n; j++) + for (j = 0; j < n; j++) { mult *= 10; + } /* Decimal. 
*/ n = malloc_snprintf(&buf[i], buflen-i, "."); diff --git a/test/stress/microbench.c b/test/stress/microbench.c index 7dc45f8..988b793 100644 --- a/test/stress/microbench.c +++ b/test/stress/microbench.c @@ -1,23 +1,23 @@ #include "test/jemalloc_test.h" -JEMALLOC_INLINE_C void +static inline void time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, - void (*func)(void)) -{ + void (*func)(void)) { uint64_t i; - for (i = 0; i < nwarmup; i++) + for (i = 0; i < nwarmup; i++) { func(); + } timer_start(timer); - for (i = 0; i < niter; i++) + for (i = 0; i < niter; i++) { func(); + } timer_stop(timer); } void compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, - void (*func_a), const char *name_b, void (*func_b)) -{ + void (*func_a), const char *name_b, void (*func_b)) { timedelta_t timer_a, timer_b; char ratio_buf[6]; void *p; @@ -41,8 +41,7 @@ compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, } static void -malloc_free(void) -{ +malloc_free(void) { /* The compiler can optimize away free(malloc(1))! */ void *p = malloc(1); if (p == NULL) { @@ -53,8 +52,7 @@ malloc_free(void) } static void -mallocx_free(void) -{ +mallocx_free(void) { void *p = mallocx(1, 0); if (p == NULL) { test_fail("Unexpected mallocx() failure"); @@ -63,17 +61,14 @@ mallocx_free(void) free(p); } -TEST_BEGIN(test_malloc_vs_mallocx) -{ - +TEST_BEGIN(test_malloc_vs_mallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "malloc", malloc_free, "mallocx", mallocx_free); } TEST_END static void -malloc_dallocx(void) -{ +malloc_dallocx(void) { void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); @@ -83,8 +78,7 @@ malloc_dallocx(void) } static void -malloc_sdallocx(void) -{ +malloc_sdallocx(void) { void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); @@ -93,25 +87,20 @@ malloc_sdallocx(void) sdallocx(p, 1, 0); } -TEST_BEGIN(test_free_vs_dallocx) -{ - +TEST_BEGIN(test_free_vs_dallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free, "dallocx", malloc_dallocx); } TEST_END -TEST_BEGIN(test_dallocx_vs_sdallocx) -{ - +TEST_BEGIN(test_dallocx_vs_sdallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx, "sdallocx", malloc_sdallocx); } TEST_END static void -malloc_mus_free(void) -{ +malloc_mus_free(void) { void *p; p = malloc(1); @@ -124,8 +113,7 @@ malloc_mus_free(void) } static void -malloc_sallocx_free(void) -{ +malloc_sallocx_free(void) { void *p; p = malloc(1); @@ -133,22 +121,20 @@ malloc_sallocx_free(void) test_fail("Unexpected malloc() failure"); return; } - if (sallocx(p, 0) < 1) + if (sallocx(p, 0) < 1) { test_fail("Unexpected sallocx() failure"); + } free(p); } -TEST_BEGIN(test_mus_vs_sallocx) -{ - +TEST_BEGIN(test_mus_vs_sallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size", malloc_mus_free, "sallocx", malloc_sallocx_free); } TEST_END static void -malloc_nallocx_free(void) -{ +malloc_nallocx_free(void) { void *p; p = malloc(1); @@ -156,27 +142,24 @@ malloc_nallocx_free(void) test_fail("Unexpected malloc() failure"); return; } - if (nallocx(1, 0) < 1) + if (nallocx(1, 0) < 1) { test_fail("Unexpected nallocx() failure"); + } free(p); } -TEST_BEGIN(test_sallocx_vs_nallocx) -{ - +TEST_BEGIN(test_sallocx_vs_nallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "sallocx", malloc_sallocx_free, "nallocx", malloc_nallocx_free); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test_no_reentrancy( test_malloc_vs_mallocx, test_free_vs_dallocx, 
test_dallocx_vs_sdallocx, test_mus_vs_sallocx, - test_sallocx_vs_nallocx)); + test_sallocx_vs_nallocx); } diff --git a/test/test.sh.in b/test/test.sh.in index f0f0f97..39302ff 100644 --- a/test/test.sh.in +++ b/test/test.sh.in @@ -41,16 +41,15 @@ for t in $@; do # execute the test. This allows the shell script to set MALLOC_CONF, which # is then used to set @JEMALLOC_CPREFIX@MALLOC_CONF (thus allowing the # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail). - $(enable_fill=@enable_fill@ \ - enable_prof=@enable_prof@ \ - enable_tcache=@enable_tcache@ \ - . @srcroot@${t}.sh && \ - export_malloc_conf && \ - ${t}@exe@ @abs_srcroot@ @abs_objroot@) + enable_fill=@enable_fill@ \ + enable_prof=@enable_prof@ \ + . @srcroot@${t}.sh && \ + export_malloc_conf && \ + $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ else - $(export MALLOC_CONF= && \ - export_malloc_conf && - ${t}@exe@ @abs_srcroot@ @abs_objroot@) + export MALLOC_CONF= && \ + export_malloc_conf && \ + $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ fi result_code=$? case ${result_code} in @@ -64,7 +63,8 @@ for t in $@; do fail_count=$((fail_count+1)) ;; *) - echo "Test harness error" 1>&2 + echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2 + echo "Use prefix to debug, e.g. JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2 exit 1 esac done diff --git a/test/unit/SFMT.c b/test/unit/SFMT.c index ba4be87..1fc8cf1 100644 --- a/test/unit/SFMT.c +++ b/test/unit/SFMT.c @@ -35,10 +35,10 @@ */ #include "test/jemalloc_test.h" -#define BLOCK_SIZE 10000 -#define BLOCK_SIZE64 (BLOCK_SIZE / 2) -#define COUNT_1 1000 -#define COUNT_2 700 +#define BLOCK_SIZE 10000 +#define BLOCK_SIZE64 (BLOCK_SIZE / 2) +#define COUNT_1 1000 +#define COUNT_2 700 static const uint32_t init_gen_rand_32_expected[] = { 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, @@ -1449,8 +1449,7 @@ static const uint64_t init_by_array_64_expected[] = { KQU(15570163926716513029), KQU(13356980519185762498) }; -TEST_BEGIN(test_gen_rand_32) -{ +TEST_BEGIN(test_gen_rand_32) { uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); int i; @@ -1484,8 +1483,7 @@ TEST_BEGIN(test_gen_rand_32) } TEST_END -TEST_BEGIN(test_by_array_32) -{ +TEST_BEGIN(test_by_array_32) { uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); int i; @@ -1520,8 +1518,7 @@ TEST_BEGIN(test_by_array_32) } TEST_END -TEST_BEGIN(test_gen_rand_64) -{ +TEST_BEGIN(test_gen_rand_64) { uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); int i; @@ -1556,8 +1553,7 @@ TEST_BEGIN(test_gen_rand_64) } TEST_END -TEST_BEGIN(test_by_array_64) -{ +TEST_BEGIN(test_by_array_64) { uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); int i; @@ -1594,12 +1590,10 @@ TEST_BEGIN(test_by_array_64) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_gen_rand_32, test_by_array_32, test_gen_rand_64, - test_by_array_64)); + test_by_array_64); } diff --git a/test/unit/a0.c b/test/unit/a0.c index b9ba45a..a27ab3f 100644 --- a/test/unit/a0.c +++ b/test/unit/a0.c @@ -1,7 +1,6 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_a0) -{ +TEST_BEGIN(test_a0) { void *p; p = a0malloc(1); @@ -11,9 +10,7 @@ TEST_BEGIN(test_a0) TEST_END int -main(void) -{ - - return (test_no_malloc_init( - test_a0)); 
+main(void) { + return test_no_malloc_init( + test_a0); } diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c index ec1c214..f5fb24d 100644 --- a/test/unit/arena_reset.c +++ b/test/unit/arena_reset.c @@ -1,8 +1,14 @@ +#ifndef ARENA_RESET_PROF_C_ #include "test/jemalloc_test.h" +#endif + +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/rtree.h" + +#include "test/extent_hooks.h" static unsigned -get_nsizes_impl(const char *cmd) -{ +get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; @@ -10,33 +16,21 @@ get_nsizes_impl(const char *cmd) assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); - return (ret); -} - -static unsigned -get_nsmall(void) -{ - - return (get_nsizes_impl("arenas.nbins")); + return ret; } static unsigned -get_nlarge(void) -{ - - return (get_nsizes_impl("arenas.nlruns")); +get_nsmall(void) { + return get_nsizes_impl("arenas.nbins"); } static unsigned -get_nhuge(void) -{ - - return (get_nsizes_impl("arenas.nhchunks")); +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); } static size_t -get_size_impl(const char *cmd, size_t ind) -{ +get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; @@ -50,106 +44,301 @@ get_size_impl(const char *cmd, size_t ind) assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); - return (ret); + return ret; } static size_t -get_small_size(size_t ind) -{ - - return (get_size_impl("arenas.bin.0.size", ind)); +get_small_size(size_t ind) { + return get_size_impl("arenas.bin.0.size", ind); } static size_t -get_large_size(size_t ind) -{ - - return (get_size_impl("arenas.lrun.0.size", ind)); +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); } +/* Like ivsalloc(), but safe to call on discarded allocations. */ static size_t -get_huge_size(size_t ind) -{ +vsalloc(tsdn_t *tsdn, const void *ptr) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent; + szind_t szind; + if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, false, &extent, &szind)) { + return 0; + } - return (get_size_impl("arenas.hchunk.0.size", ind)); + if (extent == NULL) { + return 0; + } + if (extent_state_get(extent) != extent_state_active) { + return 0; + } + + if (szind == NSIZES) { + return 0; + } + + return sz_index2size(szind); } -TEST_BEGIN(test_arena_reset) -{ -#define NHUGE 4 - unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i; - size_t sz, miblen; - void **ptrs; +static unsigned +do_arena_create(extent_hooks_t *h) { + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, + (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, + "Unexpected mallctl() failure"); + return arena_ind; +} + +static void +do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) { +#define NLARGE 32 + unsigned nsmall, nlarge, i; + size_t sz; int flags; - size_t mib[3]; tsdn_t *tsdn; - test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill - && unlikely(opt_quarantine))); - - sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), - 0, "Unexpected mallctl() failure"); - flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; nsmall = get_nsmall(); - nlarge = get_nlarge(); - nhuge = get_nhuge() > NHUGE ? 
NHUGE : get_nhuge(); - nptrs = nsmall + nlarge + nhuge; - ptrs = (void **)malloc(nptrs * sizeof(void *)); - assert_ptr_not_null(ptrs, "Unexpected malloc() failure"); + nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge(); + *nptrs = nsmall + nlarge; + *ptrs = (void **)malloc(*nptrs * sizeof(void *)); + assert_ptr_not_null(*ptrs, "Unexpected malloc() failure"); /* Allocate objects with a wide range of sizes. */ for (i = 0; i < nsmall; i++) { sz = get_small_size(i); - ptrs[i] = mallocx(sz, flags); - assert_ptr_not_null(ptrs[i], + (*ptrs)[i] = mallocx(sz, flags); + assert_ptr_not_null((*ptrs)[i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); } for (i = 0; i < nlarge; i++) { sz = get_large_size(i); - ptrs[nsmall + i] = mallocx(sz, flags); - assert_ptr_not_null(ptrs[i], - "Unexpected mallocx(%zu, %#x) failure", sz, flags); - } - for (i = 0; i < nhuge; i++) { - sz = get_huge_size(i); - ptrs[nsmall + nlarge + i] = mallocx(sz, flags); - assert_ptr_not_null(ptrs[i], + (*ptrs)[nsmall + i] = mallocx(sz, flags); + assert_ptr_not_null((*ptrs)[nsmall + i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); } tsdn = tsdn_fetch(); /* Verify allocations. */ - for (i = 0; i < nptrs; i++) { - assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0, + for (i = 0; i < *nptrs; i++) { + assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0, "Allocation should have queryable size"); } +} + +static void +do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) { + tsdn_t *tsdn; + unsigned i; + + tsdn = tsdn_fetch(); + + if (have_background_thread) { + malloc_mutex_lock(tsdn, + &background_thread_info[arena_ind % ncpus].mtx); + } + /* Verify allocations no longer exist. */ + for (i = 0; i < nptrs; i++) { + assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0, + "Allocation should no longer exist"); + } + if (have_background_thread) { + malloc_mutex_unlock(tsdn, + &background_thread_info[arena_ind % ncpus].mtx); + } + + free(ptrs); +} + +static void +do_arena_reset_destroy(const char *name, unsigned arena_ind) { + size_t mib[3]; + size_t miblen; - /* Reset. */ miblen = sizeof(mib)/sizeof(size_t); - assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0, + assert_d_eq(mallctlnametomib(name, mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); +} - /* Verify allocations no longer exist. 
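
Taken together, the do_arena_* helpers in this file reduce to a create/allocate/reset/destroy lifecycle. A minimal sketch of the same sequence from a caller's point of view (the 64-byte size is arbitrary; the flags mirror do_arena_reset_pre() above):

    unsigned arena_ind = do_arena_create(NULL);  /* default extent hooks */
    int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
    void *p = mallocx(64, flags);
    assert_ptr_not_null(p, "Unexpected mallocx() failure");
    do_arena_reset(arena_ind);    /* discards p without an explicit free */
    do_arena_destroy(arena_ind);  /* retires the arena index for reuse */
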
*/ - for (i = 0; i < nptrs; i++) { - assert_zu_eq(ivsalloc(tsdn, ptrs[i], false), 0, - "Allocation should no longer exist"); +static void +do_arena_reset(unsigned arena_ind) { + do_arena_reset_destroy("arena.0.reset", arena_ind); +} + +static void +do_arena_destroy(unsigned arena_ind) { + do_arena_reset_destroy("arena.0.destroy", arena_ind); +} + +TEST_BEGIN(test_arena_reset) { + unsigned arena_ind; + void **ptrs; + unsigned nptrs; + + arena_ind = do_arena_create(NULL); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + do_arena_reset(arena_ind); + do_arena_reset_post(ptrs, nptrs, arena_ind); +} +TEST_END + +static bool +arena_i_initialized(unsigned arena_ind, bool refresh) { + bool initialized; + size_t mib[3]; + size_t miblen, sz; + + if (refresh) { + uint64_t epoch = 1; + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)), 0, "Unexpected mallctl() failure"); } - free(ptrs); + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + sz = sizeof(initialized); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL, + 0), 0, "Unexpected mallctlbymib() failure"); + + return initialized; +} + +TEST_BEGIN(test_arena_destroy_initial) { + assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), + "Destroyed arena stats should not be initialized"); } TEST_END -int -main(void) -{ +TEST_BEGIN(test_arena_destroy_hooks_default) { + unsigned arena_ind, arena_ind_another, arena_ind_prev; + void **ptrs; + unsigned nptrs; + + arena_ind = do_arena_create(NULL); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + + assert_false(arena_i_initialized(arena_ind, false), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(arena_ind, true), + "Arena stats should be initialized"); + + /* + * Create another arena before destroying one, to better verify arena + * index reuse. + */ + arena_ind_another = do_arena_create(NULL); + + do_arena_destroy(arena_ind); - return (test( - test_arena_reset)); + assert_false(arena_i_initialized(arena_ind, true), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), + "Destroyed arena stats should be initialized"); + + do_arena_reset_post(ptrs, nptrs, arena_ind); + + arena_ind_prev = arena_ind; + arena_ind = do_arena_create(NULL); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + assert_u_eq(arena_ind, arena_ind_prev, + "Arena index should have been recycled"); + do_arena_destroy(arena_ind); + do_arena_reset_post(ptrs, nptrs, arena_ind); + + do_arena_destroy(arena_ind_another); +} +TEST_END + +/* + * Actually unmap extents, regardless of opt_retain, so that attempts to access + * a destroyed arena's memory will segfault. + */ +static bool +extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? 
+ "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap, + "Wrong hook function"); + called_dalloc = true; + if (!try_dalloc) { + return true; + } + pages_unmap(addr, size); + did_dalloc = true; + return false; +} + +static extent_hooks_t hooks_orig; + +static extent_hooks_t hooks_unmap = { + extent_alloc_hook, + extent_dalloc_unmap, /* dalloc */ + extent_destroy_hook, + extent_commit_hook, + extent_decommit_hook, + extent_purge_lazy_hook, + extent_purge_forced_hook, + extent_split_hook, + extent_merge_hook +}; + +TEST_BEGIN(test_arena_destroy_hooks_unmap) { + unsigned arena_ind; + void **ptrs; + unsigned nptrs; + + extent_hooks_prep(); + try_decommit = false; + memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); + memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t)); + + did_alloc = false; + arena_ind = do_arena_create(&hooks); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + + assert_true(did_alloc, "Expected alloc"); + + assert_false(arena_i_initialized(arena_ind, false), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(arena_ind, true), + "Arena stats should be initialized"); + + did_dalloc = false; + do_arena_destroy(arena_ind); + assert_true(did_dalloc, "Expected dalloc"); + + assert_false(arena_i_initialized(arena_ind, true), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), + "Destroyed arena stats should be initialized"); + + do_arena_reset_post(ptrs, nptrs, arena_ind); + + memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); +} +TEST_END + +int +main(void) { + return test( + test_arena_reset, + test_arena_destroy_initial, + test_arena_destroy_hooks_default, + test_arena_destroy_hooks_unmap); } diff --git a/test/unit/arena_reset.sh b/test/unit/arena_reset.sh deleted file mode 100644 index 8fcc7d8..0000000 --- a/test/unit/arena_reset.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -if [ "x${enable_prof}" = "x1" ] ; then - export MALLOC_CONF="prof:true,lg_prof_sample:0" -fi diff --git a/test/unit/arena_reset_prof.c b/test/unit/arena_reset_prof.c new file mode 100644 index 0000000..38d8012 --- /dev/null +++ b/test/unit/arena_reset_prof.c @@ -0,0 +1,4 @@ +#include "test/jemalloc_test.h" +#define ARENA_RESET_PROF_C_ + +#include "arena_reset.c" diff --git a/test/unit/arena_reset_prof.sh b/test/unit/arena_reset_prof.sh new file mode 100644 index 0000000..041dc1c --- /dev/null +++ b/test/unit/arena_reset_prof.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +export MALLOC_CONF="prof:true,lg_prof_sample:0" diff --git a/test/unit/atomic.c b/test/unit/atomic.c index bdd74f6..572d8d2 100644 --- a/test/unit/atomic.c +++ b/test/unit/atomic.c @@ -1,122 +1,229 @@ #include "test/jemalloc_test.h" -#define TEST_STRUCT(p, t) \ -struct p##_test_s { \ - t accum0; \ - t x; \ - t s; \ -}; \ -typedef struct p##_test_s p##_test_t; - -#define TEST_BODY(p, t, tc, ta, FMT) do { \ - const p##_test_t tests[] = { \ - {(t)-1, (t)-1, (t)-2}, \ - {(t)-1, (t) 0, (t)-2}, \ - {(t)-1, (t) 1, (t)-2}, \ +/* + * We *almost* have consistent short names (e.g. "u32" for uint32_t, "b" for + * bool, etc. The one exception is that the short name for void * is "p" in + * some places and "ptr" in others. In the long run it would be nice to unify + * these, but in the short run we'll use this shim. + */ +#define assert_p_eq assert_ptr_eq + +/* + * t: the non-atomic type, like "uint32_t". 
+ * ta: the short name for the type, like "u32". + * val[1,2,3]: Values of the given type. The CAS tests use val2 for expected, + * and val3 for desired. + */ + +#define DO_TESTS(t, ta, val1, val2, val3) do { \ + t val; \ + t expected; \ + bool success; \ + /* This (along with the load below) also tests ATOMIC_LOAD. */ \ + atomic_##ta##_t atom = ATOMIC_INIT(val1); \ \ - {(t) 0, (t)-1, (t)-2}, \ - {(t) 0, (t) 0, (t)-2}, \ - {(t) 0, (t) 1, (t)-2}, \ + /* ATOMIC_INIT and load. */ \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, "Load or init failed"); \ \ - {(t) 1, (t)-1, (t)-2}, \ - {(t) 1, (t) 0, (t)-2}, \ - {(t) 1, (t) 1, (t)-2}, \ + /* Store. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + atomic_store_##ta(&atom, val2, ATOMIC_RELAXED); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val2, val, "Store failed"); \ \ - {(t)0, (t)-(1 << 22), (t)-2}, \ - {(t)0, (t)(1 << 22), (t)-2}, \ - {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \ - {(t)(1 << 22), (t)(1 << 22), (t)-2} \ - }; \ - unsigned i; \ + /* Exchange. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, "Exchange returned invalid value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val2, val, "Exchange store invalid value"); \ \ - for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) { \ - bool err; \ - t accum = tests[i].accum0; \ - assert_##ta##_eq(atomic_read_##p(&accum), \ - tests[i].accum0, \ - "Erroneous read, i=%u", i); \ + /* \ + * Weak CAS. Spurious failures are allowed, so we loop a few \ + * times. \ + */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + success = false; \ + for (int i = 0; i < 10 && !success; i++) { \ + expected = val2; \ + success = atomic_compare_exchange_weak_##ta(&atom, \ + &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, expected, \ + "CAS should update expected"); \ + } \ + assert_b_eq(val1 == val2, success, \ + "Weak CAS did the wrong state update"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + if (success) { \ + assert_##ta##_eq(val3, val, \ + "Successful CAS should update atomic"); \ + } else { \ + assert_##ta##_eq(val1, val, \ + "Unsuccessful CAS should not update atomic"); \ + } \ \ - assert_##ta##_eq(atomic_add_##p(&accum, tests[i].x), \ - (t)((tc)tests[i].accum0 + (tc)tests[i].x), \ - "i=%u, accum=%"FMT", x=%"FMT, \ - i, tests[i].accum0, tests[i].x); \ - assert_##ta##_eq(atomic_read_##p(&accum), accum, \ - "Erroneous add, i=%u", i); \ + /* Strong CAS. 
*/ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + expected = val2; \ + success = atomic_compare_exchange_strong_##ta(&atom, &expected, \ + val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \ + assert_b_eq(val1 == val2, success, \ + "Strong CAS did the wrong state update"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + if (success) { \ + assert_##ta##_eq(val3, val, \ + "Successful CAS should update atomic"); \ + } else { \ + assert_##ta##_eq(val1, val, \ + "Unsuccessful CAS should not update atomic"); \ + } \ \ - accum = tests[i].accum0; \ - assert_##ta##_eq(atomic_sub_##p(&accum, tests[i].x), \ - (t)((tc)tests[i].accum0 - (tc)tests[i].x), \ - "i=%u, accum=%"FMT", x=%"FMT, \ - i, tests[i].accum0, tests[i].x); \ - assert_##ta##_eq(atomic_read_##p(&accum), accum, \ - "Erroneous sub, i=%u", i); \ \ - accum = tests[i].accum0; \ - err = atomic_cas_##p(&accum, tests[i].x, tests[i].s); \ - assert_b_eq(err, tests[i].accum0 != tests[i].x, \ - "Erroneous cas success/failure result"); \ - assert_##ta##_eq(accum, err ? tests[i].accum0 : \ - tests[i].s, "Erroneous cas effect, i=%u", i); \ +} while (0) + +#define DO_INTEGER_TESTS(t, ta, val1, val2) do { \ + atomic_##ta##_t atom; \ + t val; \ \ - accum = tests[i].accum0; \ - atomic_write_##p(&accum, tests[i].s); \ - assert_##ta##_eq(accum, tests[i].s, \ - "Erroneous write, i=%u", i); \ + /* Fetch-add. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-add should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 + val2, val, \ + "Fetch-add should update atomic"); \ + \ + /* Fetch-sub. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-sub should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 - val2, val, \ + "Fetch-sub should update atomic"); \ + \ + /* Fetch-and. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-and should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 & val2, val, \ + "Fetch-and should update atomic"); \ + \ + /* Fetch-or. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-or should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 | val2, val, \ + "Fetch-or should update atomic"); \ + \ + /* Fetch-xor. 
*/ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-xor should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 ^ val2, val, \ + "Fetch-xor should update atomic"); \ +} while (0) + +#define TEST_STRUCT(t, ta) \ +typedef struct { \ + t val1; \ + t val2; \ + t val3; \ +} ta##_test_t; + +#define TEST_CASES(t) { \ + {(t)-1, (t)-1, (t)-2}, \ + {(t)-1, (t) 0, (t)-2}, \ + {(t)-1, (t) 1, (t)-2}, \ + \ + {(t) 0, (t)-1, (t)-2}, \ + {(t) 0, (t) 0, (t)-2}, \ + {(t) 0, (t) 1, (t)-2}, \ + \ + {(t) 1, (t)-1, (t)-2}, \ + {(t) 1, (t) 0, (t)-2}, \ + {(t) 1, (t) 1, (t)-2}, \ + \ + {(t)0, (t)-(1 << 22), (t)-2}, \ + {(t)0, (t)(1 << 22), (t)-2}, \ + {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \ + {(t)(1 << 22), (t)(1 << 22), (t)-2} \ +} + +#define TEST_BODY(t, ta) do { \ + const ta##_test_t tests[] = TEST_CASES(t); \ + for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \ + ta##_test_t test = tests[i]; \ + DO_TESTS(t, ta, test.val1, test.val2, test.val3); \ } \ } while (0) -TEST_STRUCT(uint64, uint64_t) -TEST_BEGIN(test_atomic_uint64) -{ +#define INTEGER_TEST_BODY(t, ta) do { \ + const ta##_test_t tests[] = TEST_CASES(t); \ + for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \ + ta##_test_t test = tests[i]; \ + DO_TESTS(t, ta, test.val1, test.val2, test.val3); \ + DO_INTEGER_TESTS(t, ta, test.val1, test.val2); \ + } \ +} while (0) +TEST_STRUCT(uint64_t, u64); +TEST_BEGIN(test_atomic_u64) { #if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) test_skip("64-bit atomic operations not supported"); #else - TEST_BODY(uint64, uint64_t, uint64_t, u64, FMTx64); + INTEGER_TEST_BODY(uint64_t, u64); #endif } TEST_END -TEST_STRUCT(uint32, uint32_t) -TEST_BEGIN(test_atomic_uint32) -{ - TEST_BODY(uint32, uint32_t, uint32_t, u32, "#"FMTx32); +TEST_STRUCT(uint32_t, u32); +TEST_BEGIN(test_atomic_u32) { + INTEGER_TEST_BODY(uint32_t, u32); } TEST_END -TEST_STRUCT(p, void *) -TEST_BEGIN(test_atomic_p) -{ - - TEST_BODY(p, void *, uintptr_t, ptr, "p"); +TEST_STRUCT(void *, p); +TEST_BEGIN(test_atomic_p) { + TEST_BODY(void *, p); } TEST_END -TEST_STRUCT(z, size_t) -TEST_BEGIN(test_atomic_z) -{ +TEST_STRUCT(size_t, zu); +TEST_BEGIN(test_atomic_zu) { + INTEGER_TEST_BODY(size_t, zu); +} +TEST_END - TEST_BODY(z, size_t, size_t, zu, "#zx"); +TEST_STRUCT(ssize_t, zd); +TEST_BEGIN(test_atomic_zd) { + INTEGER_TEST_BODY(ssize_t, zd); } TEST_END -TEST_STRUCT(u, unsigned) -TEST_BEGIN(test_atomic_u) -{ - TEST_BODY(u, unsigned, unsigned, u, "#x"); +TEST_STRUCT(unsigned, u); +TEST_BEGIN(test_atomic_u) { + INTEGER_TEST_BODY(unsigned, u); } TEST_END int -main(void) -{ - - return (test( - test_atomic_uint64, - test_atomic_uint32, +main(void) { + return test( + test_atomic_u64, + test_atomic_u32, test_atomic_p, - test_atomic_z, - test_atomic_u)); + test_atomic_zu, + test_atomic_zd, + test_atomic_u); } diff --git a/test/unit/background_thread.c b/test/unit/background_thread.c new file mode 100644 index 0000000..f7bd37c --- /dev/null +++ b/test/unit/background_thread.c @@ -0,0 +1,119 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/util.h" + +static void +test_switch_background_thread_ctl(bool new_val) { + bool e0, e1; + size_t sz = sizeof(bool); + + e1 = new_val; + assert_d_eq(mallctl("background_thread", (void *)&e0, &sz, + &e1, sz), 0, "Unexpected mallctl() failure"); + assert_b_eq(e0, !e1, + "background_thread should be %d before.\n", !e1); + if (e1) { + 
assert_zu_gt(n_background_threads, 0, + "Number of background threads should be nonzero.\n"); + } else { + assert_zu_eq(n_background_threads, 0, + "Number of background threads should be zero.\n"); + } +} + +static void +test_repeat_background_thread_ctl(bool before) { + bool e0, e1; + size_t sz = sizeof(bool); + + e1 = before; + assert_d_eq(mallctl("background_thread", (void *)&e0, &sz, + &e1, sz), 0, "Unexpected mallctl() failure"); + assert_b_eq(e0, before, + "background_thread should be %d.\n", before); + if (e1) { + assert_zu_gt(n_background_threads, 0, + "Number of background threads should be nonzero.\n"); + } else { + assert_zu_eq(n_background_threads, 0, + "Number of background threads should be zero.\n"); + } +} + +TEST_BEGIN(test_background_thread_ctl) { + test_skip_if(!have_background_thread); + + bool e0, e1; + size_t sz = sizeof(bool); + + assert_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("background_thread", (void *)&e1, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + assert_b_eq(e0, e1, + "Default and opt.background_thread do not match.\n"); + if (e0) { + test_switch_background_thread_ctl(false); + } + assert_zu_eq(n_background_threads, 0, + "Number of background threads should be 0.\n"); + + for (unsigned i = 0; i < 4; i++) { + test_switch_background_thread_ctl(true); + test_repeat_background_thread_ctl(true); + test_repeat_background_thread_ctl(true); + + test_switch_background_thread_ctl(false); + test_repeat_background_thread_ctl(false); + test_repeat_background_thread_ctl(false); + } +} +TEST_END + +TEST_BEGIN(test_background_thread_running) { + test_skip_if(!have_background_thread); + test_skip_if(!config_stats); + +#if defined(JEMALLOC_BACKGROUND_THREAD) + tsd_t *tsd = tsd_fetch(); + background_thread_info_t *info = &background_thread_info[0]; + + test_repeat_background_thread_ctl(false); + test_switch_background_thread_ctl(true); + assert_b_eq(info->state, background_thread_started, + "Background_thread did not start.\n"); + + nstime_t start, now; + nstime_init(&start, 0); + nstime_update(&start); + + bool ran = false; + while (true) { + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + if (info->tot_n_runs > 0) { + ran = true; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + if (ran) { + break; + } + + nstime_init(&now, 0); + nstime_update(&now); + nstime_subtract(&now, &start); + assert_u64_lt(nstime_sec(&now), 1000, + "Background threads did not run within 1000 seconds."); + sleep(1); + } + test_switch_background_thread_ctl(false); +#endif +} +TEST_END + +int +main(void) { + /* Background_thread creation tests reentrancy naturally. 
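
Both helpers above drive a single read-write mallctl. A minimal sketch of flipping it from application code; note that on builds without background-thread support the control can fail with ENOENT, which the decay tests later in this patch treat as "disabled":

    bool enable = true;
    bool was_enabled;
    size_t sz = sizeof(bool);
    /* Reads the old setting and writes the new one in a single call. */
    int err = mallctl("background_thread", (void *)&was_enabled, &sz,
        (void *)&enable, sizeof(bool));
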
*/ + return test_no_reentrancy( + test_background_thread_ctl, + test_background_thread_running); +} diff --git a/test/unit/base.c b/test/unit/base.c new file mode 100644 index 0000000..5dc42f0 --- /dev/null +++ b/test/unit/base.c @@ -0,0 +1,225 @@ +#include "test/jemalloc_test.h" + +#include "test/extent_hooks.h" + +static extent_hooks_t hooks_null = { + extent_alloc_hook, + NULL, /* dalloc */ + NULL, /* destroy */ + NULL, /* commit */ + NULL, /* decommit */ + NULL, /* purge_lazy */ + NULL, /* purge_forced */ + NULL, /* split */ + NULL /* merge */ +}; + +static extent_hooks_t hooks_not_null = { + extent_alloc_hook, + extent_dalloc_hook, + extent_destroy_hook, + NULL, /* commit */ + extent_decommit_hook, + extent_purge_lazy_hook, + extent_purge_forced_hook, + NULL, /* split */ + NULL /* merge */ +}; + +TEST_BEGIN(test_base_hooks_default) { + tsdn_t *tsdn; + base_t *base; + size_t allocated0, allocated1, resident, mapped; + + tsdn = tsdn_fetch(); + base = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated0, &resident, &mapped); + assert_zu_ge(allocated0, sizeof(base_t), + "Base header should count as allocated"); + } + + assert_ptr_not_null(base_alloc(tsdn, base, 42, 1), + "Unexpected base_alloc() failure"); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated1, &resident, &mapped); + assert_zu_ge(allocated1 - allocated0, 42, + "At least 42 bytes were allocated by base_alloc()"); + } + + base_delete(base); +} +TEST_END + +TEST_BEGIN(test_base_hooks_null) { + extent_hooks_t hooks_orig; + tsdn_t *tsdn; + base_t *base; + size_t allocated0, allocated1, resident, mapped; + + extent_hooks_prep(); + try_dalloc = false; + try_destroy = true; + try_decommit = false; + try_purge_lazy = false; + try_purge_forced = false; + memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); + memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t)); + + tsdn = tsdn_fetch(); + base = base_new(tsdn, 0, &hooks); + assert_ptr_not_null(base, "Unexpected base_new() failure"); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated0, &resident, &mapped); + assert_zu_ge(allocated0, sizeof(base_t), + "Base header should count as allocated"); + } + + assert_ptr_not_null(base_alloc(tsdn, base, 42, 1), + "Unexpected base_alloc() failure"); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated1, &resident, &mapped); + assert_zu_ge(allocated1 - allocated0, 42, + "At least 42 bytes were allocated by base_alloc()"); + } + + base_delete(base); + + memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); +} +TEST_END + +TEST_BEGIN(test_base_hooks_not_null) { + extent_hooks_t hooks_orig; + tsdn_t *tsdn; + base_t *base; + void *p, *q, *r, *r_exp; + + extent_hooks_prep(); + try_dalloc = false; + try_destroy = true; + try_decommit = false; + try_purge_lazy = false; + try_purge_forced = false; + memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); + memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t)); + + tsdn = tsdn_fetch(); + did_alloc = false; + base = base_new(tsdn, 0, &hooks); + assert_ptr_not_null(base, "Unexpected base_new() failure"); + assert_true(did_alloc, "Expected alloc"); + + /* + * Check for tight packing at specified alignment under simple + * conditions. 
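
For orientation, the base allocator surface exercised by this file is small; a sketch against the default extent hooks, mirroring test_base_hooks_default above:

    tsdn_t *tsdn = tsdn_fetch();
    base_t *base = base_new(tsdn, 0,
        (extent_hooks_t *)&extent_hooks_default);
    assert_ptr_not_null(base, "Unexpected base_new() failure");
    /* 42 bytes at cacheline alignment; base memory is only reclaimed in
     * bulk, by base_delete(). */
    void *p = base_alloc(tsdn, base, 42, CACHELINE);
    assert_ptr_not_null(p, "Unexpected base_alloc() failure");
    base_delete(base);
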
+ */ + { + const size_t alignments[] = { + 1, + QUANTUM, + QUANTUM << 1, + CACHELINE, + CACHELINE << 1, + }; + unsigned i; + + for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { + size_t alignment = alignments[i]; + size_t align_ceil = ALIGNMENT_CEILING(alignment, + QUANTUM); + p = base_alloc(tsdn, base, 1, alignment); + assert_ptr_not_null(p, + "Unexpected base_alloc() failure"); + assert_ptr_eq(p, + (void *)(ALIGNMENT_CEILING((uintptr_t)p, + alignment)), "Expected quantum alignment"); + q = base_alloc(tsdn, base, alignment, alignment); + assert_ptr_not_null(q, + "Unexpected base_alloc() failure"); + assert_ptr_eq((void *)((uintptr_t)p + align_ceil), q, + "Minimal allocation should take up %zu bytes", + align_ceil); + r = base_alloc(tsdn, base, 1, alignment); + assert_ptr_not_null(r, + "Unexpected base_alloc() failure"); + assert_ptr_eq((void *)((uintptr_t)q + align_ceil), r, + "Minimal allocation should take up %zu bytes", + align_ceil); + } + } + + /* + * Allocate an object that cannot fit in the first block, then verify + * that the first block's remaining space is considered for subsequent + * allocation. + */ + assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM, + "Remainder insufficient for test"); + /* Use up all but one quantum of block. */ + while (extent_bsize_get(&base->blocks->extent) > QUANTUM) { + p = base_alloc(tsdn, base, QUANTUM, QUANTUM); + assert_ptr_not_null(p, "Unexpected base_alloc() failure"); + } + r_exp = extent_addr_get(&base->blocks->extent); + assert_zu_eq(base->extent_sn_next, 1, "One extant block expected"); + q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM); + assert_ptr_not_null(q, "Unexpected base_alloc() failure"); + assert_ptr_ne(q, r_exp, "Expected allocation from new block"); + assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected"); + r = base_alloc(tsdn, base, QUANTUM, QUANTUM); + assert_ptr_not_null(r, "Unexpected base_alloc() failure"); + assert_ptr_eq(r, r_exp, "Expected allocation from first block"); + assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected"); + + /* + * Check for proper alignment support when normal blocks are too small. 
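
To spell out the packing arithmetic verified above, assuming typical (platform-dependent) values of a 16-byte QUANTUM and a 64-byte CACHELINE: align_ceil = ALIGNMENT_CEILING(CACHELINE, QUANTUM) = 64, so p, q, and r are expected to land exactly 64 bytes apart, while at alignment 1 the spacing rounds up to one quantum, i.e. 16 bytes.
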
+ */ + { + const size_t alignments[] = { + HUGEPAGE, + HUGEPAGE << 1 + }; + unsigned i; + + for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { + size_t alignment = alignments[i]; + p = base_alloc(tsdn, base, QUANTUM, alignment); + assert_ptr_not_null(p, + "Unexpected base_alloc() failure"); + assert_ptr_eq(p, + (void *)(ALIGNMENT_CEILING((uintptr_t)p, + alignment)), "Expected %zu-byte alignment", + alignment); + } + } + + called_dalloc = called_destroy = called_decommit = called_purge_lazy = + called_purge_forced = false; + base_delete(base); + assert_true(called_dalloc, "Expected dalloc call"); + assert_true(!called_destroy, "Unexpected destroy call"); + assert_true(called_decommit, "Expected decommit call"); + assert_true(called_purge_lazy, "Expected purge_lazy call"); + assert_true(called_purge_forced, "Expected purge_forced call"); + + try_dalloc = true; + try_destroy = true; + try_decommit = true; + try_purge_lazy = true; + try_purge_forced = true; + memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); +} +TEST_END + +int +main(void) { + return test( + test_base_hooks_default, + test_base_hooks_null, + test_base_hooks_not_null); +} diff --git a/test/unit/bit_util.c b/test/unit/bit_util.c new file mode 100644 index 0000000..42a9701 --- /dev/null +++ b/test/unit/bit_util.c @@ -0,0 +1,57 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/bit_util.h" + +#define TEST_POW2_CEIL(t, suf, pri) do { \ + unsigned i, pow2; \ + t x; \ + \ + assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ + \ + for (i = 0; i < sizeof(t) * 8; i++) { \ + assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \ + << i, "Unexpected result"); \ + } \ + \ + for (i = 2; i < sizeof(t) * 8; i++) { \ + assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ + ((t)1) << i, "Unexpected result"); \ + } \ + \ + for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ + assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ + ((t)1) << (i+1), "Unexpected result"); \ + } \ + \ + for (pow2 = 1; pow2 < 25; pow2++) { \ + for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \ + x++) { \ + assert_##suf##_eq(pow2_ceil_##suf(x), \ + ((t)1) << pow2, \ + "Unexpected result, x=%"pri, x); \ + } \ + } \ +} while (0) + +TEST_BEGIN(test_pow2_ceil_u64) { + TEST_POW2_CEIL(uint64_t, u64, FMTu64); +} +TEST_END + +TEST_BEGIN(test_pow2_ceil_u32) { + TEST_POW2_CEIL(uint32_t, u32, FMTu32); +} +TEST_END + +TEST_BEGIN(test_pow2_ceil_zu) { + TEST_POW2_CEIL(size_t, zu, "zu"); +} +TEST_END + +int +main(void) { + return test( + test_pow2_ceil_u64, + test_pow2_ceil_u32, + test_pow2_ceil_zu); +} diff --git a/test/unit/bitmap.c b/test/unit/bitmap.c index a2dd546..cafb203 100644 --- a/test/unit/bitmap.c +++ b/test/unit/bitmap.c @@ -1,163 +1,431 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_bitmap_size) -{ - size_t i, prev_size; +#define NBITS_TAB \ + NB( 1) \ + NB( 2) \ + NB( 3) \ + NB( 4) \ + NB( 5) \ + NB( 6) \ + NB( 7) \ + NB( 8) \ + NB( 9) \ + NB(10) \ + NB(11) \ + NB(12) \ + NB(13) \ + NB(14) \ + NB(15) \ + NB(16) \ + NB(17) \ + NB(18) \ + NB(19) \ + NB(20) \ + NB(21) \ + NB(22) \ + NB(23) \ + NB(24) \ + NB(25) \ + NB(26) \ + NB(27) \ + NB(28) \ + NB(29) \ + NB(30) \ + NB(31) \ + NB(32) \ + \ + NB(33) \ + NB(34) \ + NB(35) \ + NB(36) \ + NB(37) \ + NB(38) \ + NB(39) \ + NB(40) \ + NB(41) \ + NB(42) \ + NB(43) \ + NB(44) \ + NB(45) \ + NB(46) \ + NB(47) \ + NB(48) \ + NB(49) \ + NB(50) \ + NB(51) \ + NB(52) \ + NB(53) \ + NB(54) \ + NB(55) \ + NB(56) \ + NB(57) \ + NB(58) \ + NB(59) \ + NB(60) \ + NB(61) \ + NB(62) \ + 
NB(63) \ + NB(64) \ + NB(65) \ + \ + NB(126) \ + NB(127) \ + NB(128) \ + NB(129) \ + NB(130) \ + \ + NB(254) \ + NB(255) \ + NB(256) \ + NB(257) \ + NB(258) \ + \ + NB(510) \ + NB(511) \ + NB(512) \ + NB(513) \ + NB(514) \ + \ + NB(1024) \ + NB(2048) \ + NB(4096) \ + NB(8192) \ + NB(16384) \ - prev_size = 0; - for (i = 1; i <= BITMAP_MAXBITS; i++) { - bitmap_info_t binfo; - size_t size; +static void +test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) { + bitmap_info_t binfo_dyn; + bitmap_info_init(&binfo_dyn, nbits); + + assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn), + "Unexpected difference between static and dynamic initialization, " + "nbits=%zu", nbits); + assert_zu_eq(binfo->nbits, binfo_dyn.nbits, + "Unexpected difference between static and dynamic initialization, " + "nbits=%zu", nbits); +#ifdef BITMAP_USE_TREE + assert_u_eq(binfo->nlevels, binfo_dyn.nlevels, + "Unexpected difference between static and dynamic initialization, " + "nbits=%zu", nbits); + { + unsigned i; + + for (i = 0; i < binfo->nlevels; i++) { + assert_zu_eq(binfo->levels[i].group_offset, + binfo_dyn.levels[i].group_offset, + "Unexpected difference between static and dynamic " + "initialization, nbits=%zu, level=%u", nbits, i); + } + } +#else + assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups, + "Unexpected difference between static and dynamic initialization"); +#endif +} - bitmap_info_init(&binfo, i); - size = bitmap_size(&binfo); - assert_true(size >= prev_size, - "Bitmap size is smaller than expected"); - prev_size = size; +TEST_BEGIN(test_bitmap_initializer) { +#define NB(nbits) { \ + if (nbits <= BITMAP_MAXBITS) { \ + bitmap_info_t binfo = \ + BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_initializer_body(&binfo, nbits); \ + } \ } + NBITS_TAB +#undef NB } TEST_END -TEST_BEGIN(test_bitmap_init) -{ - size_t i; +static size_t +test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits, + size_t prev_size) { + size_t size = bitmap_size(binfo); + assert_zu_ge(size, (nbits >> 3), + "Bitmap size is smaller than expected"); + assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected"); + return size; +} + +TEST_BEGIN(test_bitmap_size) { + size_t nbits, prev_size; - for (i = 1; i <= BITMAP_MAXBITS; i++) { + prev_size = 0; + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = (bitmap_t *)malloc( - bitmap_size(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) { - assert_false(bitmap_get(bitmap, &binfo, j), - "Bit should be unset"); - } - free(bitmap); - } + bitmap_info_init(&binfo, nbits); + prev_size = test_bitmap_size_body(&binfo, nbits, prev_size); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + prev_size = test_bitmap_size_body(&binfo, nbits, \ + prev_size); \ } + prev_size = 0; + NBITS_TAB +#undef NB } TEST_END -TEST_BEGIN(test_bitmap_set) -{ +static void +test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) { size_t i; + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); - for (i = 1; i <= BITMAP_MAXBITS; i++) { + bitmap_init(bitmap, binfo, false); + for (i = 0; i < nbits; i++) { + assert_false(bitmap_get(bitmap, binfo, i), + "Bit should be unset"); + } + + bitmap_init(bitmap, binfo, true); + for (i = 0; i < nbits; i++) { + assert_true(bitmap_get(bitmap, binfo, i), "Bit should be set"); + } + + free(bitmap); +} + +TEST_BEGIN(test_bitmap_init) { + 
size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = (bitmap_t *)malloc( - bitmap_size(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - free(bitmap); - } + bitmap_info_init(&binfo, nbits); + test_bitmap_init_body(&binfo, nbits); } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_init_body(&binfo, nbits); \ + } + NBITS_TAB +#undef NB } TEST_END -TEST_BEGIN(test_bitmap_unset) -{ +static void +test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) { size_t i; + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); + bitmap_init(bitmap, binfo, false); + + for (i = 0; i < nbits; i++) { + bitmap_set(bitmap, binfo, i); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + free(bitmap); +} - for (i = 1; i <= BITMAP_MAXBITS; i++) { +TEST_BEGIN(test_bitmap_set) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = (bitmap_t *)malloc( - bitmap_size(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - for (j = 0; j < i; j++) - bitmap_unset(bitmap, &binfo, j); - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - free(bitmap); - } + bitmap_info_init(&binfo, nbits); + test_bitmap_set_body(&binfo, nbits); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_set_body(&binfo, nbits); \ } + NBITS_TAB +#undef NB } TEST_END -TEST_BEGIN(test_bitmap_sfu) -{ +static void +test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) { size_t i; + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); + bitmap_init(bitmap, binfo, false); - for (i = 1; i <= BITMAP_MAXBITS; i++) { + for (i = 0; i < nbits; i++) { + bitmap_set(bitmap, binfo, i); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + for (i = 0; i < nbits; i++) { + bitmap_unset(bitmap, binfo, i); + } + for (i = 0; i < nbits; i++) { + bitmap_set(bitmap, binfo, i); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + free(bitmap); +} + +TEST_BEGIN(test_bitmap_unset) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = (bitmap_t *)malloc( - bitmap_size(&binfo)); - bitmap_init(bitmap, &binfo); - - /* Iteratively set bits starting at the beginning. 
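
Two lookup primitives drive the new checks below: bitmap_ffu(bitmap, binfo, min_bit) returns the index of the first unset bit at or above min_bit (returning nbits when every remaining bit is set), and bitmap_sfu(bitmap, binfo) returns the first unset bit and sets it. A minimal sketch:

    bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(8);
    bitmap_t *bm = (bitmap_t *)malloc(bitmap_size(&binfo));
    bitmap_init(bm, &binfo, false);         /* all 8 bits start unset */
    size_t b0 = bitmap_sfu(bm, &binfo);     /* returns 0 and sets bit 0 */
    size_t b1 = bitmap_ffu(bm, &binfo, 0);  /* returns 1; lookup only */
    free(bm);
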
*/ - for (j = 0; j < i; j++) { - assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, - "First unset bit should be just after " - "previous first unset bit"); + bitmap_unset(bitmap, binfo, i); + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, + "First unset bit should be the bit previously unset"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, + "First unset bit should be the bit previously unset"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "First unset bit should be the bit previously unset"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "First unset bit should be the bit previously unset"); + bitmap_unset(bitmap, binfo, i); + } + assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset"); + + /* + * Iteratively set bits starting at the beginning, and verify that + * bitmap_sfu() looks past them. + */ + for (size_t i = 1; i < nbits; i++) { + bitmap_set(bitmap, binfo, i - 1); + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, + "First unset bit should be just after the bit previously " + "set"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, + "First unset bit should be just after the bit previously " + "set"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "First unset bit should be just after the bit previously " + "set"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "First unset bit should be just after the bit previously " + "set"); + bitmap_unset(bitmap, binfo, i); + } + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1, + "First unset bit should be the last bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1), + nbits - 1, "First unset bit should be the last bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1, + "First unset bit should be the last bit"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1, + "First unset bit should be the last bit"); + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + + /* + * Bubble a "usu" pattern through the bitmap and verify that + * bitmap_ffu() finds the correct bit for all five min_bit cases. 
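
Concretely, with nbits = 8 and i = 3 the "usu" window leaves bits 3 and 5 unset with bit 4 set, so bitmap_ffu() returns 3 for min_bit 2 or 3, 5 for min_bit 4 or 5, and 8 (i.e. nbits, meaning no unset bit remains at or above min_bit) for min_bit 6.
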
+ */ + if (nbits >= 3) { + for (size_t i = 0; i < nbits-2; i++) { + bitmap_unset(bitmap, binfo, i); + bitmap_unset(bitmap, binfo, i+2); + if (i > 0) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, + "Unexpected first unset bit"); } - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - - /* - * Iteratively unset bits starting at the end, and - * verify that bitmap_sfu() reaches the unset bits. - */ - for (j = i - 1; j < i; j--) { /* (i..0] */ - bitmap_unset(bitmap, &binfo, j); - assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, - "First unset bit should the bit previously " - "unset"); - bitmap_unset(bitmap, &binfo, j); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2, + "Unexpected first unset bit"); + if (i + 3 < nbits) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+3), + nbits, "Unexpected first unset bit"); } - assert_false(bitmap_get(bitmap, &binfo, 0), - "Bit should be unset"); - - /* - * Iteratively set bits starting at the beginning, and - * verify that bitmap_sfu() looks past them. - */ - for (j = 1; j < i; j++) { - bitmap_set(bitmap, &binfo, j - 1); - assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, - "First unset bit should be just after the " - "bit previously set"); - bitmap_unset(bitmap, &binfo, j); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i+2, + "Unexpected first unset bit"); + } + } + + /* + * Unset the last bit, bubble another unset bit through the bitmap, and + * verify that bitmap_ffu() finds the correct bit for all four min_bit + * cases. + */ + if (nbits >= 3) { + bitmap_unset(bitmap, binfo, nbits-1); + for (size_t i = 0; i < nbits-1; i++) { + bitmap_unset(bitmap, binfo, i); + if (i > 0) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, + "Unexpected first unset bit"); } - assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1, - "First unset bit should be the last bit"); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - free(bitmap); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1), + nbits-1, "Unexpected first unset bit"); + + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "Unexpected first unset bit"); } + assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1, + "Unexpected first unset bit"); } + + free(bitmap); +} + +TEST_BEGIN(test_bitmap_xfu) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + test_bitmap_xfu_body(&binfo, nbits); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_xfu_body(&binfo, nbits); \ + } + NBITS_TAB +#undef NB } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( + test_bitmap_initializer, test_bitmap_size, test_bitmap_init, test_bitmap_set, test_bitmap_unset, - test_bitmap_sfu)); + test_bitmap_xfu); } diff --git a/test/unit/ckh.c b/test/unit/ckh.c index 2cbc226..707ea5f 100644 --- a/test/unit/ckh.c +++ b/test/unit/ckh.c @@ -1,7 +1,6 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_new_delete) -{ +TEST_BEGIN(test_new_delete) { tsd_t *tsd; ckh_t ckh; @@ -17,8 +16,7 @@ TEST_BEGIN(test_new_delete) } TEST_END -TEST_BEGIN(test_count_insert_search_remove) -{ 
+TEST_BEGIN(test_count_insert_search_remove) { tsd_t *tsd; ckh_t ckh; const char *strs[] = { @@ -105,9 +103,8 @@ TEST_BEGIN(test_count_insert_search_remove) } TEST_END -TEST_BEGIN(test_insert_iter_remove) -{ -#define NITEMS ZU(1000) +TEST_BEGIN(test_insert_iter_remove) { +#define NITEMS ZU(1000) tsd_t *tsd; ckh_t ckh; void **p[NITEMS]; @@ -174,10 +171,12 @@ TEST_BEGIN(test_insert_iter_remove) } } - for (j = 0; j < i + 1; j++) + for (j = 0; j < i + 1; j++) { assert_true(seen[j], "Item %zu not seen", j); - for (; j < NITEMS; j++) + } + for (; j < NITEMS; j++) { assert_false(seen[j], "Item %zu seen", j); + } } } @@ -204,11 +203,9 @@ TEST_BEGIN(test_insert_iter_remove) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_new_delete, test_count_insert_search_remove, - test_insert_iter_remove)); + test_insert_iter_remove); } diff --git a/test/unit/decay.c b/test/unit/decay.c index 2d8d69d..f727bf9 100644 --- a/test/unit/decay.c +++ b/test/unit/decay.c @@ -1,5 +1,7 @@ #include "test/jemalloc_test.h" +#include "jemalloc/internal/ticker.h" + static nstime_monotonic_t *nstime_monotonic_orig; static nstime_update_t *nstime_update_orig; @@ -8,50 +10,208 @@ static nstime_t time_mock; static bool monotonic_mock; static bool -nstime_monotonic_mock(void) -{ - - return (monotonic_mock); +check_background_thread_enabled(void) { + bool enabled; + size_t sz = sizeof(bool); + int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0); + if (ret == ENOENT) { + return false; + } + assert_d_eq(ret, 0, "Unexpected mallctl error"); + return enabled; } static bool -nstime_update_mock(nstime_t *time) -{ +nstime_monotonic_mock(void) { + return monotonic_mock; +} +static bool +nstime_update_mock(nstime_t *time) { nupdates_mock++; - if (monotonic_mock) + if (monotonic_mock) { nstime_copy(time, &time_mock); - return (!monotonic_mock); + } + return !monotonic_mock; +} + +static unsigned +do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) { + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + + assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen), + 0, "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, + (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0, + "Unexpected mallctlbymib() failure"); + + assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen), + 0, "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0, + "Unexpected mallctlbymib() failure"); + + return arena_ind; +} + +static void +do_arena_destroy(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +void +do_epoch(void) { + uint64_t epoch = 1; + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); +} + +void +do_purge(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, + 
"Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); } -TEST_BEGIN(test_decay_ticks) -{ +void +do_decay(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +static uint64_t +get_arena_npurge_impl(const char *mibname, unsigned arena_ind) { + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[2] = (size_t)arena_ind; + uint64_t npurge = 0; + size_t sz = sizeof(npurge); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0), + config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure"); + return npurge; +} + +static uint64_t +get_arena_dirty_npurge(unsigned arena_ind) { + do_epoch(); + return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind); +} + +static uint64_t +get_arena_muzzy_npurge(unsigned arena_ind) { + do_epoch(); + return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); +} + +static uint64_t +get_arena_npurge(unsigned arena_ind) { + do_epoch(); + return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) + + get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); +} + +static size_t +get_arena_pdirty(unsigned arena_ind) { + do_epoch(); + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[2] = (size_t)arena_ind; + size_t pdirty; + size_t sz = sizeof(pdirty); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + return pdirty; +} + +static size_t +get_arena_pmuzzy(unsigned arena_ind) { + do_epoch(); + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[2] = (size_t)arena_ind; + size_t pmuzzy; + size_t sz = sizeof(pmuzzy); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + return pmuzzy; +} + +static void * +do_mallocx(size_t size, int flags) { + void *p = mallocx(size, flags); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + return p; +} + +static void +generate_dirty(unsigned arena_ind, size_t size) { + int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + void *p = do_mallocx(size, flags); + dallocx(p, flags); +} + +TEST_BEGIN(test_decay_ticks) { + test_skip_if(check_background_thread_enabled()); + ticker_t *decay_ticker; - unsigned tick0, tick1; - size_t sz, huge0, large0; + unsigned tick0, tick1, arena_ind; + size_t sz, large0; void *p; - test_skip_if(opt_purge != purge_mode_decay); + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + + /* Set up a manually managed arena for test. */ + arena_ind = do_arena_create(0, 0); - decay_ticker = decay_ticker_get(tsd_fetch(), 0); + /* Migrate to the new arena, and get the ticker. 
*/ + unsigned old_arena_ind; + size_t sz_arena_ind = sizeof(old_arena_ind); + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, + &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0, + "Unexpected mallctl() failure"); + decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind); assert_ptr_not_null(decay_ticker, "Unexpected failure getting decay ticker"); - sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL, - 0), 0, "Unexpected mallctl failure"); - assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL, - 0), 0, "Unexpected mallctl failure"); - /* - * Test the standard APIs using a huge size class, since we can't - * control tcache interactions (except by completely disabling tcache - * for the entire test program). + * Test the standard APIs using a large size class, since we can't + * control tcache interactions for small size classes (except by + * completely disabling tcache for the entire test program). */ /* malloc(). */ tick0 = ticker_read(decay_ticker); - p = malloc(huge0); + p = malloc(large0); assert_ptr_not_null(p, "Unexpected malloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()"); @@ -63,7 +223,7 @@ TEST_BEGIN(test_decay_ticks) /* calloc(). */ tick0 = ticker_read(decay_ticker); - p = calloc(1, huge0); + p = calloc(1, large0); assert_ptr_not_null(p, "Unexpected calloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()"); @@ -71,7 +231,7 @@ TEST_BEGIN(test_decay_ticks) /* posix_memalign(). */ tick0 = ticker_read(decay_ticker); - assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0, + assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0, "Unexpected posix_memalign() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, @@ -80,7 +240,7 @@ TEST_BEGIN(test_decay_ticks) /* aligned_alloc(). */ tick0 = ticker_read(decay_ticker); - p = aligned_alloc(sizeof(size_t), huge0); + p = aligned_alloc(sizeof(size_t), large0); assert_ptr_not_null(p, "Unexpected aligned_alloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, @@ -90,13 +250,13 @@ TEST_BEGIN(test_decay_ticks) /* realloc(). */ /* Allocate. */ tick0 = ticker_read(decay_ticker); - p = realloc(NULL, huge0); + p = realloc(NULL, large0); assert_ptr_not_null(p, "Unexpected realloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* Reallocate. */ tick0 = ticker_read(decay_ticker); - p = realloc(p, huge0); + p = realloc(p, large0); assert_ptr_not_null(p, "Unexpected realloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); @@ -107,15 +267,14 @@ TEST_BEGIN(test_decay_ticks) assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* - * Test the *allocx() APIs using huge, large, and small size classes, - * with tcache explicitly disabled. + * Test the *allocx() APIs using large and small size classes, with + * tcache explicitly disabled. 
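	 * (5.0.0 folds the former "huge" size classes into "large", which is
	 * why the third, huge-sized entry disappears from allocx_sizes.)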
*/ { unsigned i; - size_t allocx_sizes[3]; - allocx_sizes[0] = huge0; - allocx_sizes[1] = large0; - allocx_sizes[2] = 1; + size_t allocx_sizes[2]; + allocx_sizes[0] = large0; + allocx_sizes[1] = 1; for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) { sz = allocx_sizes[i]; @@ -166,86 +325,128 @@ TEST_BEGIN(test_decay_ticks) * Test tcache fill/flush interactions for large and small size classes, * using an explicit tcache. */ - if (config_tcache) { - unsigned tcache_ind, i; - size_t tcache_sizes[2]; - tcache_sizes[0] = large0; - tcache_sizes[1] = 1; - - sz = sizeof(unsigned); - assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz, - NULL, 0), 0, "Unexpected mallctl failure"); - - for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) { - sz = tcache_sizes[i]; - - /* tcache fill. */ - tick0 = ticker_read(decay_ticker); - p = mallocx(sz, MALLOCX_TCACHE(tcache_ind)); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - tick1 = ticker_read(decay_ticker); + unsigned tcache_ind, i; + size_t tcache_sizes[2]; + tcache_sizes[0] = large0; + tcache_sizes[1] = 1; + + size_t tcache_max, sz_tcache_max; + sz_tcache_max = sizeof(tcache_max); + assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, + &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure"); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz, + NULL, 0), 0, "Unexpected mallctl failure"); + + for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) { + sz = tcache_sizes[i]; + + /* tcache fill. */ + tick0 = ticker_read(decay_ticker); + p = mallocx(sz, MALLOCX_TCACHE(tcache_ind)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during tcache fill " + "(sz=%zu)", sz); + /* tcache flush. */ + dallocx(p, MALLOCX_TCACHE(tcache_ind)); + tick0 = ticker_read(decay_ticker); + assert_d_eq(mallctl("tcache.flush", NULL, NULL, + (void *)&tcache_ind, sizeof(unsigned)), 0, + "Unexpected mallctl failure"); + tick1 = ticker_read(decay_ticker); + + /* Will only tick if it's in tcache. */ + if (sz <= tcache_max) { assert_u32_ne(tick1, tick0, - "Expected ticker to tick during tcache fill " - "(sz=%zu)", sz); - /* tcache flush. 
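	 * Flushing returns cached objects to the arena, so it can tick the
	 * decay ticker; the rewritten check above asserts a tick only when
	 * sz actually fits in the tcache (sz <= tcache_max).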
*/ - dallocx(p, MALLOCX_TCACHE(tcache_ind)); - tick0 = ticker_read(decay_ticker); - assert_d_eq(mallctl("tcache.flush", NULL, NULL, - (void *)&tcache_ind, sizeof(unsigned)), 0, - "Unexpected mallctl failure"); - tick1 = ticker_read(decay_ticker); - assert_u32_ne(tick1, tick0, - "Expected ticker to tick during tcache flush " - "(sz=%zu)", sz); + "Expected ticker to tick during tcache " + "flush (sz=%zu)", sz); + } else { + assert_u32_eq(tick1, tick0, + "Unexpected ticker tick during tcache " + "flush (sz=%zu)", sz); } } } TEST_END -TEST_BEGIN(test_decay_ticker) -{ -#define NPS 1024 - int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); - void *ps[NPS]; - uint64_t epoch; - uint64_t npurge0 = 0; - uint64_t npurge1 = 0; - size_t sz, large; - unsigned i, nupdates0; - nstime_t time, decay_time, deadline; +static void +decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt, + uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) { +#define NINTERVALS 101 + nstime_t time, update_interval, decay_ms, deadline; + + nstime_init(&time, 0); + nstime_update(&time); - test_skip_if(opt_purge != purge_mode_decay); + nstime_init2(&decay_ms, dt, 0); + nstime_copy(&deadline, &time); + nstime_add(&deadline, &decay_ms); + + nstime_init2(&update_interval, dt, 0); + nstime_idivide(&update_interval, NINTERVALS); /* - * Allocate a bunch of large objects, pause the clock, deallocate the - * objects, restore the clock, then [md]allocx() in a tight loop to - * verify the ticker triggers purging. + * Keep q's slab from being deallocated during the looping below. If a + * cached slab were to repeatedly come and go during looping, it could + * prevent the decay backlog ever becoming empty. */ + void *p = do_mallocx(1, flags); + uint64_t dirty_npurge1, muzzy_npurge1; + do { + for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; + i++) { + void *q = do_mallocx(1, flags); + dallocx(q, flags); + } + dirty_npurge1 = get_arena_dirty_npurge(arena_ind); + muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind); - if (config_tcache) { - size_t tcache_max; - - sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, - &sz, NULL, 0), 0, "Unexpected mallctl failure"); - large = nallocx(tcache_max + 1, flags); - } else { - sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large, &sz, - NULL, 0), 0, "Unexpected mallctl failure"); + nstime_add(&time_mock, &update_interval); + nstime_update(&time); + } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 == + dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) || + !terminate_asap)); + dallocx(p, flags); + + if (config_stats) { + assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 + + muzzy_npurge0, "Expected purging to occur"); } +#undef NINTERVALS +} - assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, - "Unexpected mallctl failure"); - assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, - sizeof(uint64_t)), 0, "Unexpected mallctl failure"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz, - NULL, 0), config_stats ? 
0 : ENOENT, "Unexpected mallctl result"); +TEST_BEGIN(test_decay_ticker) { + test_skip_if(check_background_thread_enabled()); +#define NPS 2048 + ssize_t ddt = opt_dirty_decay_ms; + ssize_t mdt = opt_muzzy_decay_ms; + unsigned arena_ind = do_arena_create(ddt, mdt); + int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); + void *ps[NPS]; + size_t large; - for (i = 0; i < NPS; i++) { - ps[i] = mallocx(large, flags); - assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); + /* + * Allocate a bunch of large objects, pause the clock, deallocate every + * other object (to fragment virtual memory), restore the clock, then + * [md]allocx() in a tight loop while advancing time rapidly to verify + * the ticker triggers purging. + */ + + size_t tcache_max; + size_t sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + large = nallocx(tcache_max + 1, flags); + + do_purge(arena_ind); + uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind); + uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind); + + for (unsigned i = 0; i < NPS; i++) { + ps[i] = do_mallocx(large, flags); } nupdates_mock = 0; @@ -258,69 +459,46 @@ TEST_BEGIN(test_decay_ticker) nstime_monotonic = nstime_monotonic_mock; nstime_update = nstime_update_mock; - for (i = 0; i < NPS; i++) { + for (unsigned i = 0; i < NPS; i += 2) { dallocx(ps[i], flags); - nupdates0 = nupdates_mock; - assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, - "Unexpected arena.0.decay failure"); + unsigned nupdates0 = nupdates_mock; + do_decay(arena_ind); assert_u_gt(nupdates_mock, nupdates0, "Expected nstime_update() to be called"); } - nstime_monotonic = nstime_monotonic_orig; - nstime_update = nstime_update_orig; + decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0, + muzzy_npurge0, true); + decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0, + muzzy_npurge0, false); - nstime_init(&time, 0); - nstime_update(&time); - nstime_init2(&decay_time, opt_decay_time, 0); - nstime_copy(&deadline, &time); - nstime_add(&deadline, &decay_time); - do { - for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) { - void *p = mallocx(1, flags); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - dallocx(p, flags); - } - assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, - sizeof(uint64_t)), 0, "Unexpected mallctl failure"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, - &sz, NULL, 0), config_stats ? 
0 : ENOENT, - "Unexpected mallctl result"); + do_arena_destroy(arena_ind); - nstime_update(&time); - } while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0); - - if (config_stats) - assert_u64_gt(npurge1, npurge0, "Expected purging to occur"); + nstime_monotonic = nstime_monotonic_orig; + nstime_update = nstime_update_orig; #undef NPS } TEST_END -TEST_BEGIN(test_decay_nonmonotonic) -{ -#define NPS (SMOOTHSTEP_NSTEPS + 1) +TEST_BEGIN(test_decay_nonmonotonic) { + test_skip_if(check_background_thread_enabled()); +#define NPS (SMOOTHSTEP_NSTEPS + 1) int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); void *ps[NPS]; - uint64_t epoch; uint64_t npurge0 = 0; uint64_t npurge1 = 0; size_t sz, large0; unsigned i, nupdates0; - test_skip_if(opt_purge != purge_mode_decay); - sz = sizeof(size_t); - assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL, + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure"); - assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, - sizeof(uint64_t)), 0, "Unexpected mallctl failure"); + do_epoch(); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz, - NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result"); + npurge0 = get_arena_npurge(0); nupdates_mock = 0; nstime_init(&time_mock, 0); @@ -346,14 +524,13 @@ TEST_BEGIN(test_decay_nonmonotonic) "Expected nstime_update() to be called"); } - assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, - sizeof(uint64_t)), 0, "Unexpected mallctl failure"); + do_epoch(); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz, - NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result"); + npurge1 = get_arena_npurge(0); - if (config_stats) + if (config_stats) { assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred"); + } nstime_monotonic = nstime_monotonic_orig; nstime_update = nstime_update_orig; @@ -361,12 +538,62 @@ TEST_BEGIN(test_decay_nonmonotonic) } TEST_END -int -main(void) -{ +TEST_BEGIN(test_decay_now) { + test_skip_if(check_background_thread_enabled()); + + unsigned arena_ind = do_arena_create(0, 0); + assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); + assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); + size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; + /* Verify that dirty/muzzy pages never linger after deallocation. */ + for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + size_t size = sizes[i]; + generate_dirty(arena_ind, size); + assert_zu_eq(get_arena_pdirty(arena_ind), 0, + "Unexpected dirty pages"); + assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, + "Unexpected muzzy pages"); + } + do_arena_destroy(arena_ind); +} +TEST_END - return (test( +TEST_BEGIN(test_decay_never) { + test_skip_if(check_background_thread_enabled()); + + unsigned arena_ind = do_arena_create(-1, -1); + int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); + assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); + size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; + void *ptrs[sizeof(sizes)/sizeof(size_t)]; + for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + ptrs[i] = do_mallocx(sizes[i], flags); + } + /* Verify that each deallocation generates additional dirty pages. 
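	 * Both decay times were given as -1 ("never") at arena creation, so
	 * purging is disabled and the dirty page count can only grow.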
*/ + size_t pdirty_prev = get_arena_pdirty(arena_ind); + size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind); + assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages"); + assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages"); + for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + dallocx(ptrs[i], flags); + size_t pdirty = get_arena_pdirty(arena_ind); + size_t pmuzzy = get_arena_pmuzzy(arena_ind); + assert_zu_gt(pdirty, pdirty_prev, + "Expected dirty pages to increase."); + assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages"); + pdirty_prev = pdirty; + } + do_arena_destroy(arena_ind); +} +TEST_END + +int +main(void) { + return test( test_decay_ticks, test_decay_ticker, - test_decay_nonmonotonic)); + test_decay_nonmonotonic, + test_decay_now, + test_decay_never); } diff --git a/test/unit/decay.sh b/test/unit/decay.sh index 7b8f470..45aeccf 100644 --- a/test/unit/decay.sh +++ b/test/unit/decay.sh @@ -1,3 +1,3 @@ #!/bin/sh -export MALLOC_CONF="purge:decay,decay_time:1" +export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,lg_tcache_max:0" diff --git a/test/unit/extent_quantize.c b/test/unit/extent_quantize.c index d2eb6d7..0ca7a75 100644 --- a/test/unit/extent_quantize.c +++ b/test/unit/extent_quantize.c @@ -1,39 +1,81 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_huge_extent_size) { - unsigned nhchunks, i; +TEST_BEGIN(test_small_extent_size) { + unsigned nbins, i; + size_t sz, extent_size; + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); + + /* + * Iterate over all small size classes, get their extent sizes, and + * verify that the quantized size is the same as the extent size. + */ + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0, + "Unexpected mallctlnametomib failure"); + for (i = 0; i < nbins; i++) { + mib[2] = i; + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz, + NULL, 0), 0, "Unexpected mallctlbymib failure"); + assert_zu_eq(extent_size, + extent_size_quantize_floor(extent_size), + "Small extent quantization should be a no-op " + "(extent_size=%zu)", extent_size); + assert_zu_eq(extent_size, + extent_size_quantize_ceil(extent_size), + "Small extent quantization should be a no-op " + "(extent_size=%zu)", extent_size); + } +} +TEST_END + +TEST_BEGIN(test_large_extent_size) { + bool cache_oblivious; + unsigned nlextents, i; size_t sz, extent_size_prev, ceil_prev; size_t mib[4]; size_t miblen = sizeof(mib) / sizeof(size_t); /* - * Iterate over all huge size classes, get their extent sizes, and + * Iterate over all large size classes, get their extent sizes, and * verify that the quantized size is the same as the extent size. 
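	 * (When config.cache_oblivious is enabled, each large extent carries
	 * one extra page of slack, hence the "+ PAGE" adjustment below.)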
*/ + sz = sizeof(bool); + assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious, + &sz, NULL, 0), 0, "Unexpected mallctl failure"); + sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, + assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, 0), 0, "Unexpected mallctl failure"); - assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); - for (i = 0; i < nhchunks; i++) { - size_t extent_size, floor, ceil; - + for (i = 0; i < nlextents; i++) { + size_t lextent_size, extent_size, floor, ceil; mib[2] = i; sz = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, + assert_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); + extent_size = cache_oblivious ? lextent_size + PAGE : + lextent_size; floor = extent_size_quantize_floor(extent_size); ceil = extent_size_quantize_ceil(extent_size); assert_zu_eq(extent_size, floor, "Extent quantization should be a no-op for precise size " - "(extent_size=%zu)", extent_size); + "(lextent_size=%zu, extent_size=%zu)", lextent_size, + extent_size); assert_zu_eq(extent_size, ceil, "Extent quantization should be a no-op for precise size " - "(extent_size=%zu)", extent_size); + "(lextent_size=%zu, extent_size=%zu)", lextent_size, + extent_size); if (i > 0) { assert_zu_eq(extent_size_prev, @@ -47,7 +89,7 @@ TEST_BEGIN(test_huge_extent_size) { ceil_prev, extent_size); } } - if (i + 1 < nhchunks) { + if (i + 1 < nlextents) { extent_size_prev = floor; ceil_prev = extent_size_quantize_ceil(extent_size + PAGE); @@ -93,6 +135,7 @@ TEST_END int main(void) { return test( - test_huge_extent_size, + test_small_extent_size, + test_large_extent_size, test_monotonic); } diff --git a/test/unit/fork.c b/test/unit/fork.c index c530797..afe2214 100644 --- a/test/unit/fork.c +++ b/test/unit/fork.c @@ -4,12 +4,24 @@ #include <sys/wait.h> #endif -TEST_BEGIN(test_fork) -{ +TEST_BEGIN(test_fork) { #ifndef _WIN32 void *p; pid_t pid; + /* Set up a manually managed arena for test. */ + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + + /* Migrate to the new arena. */ + unsigned old_arena_ind; + sz = sizeof(old_arena_ind); + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&arena_ind, sizeof(arena_ind)), 0, + "Unexpected mallctl() failure"); + p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() failure"); @@ -32,8 +44,9 @@ TEST_BEGIN(test_fork) /* Parent. 
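	 * The parent reaps the child; if fork() left allocator locks held in
	 * the child, the child would hang or die on a signal rather than
	 * exit cleanly, and the checks below would catch it.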
*/ while (true) { - if (waitpid(pid, &status, 0) == -1) + if (waitpid(pid, &status, 0) == -1) { test_fail("Unexpected waitpid() failure"); + } if (WIFSIGNALED(status)) { test_fail("Unexpected child termination due to " "signal %d", WTERMSIG(status)); @@ -56,9 +69,7 @@ TEST_BEGIN(test_fork) TEST_END int -main(void) -{ - - return (test( - test_fork)); +main(void) { + return test( + test_fork); } diff --git a/test/unit/hash.c b/test/unit/hash.c index 010c9d7..7cc034f 100644 --- a/test/unit/hash.c +++ b/test/unit/hash.c @@ -28,6 +28,7 @@ */ #include "test/jemalloc_test.h" +#include "jemalloc/internal/hash.h" typedef enum { hash_variant_x86_32, @@ -36,33 +37,28 @@ typedef enum { } hash_variant_t; static int -hash_variant_bits(hash_variant_t variant) -{ - +hash_variant_bits(hash_variant_t variant) { switch (variant) { - case hash_variant_x86_32: return (32); - case hash_variant_x86_128: return (128); - case hash_variant_x64_128: return (128); + case hash_variant_x86_32: return 32; + case hash_variant_x86_128: return 128; + case hash_variant_x64_128: return 128; default: not_reached(); } } static const char * -hash_variant_string(hash_variant_t variant) -{ - +hash_variant_string(hash_variant_t variant) { switch (variant) { - case hash_variant_x86_32: return ("hash_x86_32"); - case hash_variant_x86_128: return ("hash_x86_128"); - case hash_variant_x64_128: return ("hash_x64_128"); + case hash_variant_x86_32: return "hash_x86_32"; + case hash_variant_x86_128: return "hash_x86_128"; + case hash_variant_x64_128: return "hash_x64_128"; default: not_reached(); } } -#define KEY_SIZE 256 +#define KEY_SIZE 256 static void -hash_variant_verify_key(hash_variant_t variant, uint8_t *key) -{ +hash_variant_verify_key(hash_variant_t variant, uint8_t *key) { const int hashbytes = hash_variant_bits(variant) / 8; const int hashes_size = hashbytes * 256; VARIABLE_ARRAY(uint8_t, hashes, hashes_size); @@ -141,45 +137,37 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key) } static void -hash_variant_verify(hash_variant_t variant) -{ -#define MAX_ALIGN 16 +hash_variant_verify(hash_variant_t variant) { +#define MAX_ALIGN 16 uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)]; unsigned i; - for (i = 0; i < MAX_ALIGN; i++) + for (i = 0; i < MAX_ALIGN; i++) { hash_variant_verify_key(variant, &key[i]); + } #undef MAX_ALIGN } #undef KEY_SIZE -TEST_BEGIN(test_hash_x86_32) -{ - +TEST_BEGIN(test_hash_x86_32) { hash_variant_verify(hash_variant_x86_32); } TEST_END -TEST_BEGIN(test_hash_x86_128) -{ - +TEST_BEGIN(test_hash_x86_128) { hash_variant_verify(hash_variant_x86_128); } TEST_END -TEST_BEGIN(test_hash_x64_128) -{ - +TEST_BEGIN(test_hash_x64_128) { hash_variant_verify(hash_variant_x64_128); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_hash_x86_32, test_hash_x86_128, - test_hash_x64_128)); + test_hash_x64_128); } diff --git a/test/unit/hooks.c b/test/unit/hooks.c new file mode 100644 index 0000000..b70172e --- /dev/null +++ b/test/unit/hooks.c @@ -0,0 +1,38 @@ +#include "test/jemalloc_test.h" + +static bool hook_called = false; + +static void +hook() { + hook_called = true; +} + +static int +func_to_hook(int arg1, int arg2) { + return arg1 + arg2; +} + +#define func_to_hook JEMALLOC_HOOK(func_to_hook, hooks_libc_hook) + +TEST_BEGIN(unhooked_call) { + hooks_libc_hook = NULL; + hook_called = false; + assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); + assert_false(hook_called, "Nulling out hook didn't take."); +} +TEST_END + +TEST_BEGIN(hooked_call) { + hooks_libc_hook = 
&hook; + hook_called = false; + assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); + assert_true(hook_called, "Hook should have executed."); +} +TEST_END + +int +main(void) { + return test( + unhooked_call, + hooked_call); +} diff --git a/test/unit/junk.c b/test/unit/junk.c index bbd83fb..fd0e65b 100644 --- a/test/unit/junk.c +++ b/test/unit/junk.c @@ -1,22 +1,21 @@ #include "test/jemalloc_test.h" +#include "jemalloc/internal/util.h" + static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; -static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig; -static huge_dalloc_junk_t *huge_dalloc_junk_orig; +static large_dalloc_junk_t *large_dalloc_junk_orig; +static large_dalloc_maybe_junk_t *large_dalloc_maybe_junk_orig; static void *watch_for_junking; static bool saw_junking; static void -watch_junking(void *p) -{ - +watch_junking(void *p) { watch_for_junking = p; saw_junking = false; } static void -arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info) -{ +arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info) { size_t i; arena_dalloc_junk_small_orig(ptr, bin_info); @@ -25,52 +24,46 @@ arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info) "Missing junk fill for byte %zu/%zu of deallocated region", i, bin_info->reg_size); } - if (ptr == watch_for_junking) + if (ptr == watch_for_junking) { saw_junking = true; + } } static void -arena_dalloc_junk_large_intercept(void *ptr, size_t usize) -{ +large_dalloc_junk_intercept(void *ptr, size_t usize) { size_t i; - arena_dalloc_junk_large_orig(ptr, usize); + large_dalloc_junk_orig(ptr, usize); for (i = 0; i < usize; i++) { assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, "Missing junk fill for byte %zu/%zu of deallocated region", i, usize); } - if (ptr == watch_for_junking) + if (ptr == watch_for_junking) { saw_junking = true; + } } static void -huge_dalloc_junk_intercept(void *ptr, size_t usize) -{ - - huge_dalloc_junk_orig(ptr, usize); - /* - * The conditions under which junk filling actually occurs are nuanced - * enough that it doesn't make sense to duplicate the decision logic in - * test code, so don't actually check that the region is junk-filled. 
- */ - if (ptr == watch_for_junking) +large_dalloc_maybe_junk_intercept(void *ptr, size_t usize) { + large_dalloc_maybe_junk_orig(ptr, usize); + if (ptr == watch_for_junking) { saw_junking = true; + } } static void -test_junk(size_t sz_min, size_t sz_max) -{ +test_junk(size_t sz_min, size_t sz_max) { uint8_t *s; size_t sz_prev, sz, i; if (opt_junk_free) { arena_dalloc_junk_small_orig = arena_dalloc_junk_small; arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; - arena_dalloc_junk_large_orig = arena_dalloc_junk_large; - arena_dalloc_junk_large = arena_dalloc_junk_large_intercept; - huge_dalloc_junk_orig = huge_dalloc_junk; - huge_dalloc_junk = huge_dalloc_junk_intercept; + large_dalloc_junk_orig = large_dalloc_junk; + large_dalloc_junk = large_dalloc_junk_intercept; + large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk; + large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept; } sz_prev = 0; @@ -98,13 +91,21 @@ test_junk(size_t sz_min, size_t sz_max) } if (xallocx(s, sz+1, 0, 0) == sz) { + uint8_t *t; watch_junking(s); - s = (uint8_t *)rallocx(s, sz+1, 0); - assert_ptr_not_null((void *)s, + t = (uint8_t *)rallocx(s, sz+1, 0); + assert_ptr_not_null((void *)t, "Unexpected rallocx() failure"); - assert_true(!opt_junk_free || saw_junking, - "Expected region of size %zu to be junk-filled", - sz); + assert_zu_ge(sallocx(t, 0), sz+1, + "Unexpectedly small rallocx() result"); + if (!background_thread_enabled()) { + assert_ptr_ne(s, t, + "Unexpected in-place rallocx()"); + assert_true(!opt_junk_free || saw_junking, + "Expected region of size %zu to be " + "junk-filled", sz); + } + s = t; } } @@ -115,131 +116,26 @@ test_junk(size_t sz_min, size_t sz_max) if (opt_junk_free) { arena_dalloc_junk_small = arena_dalloc_junk_small_orig; - arena_dalloc_junk_large = arena_dalloc_junk_large_orig; - huge_dalloc_junk = huge_dalloc_junk_orig; + large_dalloc_junk = large_dalloc_junk_orig; + large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig; } } -TEST_BEGIN(test_junk_small) -{ - +TEST_BEGIN(test_junk_small) { test_skip_if(!config_fill); test_junk(1, SMALL_MAXCLASS-1); } TEST_END -TEST_BEGIN(test_junk_large) -{ - - test_skip_if(!config_fill); - test_junk(SMALL_MAXCLASS+1, large_maxclass); -} -TEST_END - -TEST_BEGIN(test_junk_huge) -{ - +TEST_BEGIN(test_junk_large) { test_skip_if(!config_fill); - test_junk(large_maxclass+1, chunksize*2); -} -TEST_END - -arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig; -static void *most_recently_trimmed; - -static size_t -shrink_size(size_t size) -{ - size_t shrink_size; - - for (shrink_size = size - 1; nallocx(shrink_size, 0) == size; - shrink_size--) - ; /* Do nothing. 
*/ - - return (shrink_size); -} - -static void -arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize) -{ - - arena_ralloc_junk_large_orig(ptr, old_usize, usize); - assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize"); - assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize"); - most_recently_trimmed = ptr; -} - -TEST_BEGIN(test_junk_large_ralloc_shrink) -{ - void *p1, *p2; - - p1 = mallocx(large_maxclass, 0); - assert_ptr_not_null(p1, "Unexpected mallocx() failure"); - - arena_ralloc_junk_large_orig = arena_ralloc_junk_large; - arena_ralloc_junk_large = arena_ralloc_junk_large_intercept; - - p2 = rallocx(p1, shrink_size(large_maxclass), 0); - assert_ptr_eq(p1, p2, "Unexpected move during shrink"); - - arena_ralloc_junk_large = arena_ralloc_junk_large_orig; - - assert_ptr_eq(most_recently_trimmed, p1, - "Expected trimmed portion of region to be junk-filled"); -} -TEST_END - -static bool detected_redzone_corruption; - -static void -arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, - size_t offset, uint8_t byte) -{ - - detected_redzone_corruption = true; -} - -TEST_BEGIN(test_junk_redzone) -{ - char *s; - arena_redzone_corruption_t *arena_redzone_corruption_orig; - - test_skip_if(!config_fill); - test_skip_if(!opt_junk_alloc || !opt_junk_free); - - arena_redzone_corruption_orig = arena_redzone_corruption; - arena_redzone_corruption = arena_redzone_corruption_replacement; - - /* Test underflow. */ - detected_redzone_corruption = false; - s = (char *)mallocx(1, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - s[-1] = 0xbb; - dallocx(s, 0); - assert_true(detected_redzone_corruption, - "Did not detect redzone corruption"); - - /* Test overflow. */ - detected_redzone_corruption = false; - s = (char *)mallocx(1, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - s[sallocx(s, 0)] = 0xbb; - dallocx(s, 0); - assert_true(detected_redzone_corruption, - "Did not detect redzone corruption"); - - arena_redzone_corruption = arena_redzone_corruption_orig; + test_junk(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1))); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_junk_small, - test_junk_large, - test_junk_huge, - test_junk_large_ralloc_shrink, - test_junk_redzone)); + test_junk_large); } diff --git a/test/unit/junk.sh b/test/unit/junk.sh index e19c313..97cd8ca 100644 --- a/test/unit/junk.sh +++ b/test/unit/junk.sh @@ -1,5 +1,5 @@ #!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then - export MALLOC_CONF="abort:false,zero:false,redzone:true,quarantine:0,junk:true" + export MALLOC_CONF="abort:false,zero:false,junk:true" fi diff --git a/test/unit/junk_alloc.sh b/test/unit/junk_alloc.sh index 984387d..e1008c2 100644 --- a/test/unit/junk_alloc.sh +++ b/test/unit/junk_alloc.sh @@ -1,5 +1,5 @@ #!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then - export MALLOC_CONF="abort:false,zero:false,redzone:true,quarantine:0,junk:alloc" + export MALLOC_CONF="abort:false,zero:false,junk:alloc" fi diff --git a/test/unit/junk_free.sh b/test/unit/junk_free.sh index a5c21a5..402196c 100644 --- a/test/unit/junk_free.sh +++ b/test/unit/junk_free.sh @@ -1,5 +1,5 @@ #!/bin/sh if [ "x${enable_fill}" = "x1" ] ; then - export MALLOC_CONF="abort:false,zero:false,redzone:true,quarantine:0,junk:free" + export MALLOC_CONF="abort:false,zero:false,junk:free" fi diff --git a/test/unit/lg_chunk.c b/test/unit/lg_chunk.c deleted file mode 100644 index d4f77b7..0000000 --- a/test/unit/lg_chunk.c +++ /dev/null 
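
test/unit/lg_chunk.c goes away because 5.0.0 drops chunks (and opt.lg_chunk)
entirely in favor of extents; likewise the quarantine/redzone options vanish
from the junk scripts above. The tuning knob the updated tests reach for
instead is per-arena decay, driven through mallctl. A minimal sketch of
setting the default dirty decay time, mirroring the calls used throughout the
rewritten tests (the 1000 ms value simply echoes decay.sh):

static void
set_default_dirty_decay(void) {
	ssize_t dirty_decay_ms = 1000;
	assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
	    (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
	    "Unexpected mallctl() failure");
}
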
@@ -1,19 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_lg_chunk_clamp) -{ - void *p; - - p = mallocx(1, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - dallocx(p, 0); -} -TEST_END - -int -main(void) -{ - - return (test( - test_lg_chunk_clamp)); -} diff --git a/test/unit/lg_chunk.sh b/test/unit/lg_chunk.sh deleted file mode 100644 index 103eef1..0000000 --- a/test/unit/lg_chunk.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -# Make sure that opt.lg_chunk clamping is sufficient. In practice, this test -# program will fail a debug assertion during initialization and abort (rather -# than the test soft-failing) if clamping is insufficient. -export MALLOC_CONF="lg_chunk:0" diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c index 3d1a740..f611654 100644 --- a/test/unit/mallctl.c +++ b/test/unit/mallctl.c @@ -1,7 +1,8 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_mallctl_errors) -{ +#include "jemalloc/internal/util.h" + +TEST_BEGIN(test_mallctl_errors) { uint64_t epoch; size_t sz; @@ -28,8 +29,7 @@ TEST_BEGIN(test_mallctl_errors) } TEST_END -TEST_BEGIN(test_mallctlnametomib_errors) -{ +TEST_BEGIN(test_mallctlnametomib_errors) { size_t mib[1]; size_t miblen; @@ -39,8 +39,7 @@ TEST_BEGIN(test_mallctlnametomib_errors) } TEST_END -TEST_BEGIN(test_mallctlbymib_errors) -{ +TEST_BEGIN(test_mallctlbymib_errors) { uint64_t epoch; size_t sz; size_t mib[1]; @@ -76,8 +75,7 @@ TEST_BEGIN(test_mallctlbymib_errors) } TEST_END -TEST_BEGIN(test_mallctl_read_write) -{ +TEST_BEGIN(test_mallctl_read_write) { uint64_t old_epoch, new_epoch; size_t sz = sizeof(old_epoch); @@ -104,8 +102,7 @@ TEST_BEGIN(test_mallctl_read_write) } TEST_END -TEST_BEGIN(test_mallctlnametomib_short_mib) -{ +TEST_BEGIN(test_mallctlnametomib_short_mib) { size_t mib[4]; size_t miblen; @@ -119,10 +116,8 @@ TEST_BEGIN(test_mallctlnametomib_short_mib) } TEST_END -TEST_BEGIN(test_mallctl_config) -{ - -#define TEST_MALLCTL_CONFIG(config, t) do { \ +TEST_BEGIN(test_mallctl_config) { +#define TEST_MALLCTL_CONFIG(config, t) do { \ t oldval; \ size_t sz = sizeof(oldval); \ assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \ @@ -136,27 +131,21 @@ TEST_BEGIN(test_mallctl_config) TEST_MALLCTL_CONFIG(fill, bool); TEST_MALLCTL_CONFIG(lazy_lock, bool); TEST_MALLCTL_CONFIG(malloc_conf, const char *); - TEST_MALLCTL_CONFIG(munmap, bool); TEST_MALLCTL_CONFIG(prof, bool); TEST_MALLCTL_CONFIG(prof_libgcc, bool); TEST_MALLCTL_CONFIG(prof_libunwind, bool); TEST_MALLCTL_CONFIG(stats, bool); - TEST_MALLCTL_CONFIG(tcache, bool); - TEST_MALLCTL_CONFIG(thp, bool); - TEST_MALLCTL_CONFIG(tls, bool); TEST_MALLCTL_CONFIG(utrace, bool); - TEST_MALLCTL_CONFIG(valgrind, bool); TEST_MALLCTL_CONFIG(xmalloc, bool); #undef TEST_MALLCTL_CONFIG } TEST_END -TEST_BEGIN(test_mallctl_opt) -{ +TEST_BEGIN(test_mallctl_opt) { bool config_always = true; -#define TEST_MALLCTL_OPT(t, opt, config) do { \ +#define TEST_MALLCTL_OPT(t, opt, config) do { \ t oldval; \ size_t sz = sizeof(oldval); \ int expected = config_##config ? 
0 : ENOENT; \ @@ -168,22 +157,20 @@ TEST_BEGIN(test_mallctl_opt) } while (0) TEST_MALLCTL_OPT(bool, abort, always); - TEST_MALLCTL_OPT(size_t, lg_chunk, always); + TEST_MALLCTL_OPT(bool, retain, always); TEST_MALLCTL_OPT(const char *, dss, always); TEST_MALLCTL_OPT(unsigned, narenas, always); - TEST_MALLCTL_OPT(const char *, purge, always); - TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always); - TEST_MALLCTL_OPT(ssize_t, decay_time, always); + TEST_MALLCTL_OPT(const char *, percpu_arena, always); + TEST_MALLCTL_OPT(bool, background_thread, always); + TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always); + TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always); TEST_MALLCTL_OPT(bool, stats_print, always); TEST_MALLCTL_OPT(const char *, junk, fill); - TEST_MALLCTL_OPT(size_t, quarantine, fill); - TEST_MALLCTL_OPT(bool, redzone, fill); TEST_MALLCTL_OPT(bool, zero, fill); TEST_MALLCTL_OPT(bool, utrace, utrace); TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); - TEST_MALLCTL_OPT(bool, tcache, tcache); - TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache); - TEST_MALLCTL_OPT(bool, thp, thp); + TEST_MALLCTL_OPT(bool, tcache, always); + TEST_MALLCTL_OPT(size_t, lg_tcache_max, always); TEST_MALLCTL_OPT(bool, prof, prof); TEST_MALLCTL_OPT(const char *, prof_prefix, prof); TEST_MALLCTL_OPT(bool, prof_active, prof); @@ -198,8 +185,7 @@ TEST_BEGIN(test_mallctl_opt) } TEST_END -TEST_BEGIN(test_manpage_example) -{ +TEST_BEGIN(test_manpage_example) { unsigned nbins, i; size_t mib[4]; size_t len, miblen; @@ -223,16 +209,13 @@ TEST_BEGIN(test_manpage_example) } TEST_END -TEST_BEGIN(test_tcache_none) -{ - void *p0, *q, *p1; - - test_skip_if(!config_tcache); +TEST_BEGIN(test_tcache_none) { + test_skip_if(!opt_tcache); /* Allocate p and q. */ - p0 = mallocx(42, 0); + void *p0 = mallocx(42, 0); assert_ptr_not_null(p0, "Unexpected mallocx() failure"); - q = mallocx(42, 0); + void *q = mallocx(42, 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); /* Deallocate p and q, but bypass the tcache for q. */ @@ -240,7 +223,7 @@ TEST_BEGIN(test_tcache_none) dallocx(q, MALLOCX_TCACHE_NONE); /* Make sure that tcache-based allocation returns p, not q. */ - p1 = mallocx(42, 0); + void *p1 = mallocx(42, 0); assert_ptr_not_null(p1, "Unexpected mallocx() failure"); assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region"); @@ -249,17 +232,14 @@ TEST_BEGIN(test_tcache_none) } TEST_END -TEST_BEGIN(test_tcache) -{ -#define NTCACHES 10 +TEST_BEGIN(test_tcache) { +#define NTCACHES 10 unsigned tis[NTCACHES]; void *ps[NTCACHES]; void *qs[NTCACHES]; unsigned i; size_t sz, psz, qsz; - test_skip_if(!config_tcache); - psz = 42; qsz = nallocx(psz, 0) + 1; @@ -321,11 +301,13 @@ TEST_BEGIN(test_tcache) assert_ptr_eq(qs[i], q0, "Expected rallocx() to allocate cached region, i=%u", i); /* Avoid undefined behavior in case of test failure. */ - if (qs[i] == NULL) + if (qs[i] == NULL) { qs[i] = ps[i]; + } } - for (i = 0; i < NTCACHES; i++) + for (i = 0; i < NTCACHES; i++) { dallocx(qs[i], MALLOCX_TCACHE(tis[i])); + } /* Flush some non-empty tcaches. 
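	 * (tcache.flush empties a cache but leaves it usable; the remaining
	 * caches are presumably left populated so that implicit teardown is
	 * exercised as well.)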
*/ for (i = 0; i < NTCACHES/2; i++) { @@ -343,95 +325,145 @@ TEST_BEGIN(test_tcache) } TEST_END -TEST_BEGIN(test_thread_arena) -{ - unsigned arena_old, arena_new, narenas; - size_t sz = sizeof(unsigned); +TEST_BEGIN(test_thread_arena) { + unsigned old_arena_ind, new_arena_ind, narenas; + + const char *opa; + size_t sz = sizeof(opa); + assert_d_eq(mallctl("opt.percpu_arena", &opa, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); - arena_new = narenas - 1; - assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz, - (void *)&arena_new, sizeof(unsigned)), 0, + + if (strcmp(opa, "disabled") == 0) { + new_arena_ind = narenas - 1; + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&new_arena_ind, sizeof(unsigned)), 0, + "Unexpected mallctl() failure"); + new_arena_ind = 0; + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&new_arena_ind, sizeof(unsigned)), 0, + "Unexpected mallctl() failure"); + } else { + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1; + if (old_arena_ind != new_arena_ind) { + assert_d_eq(mallctl("thread.arena", + (void *)&old_arena_ind, &sz, (void *)&new_arena_ind, + sizeof(unsigned)), EPERM, "thread.arena ctl " + "should not be allowed with percpu arena"); + } + } +} +TEST_END + +TEST_BEGIN(test_arena_i_initialized) { + unsigned narenas, i; + size_t sz; + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + bool initialized; + + sz = sizeof(narenas); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + + assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + for (i = 0; i < narenas; i++) { + mib[1] = i; + sz = sizeof(initialized); + assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + } + + mib[1] = MALLCTL_ARENAS_ALL; + sz = sizeof(initialized); + assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); - arena_new = 0; - assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz, - (void *)&arena_new, sizeof(unsigned)), 0, + assert_true(initialized, + "Merged arena statistics should always be initialized"); + + /* Equivalent to the above but using mallctl() directly. */ + sz = sizeof(initialized); + assert_d_eq(mallctl( + "arena." 
STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized", + (void *)&initialized, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); + assert_true(initialized, + "Merged arena statistics should always be initialized"); } TEST_END -TEST_BEGIN(test_arena_i_lg_dirty_mult) -{ - ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; +TEST_BEGIN(test_arena_i_dirty_decay_ms) { + ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; size_t sz = sizeof(ssize_t); - test_skip_if(opt_purge != purge_mode_ratio); - - assert_d_eq(mallctl("arena.0.lg_dirty_mult", - (void *)&orig_lg_dirty_mult, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arena.0.dirty_decay_ms", + (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); - lg_dirty_mult = -2; - assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, - (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, + dirty_decay_ms = -2; + assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); - lg_dirty_mult = (sizeof(size_t) << 3); - assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, - (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, - "Unexpected mallctl() success"); + dirty_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); - for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; - lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult - = lg_dirty_mult, lg_dirty_mult++) { - ssize_t old_lg_dirty_mult; + for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; + dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, + dirty_decay_ms++) { + ssize_t old_dirty_decay_ms; - assert_d_eq(mallctl("arena.0.lg_dirty_mult", - (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult, + assert_d_eq(mallctl("arena.0.dirty_decay_ms", + (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); - assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, - "Unexpected old arena.0.lg_dirty_mult"); + assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, + "Unexpected old arena.0.dirty_decay_ms"); } } TEST_END -TEST_BEGIN(test_arena_i_decay_time) -{ - ssize_t decay_time, orig_decay_time, prev_decay_time; +TEST_BEGIN(test_arena_i_muzzy_decay_ms) { + ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; size_t sz = sizeof(ssize_t); - test_skip_if(opt_purge != purge_mode_decay); - - assert_d_eq(mallctl("arena.0.decay_time", (void *)&orig_decay_time, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", + (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); - decay_time = -2; - assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL, - (void *)&decay_time, sizeof(ssize_t)), EFAULT, + muzzy_decay_ms = -2; + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); - decay_time = 0x7fffffff; - assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL, - (void *)&decay_time, sizeof(ssize_t)), 0, + muzzy_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); - for (prev_decay_time = decay_time, decay_time = -1; - decay_time < 20; prev_decay_time = decay_time, decay_time++) { - ssize_t old_decay_time; + for 
(prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; + muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, + muzzy_decay_ms++) { + ssize_t old_muzzy_decay_ms; - assert_d_eq(mallctl("arena.0.decay_time", (void *)&old_decay_time, - &sz, (void *)&decay_time, sizeof(ssize_t)), 0, - "Unexpected mallctl() failure"); - assert_zd_eq(old_decay_time, prev_decay_time, - "Unexpected old arena.0.decay_time"); + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", + (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, + "Unexpected old arena.0.muzzy_decay_ms"); } } TEST_END -TEST_BEGIN(test_arena_i_purge) -{ +TEST_BEGIN(test_arena_i_purge) { unsigned narenas; size_t sz = sizeof(unsigned); size_t mib[3]; @@ -447,11 +479,14 @@ TEST_BEGIN(test_arena_i_purge) mib[1] = narenas; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); + + mib[1] = MALLCTL_ARENAS_ALL; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); } TEST_END -TEST_BEGIN(test_arena_i_decay) -{ +TEST_BEGIN(test_arena_i_decay) { unsigned narenas; size_t sz = sizeof(unsigned); size_t mib[3]; @@ -467,11 +502,14 @@ TEST_BEGIN(test_arena_i_decay) mib[1] = narenas; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); + + mib[1] = MALLCTL_ARENAS_ALL; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); } TEST_END -TEST_BEGIN(test_arena_i_dss) -{ +TEST_BEGIN(test_arena_i_dss) { const char *dss_prec_old, *dss_prec_new; size_t sz = sizeof(dss_prec_old); size_t mib[3]; @@ -516,94 +554,72 @@ TEST_BEGIN(test_arena_i_dss) } TEST_END -TEST_BEGIN(test_arenas_initialized) -{ - unsigned narenas; - size_t sz = sizeof(narenas); - - assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), - 0, "Unexpected mallctl() failure"); - { - VARIABLE_ARRAY(bool, initialized, narenas); - - sz = narenas * sizeof(bool); - assert_d_eq(mallctl("arenas.initialized", (void *)initialized, - &sz, NULL, 0), 0, "Unexpected mallctl() failure"); - } -} -TEST_END - -TEST_BEGIN(test_arenas_lg_dirty_mult) -{ - ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; +TEST_BEGIN(test_arenas_dirty_decay_ms) { + ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; size_t sz = sizeof(ssize_t); - test_skip_if(opt_purge != purge_mode_ratio); - - assert_d_eq(mallctl("arenas.lg_dirty_mult", (void *)&orig_lg_dirty_mult, - &sz, NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.dirty_decay_ms", + (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); - lg_dirty_mult = -2; - assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, - (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, + dirty_decay_ms = -2; + assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); - lg_dirty_mult = (sizeof(size_t) << 3); - assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, - (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, - "Unexpected mallctl() success"); + dirty_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, + "Expected mallctl() failure"); - for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; - lg_dirty_mult < 
(ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult = - lg_dirty_mult, lg_dirty_mult++) { - ssize_t old_lg_dirty_mult; + for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; + dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, + dirty_decay_ms++) { + ssize_t old_dirty_decay_ms; - assert_d_eq(mallctl("arenas.lg_dirty_mult", - (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult, + assert_d_eq(mallctl("arenas.dirty_decay_ms", + (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); - assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, - "Unexpected old arenas.lg_dirty_mult"); + assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, + "Unexpected old arenas.dirty_decay_ms"); } } TEST_END -TEST_BEGIN(test_arenas_decay_time) -{ - ssize_t decay_time, orig_decay_time, prev_decay_time; +TEST_BEGIN(test_arenas_muzzy_decay_ms) { + ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; size_t sz = sizeof(ssize_t); - test_skip_if(opt_purge != purge_mode_decay); - - assert_d_eq(mallctl("arenas.decay_time", (void *)&orig_decay_time, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.muzzy_decay_ms", + (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); - decay_time = -2; - assert_d_eq(mallctl("arenas.decay_time", NULL, NULL, - (void *)&decay_time, sizeof(ssize_t)), EFAULT, + muzzy_decay_ms = -2; + assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); - decay_time = 0x7fffffff; - assert_d_eq(mallctl("arenas.decay_time", NULL, NULL, - (void *)&decay_time, sizeof(ssize_t)), 0, + muzzy_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Expected mallctl() failure"); - for (prev_decay_time = decay_time, decay_time = -1; - decay_time < 20; prev_decay_time = decay_time, decay_time++) { - ssize_t old_decay_time; + for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; + muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, + muzzy_decay_ms++) { + ssize_t old_muzzy_decay_ms; - assert_d_eq(mallctl("arenas.decay_time", - (void *)&old_decay_time, &sz, (void *)&decay_time, + assert_d_eq(mallctl("arenas.muzzy_decay_ms", + (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); - assert_zd_eq(old_decay_time, prev_decay_time, - "Unexpected old arenas.decay_time"); + assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, + "Unexpected old arenas.muzzy_decay_ms"); } } TEST_END -TEST_BEGIN(test_arenas_constants) -{ - -#define TEST_ARENAS_CONSTANT(t, name, expected) do { \ +TEST_BEGIN(test_arenas_constants) { +#define TEST_ARENAS_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \ @@ -614,17 +630,14 @@ TEST_BEGIN(test_arenas_constants) TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); TEST_ARENAS_CONSTANT(size_t, page, PAGE); TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); - TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses); - TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses); + TEST_ARENAS_CONSTANT(unsigned, nlextents, NSIZES - NBINS); #undef TEST_ARENAS_CONSTANT } TEST_END -TEST_BEGIN(test_arenas_bin_constants) -{ - -#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ +TEST_BEGIN(test_arenas_bin_constants) { +#define TEST_ARENAS_BIN_CONSTANT(t, name, 
expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \ @@ -634,54 +647,35 @@ TEST_BEGIN(test_arenas_bin_constants) TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size); TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs); - TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size); + TEST_ARENAS_BIN_CONSTANT(size_t, slab_size, + arena_bin_info[0].slab_size); #undef TEST_ARENAS_BIN_CONSTANT } TEST_END -TEST_BEGIN(test_arenas_lrun_constants) -{ - -#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \ +TEST_BEGIN(test_arenas_lextent_constants) { +#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.lrun.0."#name, (void *)&name, &sz, \ - NULL, 0), 0, "Unexpected mallctl() failure"); \ - assert_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) - - TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS); - -#undef TEST_ARENAS_LRUN_CONSTANT -} -TEST_END - -TEST_BEGIN(test_arenas_hchunk_constants) -{ - -#define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.hchunk.0."#name, (void *)&name, \ + assert_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \ &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) - TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize); + TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, LARGE_MINCLASS); -#undef TEST_ARENAS_HCHUNK_CONSTANT +#undef TEST_ARENAS_LEXTENT_CONSTANT } TEST_END -TEST_BEGIN(test_arenas_extend) -{ +TEST_BEGIN(test_arenas_create) { unsigned narenas_before, arena, narenas_after; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); - assert_d_eq(mallctl("arenas.extend", (void *)&arena, &sz, NULL, 0), 0, + assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); @@ -692,10 +686,8 @@ TEST_BEGIN(test_arenas_extend) } TEST_END -TEST_BEGIN(test_stats_arenas) -{ - -#define TEST_STATS_ARENAS(t, name) do { \ +TEST_BEGIN(test_stats_arenas) { +#define TEST_STATS_ARENAS(t, name) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \ @@ -704,8 +696,8 @@ TEST_BEGIN(test_stats_arenas) TEST_STATS_ARENAS(unsigned, nthreads); TEST_STATS_ARENAS(const char *, dss); - TEST_STATS_ARENAS(ssize_t, lg_dirty_mult); - TEST_STATS_ARENAS(ssize_t, decay_time); + TEST_STATS_ARENAS(ssize_t, dirty_decay_ms); + TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms); TEST_STATS_ARENAS(size_t, pactive); TEST_STATS_ARENAS(size_t, pdirty); @@ -714,10 +706,8 @@ TEST_BEGIN(test_stats_arenas) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_mallctl_errors, test_mallctlnametomib_errors, test_mallctlbymib_errors, @@ -729,18 +719,17 @@ main(void) test_tcache_none, test_tcache, test_thread_arena, - test_arena_i_lg_dirty_mult, - test_arena_i_decay_time, + test_arena_i_initialized, + test_arena_i_dirty_decay_ms, + test_arena_i_muzzy_decay_ms, test_arena_i_purge, test_arena_i_decay, test_arena_i_dss, - test_arenas_initialized, - test_arenas_lg_dirty_mult, - test_arenas_decay_time, + test_arenas_dirty_decay_ms, + test_arenas_muzzy_decay_ms, 
test_arenas_constants, test_arenas_bin_constants, - test_arenas_lrun_constants, - test_arenas_hchunk_constants, - test_arenas_extend, - test_stats_arenas)); + test_arenas_lextent_constants, + test_arenas_create, + test_stats_arenas); } diff --git a/test/unit/util.c b/test/unit/malloc_io.c index b1f9abd..79ba7fc 100644 --- a/test/unit/util.c +++ b/test/unit/malloc_io.c @@ -1,59 +1,6 @@ #include "test/jemalloc_test.h" -#define TEST_POW2_CEIL(t, suf, pri) do { \ - unsigned i, pow2; \ - t x; \ - \ - assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ - \ - for (i = 0; i < sizeof(t) * 8; i++) { \ - assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \ - << i, "Unexpected result"); \ - } \ - \ - for (i = 2; i < sizeof(t) * 8; i++) { \ - assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ - ((t)1) << i, "Unexpected result"); \ - } \ - \ - for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ - assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ - ((t)1) << (i+1), "Unexpected result"); \ - } \ - \ - for (pow2 = 1; pow2 < 25; pow2++) { \ - for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \ - x++) { \ - assert_##suf##_eq(pow2_ceil_##suf(x), \ - ((t)1) << pow2, \ - "Unexpected result, x=%"pri, x); \ - } \ - } \ -} while (0) - -TEST_BEGIN(test_pow2_ceil_u64) -{ - - TEST_POW2_CEIL(uint64_t, u64, FMTu64); -} -TEST_END - -TEST_BEGIN(test_pow2_ceil_u32) -{ - - TEST_POW2_CEIL(uint32_t, u32, FMTu32); -} -TEST_END - -TEST_BEGIN(test_pow2_ceil_zu) -{ - - TEST_POW2_CEIL(size_t, zu, "zu"); -} -TEST_END - -TEST_BEGIN(test_malloc_strtoumax_no_endptr) -{ +TEST_BEGIN(test_malloc_strtoumax_no_endptr) { int err; set_errno(0); @@ -63,8 +10,7 @@ TEST_BEGIN(test_malloc_strtoumax_no_endptr) } TEST_END -TEST_BEGIN(test_malloc_strtoumax) -{ +TEST_BEGIN(test_malloc_strtoumax) { struct test_s { const char *input; const char *expected_remainder; @@ -73,9 +19,9 @@ TEST_BEGIN(test_malloc_strtoumax) const char *expected_errno_name; uintmax_t expected_x; }; -#define ERR(e) e, #e -#define KUMAX(x) ((uintmax_t)x##ULL) -#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL) +#define ERR(e) e, #e +#define KUMAX(x) ((uintmax_t)x##ULL) +#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL) struct test_s tests[] = { {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, @@ -158,13 +104,12 @@ TEST_BEGIN(test_malloc_strtoumax) } TEST_END -TEST_BEGIN(test_malloc_snprintf_truncated) -{ -#define BUFLEN 15 +TEST_BEGIN(test_malloc_snprintf_truncated) { +#define BUFLEN 15 char buf[BUFLEN]; size_t result; size_t len; -#define TEST(expected_str_untruncated, ...) do { \ +#define TEST(expected_str_untruncated, ...) do { \ result = malloc_snprintf(buf, len, __VA_ARGS__); \ assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ "Unexpected string inequality (\"%s\" vs \"%s\")", \ @@ -191,12 +136,11 @@ TEST_BEGIN(test_malloc_snprintf_truncated) } TEST_END -TEST_BEGIN(test_malloc_snprintf) -{ -#define BUFLEN 128 +TEST_BEGIN(test_malloc_snprintf) { +#define BUFLEN 128 char buf[BUFLEN]; size_t result; -#define TEST(expected_str, ...) do { \ +#define TEST(expected_str, ...) 
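/*
 * Aside: test_malloc_snprintf_truncated above depends on the snprintf
 * contract that the return value is the length the output *would* have had
 * without truncation, which lets callers size a retry buffer.  C99 snprintf
 * makes the same guarantee; a self-contained sketch using the standard
 * function (malloc_snprintf itself is jemalloc-internal):
 */
#include <stdio.h>
#include <stdlib.h>

static char *
format_alloc(int value) {
    /* First pass: measure with a zero-length buffer. */
    int need = snprintf(NULL, 0, "value=%d", value);
    if (need < 0) {
        return NULL;
    }
    char *buf = malloc((size_t)need + 1);
    if (buf != NULL) {
        /* Second pass: actually format into the right-sized buffer. */
        snprintf(buf, (size_t)need + 1, "value=%d", value);
    }
    return buf;
}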
do { \ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ assert_str_eq(buf, expected_str, "Unexpected output"); \ assert_zu_eq(result, strlen(expected_str), "Unexpected result");\ @@ -305,15 +249,10 @@ TEST_BEGIN(test_malloc_snprintf) TEST_END int -main(void) -{ - - return (test( - test_pow2_ceil_u64, - test_pow2_ceil_u32, - test_pow2_ceil_zu, +main(void) { + return test( test_malloc_strtoumax_no_endptr, test_malloc_strtoumax, test_malloc_snprintf_truncated, - test_malloc_snprintf)); + test_malloc_snprintf); } diff --git a/test/unit/math.c b/test/unit/math.c index adb72be..09ef20c 100644 --- a/test/unit/math.c +++ b/test/unit/math.c @@ -1,7 +1,7 @@ #include "test/jemalloc_test.h" -#define MAX_REL_ERR 1.0e-9 -#define MAX_ABS_ERR 1.0e-9 +#define MAX_REL_ERR 1.0e-9 +#define MAX_ABS_ERR 1.0e-9 #include <float.h> @@ -10,34 +10,33 @@ #endif #ifndef INFINITY -#define INFINITY (DBL_MAX + DBL_MAX) +#define INFINITY (DBL_MAX + DBL_MAX) #endif static bool -double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) -{ +double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) { double rel_err; - if (fabs(a - b) < max_abs_err) - return (true); + if (fabs(a - b) < max_abs_err) { + return true; + } rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); return (rel_err < max_rel_err); } static uint64_t -factorial(unsigned x) -{ +factorial(unsigned x) { uint64_t ret = 1; unsigned i; - for (i = 2; i <= x; i++) + for (i = 2; i <= x; i++) { ret *= (uint64_t)i; + } - return (ret); + return ret; } -TEST_BEGIN(test_ln_gamma_factorial) -{ +TEST_BEGIN(test_ln_gamma_factorial) { unsigned x; /* exp(ln_gamma(x)) == (x-1)! for integer x. */ @@ -188,8 +187,7 @@ static const double ln_gamma_misc_expected[] = { 359.13420536957539753 }; -TEST_BEGIN(test_ln_gamma_misc) -{ +TEST_BEGIN(test_ln_gamma_misc) { unsigned i; for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { @@ -239,8 +237,7 @@ static const double pt_norm_expected[] = { 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 }; -TEST_BEGIN(test_pt_norm) -{ +TEST_BEGIN(test_pt_norm) { unsigned i; for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { @@ -289,8 +286,7 @@ static const double pt_chi2_expected[] = { 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 }; -TEST_BEGIN(test_pt_chi2) -{ +TEST_BEGIN(test_pt_chi2) { unsigned i, j; unsigned e = 0; @@ -351,8 +347,7 @@ static const double pt_gamma_expected[] = { 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 }; -TEST_BEGIN(test_pt_gamma_shape) -{ +TEST_BEGIN(test_pt_gamma_shape) { unsigned i, j; unsigned e = 0; @@ -371,8 +366,7 @@ TEST_BEGIN(test_pt_gamma_shape) } TEST_END -TEST_BEGIN(test_pt_gamma_scale) -{ +TEST_BEGIN(test_pt_gamma_scale) { double shape = 1.0; double ln_gamma_shape = ln_gamma(shape); @@ -385,14 +379,12 @@ TEST_BEGIN(test_pt_gamma_scale) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_ln_gamma_factorial, test_ln_gamma_misc, test_pt_norm, test_pt_chi2, test_pt_gamma_shape, - test_pt_gamma_scale)); + test_pt_gamma_scale); } diff --git a/test/unit/mq.c b/test/unit/mq.c index bde2a48..57a4d54 100644 --- a/test/unit/mq.c +++ b/test/unit/mq.c @@ -1,7 +1,7 @@ #include "test/jemalloc_test.h" -#define NSENDERS 3 -#define NMSGS 100000 +#define NSENDERS 3 +#define NMSGS 100000 typedef struct mq_msg_s mq_msg_t; struct mq_msg_s { @@ -9,8 +9,7 @@ struct mq_msg_s { }; mq_gen(static, mq_, mq_t, mq_msg_t, link) -TEST_BEGIN(test_mq_basic) -{ +TEST_BEGIN(test_mq_basic) { mq_t mq; 
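/*
 * Aside: double_eq_rel() above layers an absolute tolerance under a
 * relative one: near zero the relative error (a-b)/b blows up even for
 * tiny differences, so the absolute check handles that region, while the
 * relative check scales with magnitude elsewhere.  The same idea restated
 * in a self-contained form (illustrative only):
 */
#include <math.h>
#include <stdbool.h>

static bool
double_approx_eq(double a, double b, double rel_tol, double abs_tol) {
    if (fabs(a - b) < abs_tol) {
        return true;    /* near-zero case: absolute tolerance */
    }
    /* Divide by the larger magnitude so the ratio is well defined. */
    double denom = (fabs(b) > fabs(a)) ? fabs(b) : fabs(a);
    return (fabs(a - b) / denom) < rel_tol;
}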
mq_msg_t msg; @@ -31,8 +30,7 @@ TEST_BEGIN(test_mq_basic) TEST_END static void * -thd_receiver_start(void *arg) -{ +thd_receiver_start(void *arg) { mq_t *mq = (mq_t *)arg; unsigned i; @@ -41,12 +39,11 @@ thd_receiver_start(void *arg) assert_ptr_not_null(msg, "mq_get() should never return NULL"); dallocx(msg, 0); } - return (NULL); + return NULL; } static void * -thd_sender_start(void *arg) -{ +thd_sender_start(void *arg) { mq_t *mq = (mq_t *)arg; unsigned i; @@ -58,11 +55,10 @@ thd_sender_start(void *arg) msg = (mq_msg_t *)p; mq_put(mq, msg); } - return (NULL); + return NULL; } -TEST_BEGIN(test_mq_threaded) -{ +TEST_BEGIN(test_mq_threaded) { mq_t mq; thd_t receiver; thd_t senders[NSENDERS]; @@ -71,23 +67,23 @@ TEST_BEGIN(test_mq_threaded) assert_false(mq_init(&mq), "Unexpected mq_init() failure"); thd_create(&receiver, thd_receiver_start, (void *)&mq); - for (i = 0; i < NSENDERS; i++) + for (i = 0; i < NSENDERS; i++) { thd_create(&senders[i], thd_sender_start, (void *)&mq); + } thd_join(receiver, NULL); - for (i = 0; i < NSENDERS; i++) + for (i = 0; i < NSENDERS; i++) { thd_join(senders[i], NULL); + } mq_fini(&mq); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_mq_basic, - test_mq_threaded)); + test_mq_threaded); } diff --git a/test/unit/mtx.c b/test/unit/mtx.c index 96ff694..424587b 100644 --- a/test/unit/mtx.c +++ b/test/unit/mtx.c @@ -1,10 +1,9 @@ #include "test/jemalloc_test.h" -#define NTHREADS 2 -#define NINCRS 2000000 +#define NTHREADS 2 +#define NINCRS 2000000 -TEST_BEGIN(test_mtx_basic) -{ +TEST_BEGIN(test_mtx_basic) { mtx_t mtx; assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure"); @@ -20,8 +19,7 @@ typedef struct { } thd_start_arg_t; static void * -thd_start(void *varg) -{ +thd_start(void *varg) { thd_start_arg_t *arg = (thd_start_arg_t *)varg; unsigned i; @@ -30,31 +28,30 @@ thd_start(void *varg) arg->x++; mtx_unlock(&arg->mtx); } - return (NULL); + return NULL; } -TEST_BEGIN(test_mtx_race) -{ +TEST_BEGIN(test_mtx_race) { thd_start_arg_t arg; thd_t thds[NTHREADS]; unsigned i; assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); arg.x = 0; - for (i = 0; i < NTHREADS; i++) + for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, (void *)&arg); - for (i = 0; i < NTHREADS; i++) + } + for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); + } assert_u_eq(arg.x, NTHREADS * NINCRS, "Race-related counter corruption"); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_mtx_basic, - test_mtx_race)); + test_mtx_race); } diff --git a/test/unit/nstime.c b/test/unit/nstime.c index 0368bc2..f313780 100644 --- a/test/unit/nstime.c +++ b/test/unit/nstime.c @@ -1,9 +1,8 @@ #include "test/jemalloc_test.h" -#define BILLION UINT64_C(1000000000) +#define BILLION UINT64_C(1000000000) -TEST_BEGIN(test_nstime_init) -{ +TEST_BEGIN(test_nstime_init) { nstime_t nst; nstime_init(&nst, 42000000043); @@ -13,8 +12,7 @@ TEST_BEGIN(test_nstime_init) } TEST_END -TEST_BEGIN(test_nstime_init2) -{ +TEST_BEGIN(test_nstime_init2) { nstime_t nst; nstime_init2(&nst, 42, 43); @@ -23,8 +21,7 @@ TEST_BEGIN(test_nstime_init2) } TEST_END -TEST_BEGIN(test_nstime_copy) -{ +TEST_BEGIN(test_nstime_copy) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); @@ -35,8 +32,7 @@ TEST_BEGIN(test_nstime_copy) } TEST_END -TEST_BEGIN(test_nstime_compare) -{ +TEST_BEGIN(test_nstime_compare) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); @@ -70,8 +66,7 @@ TEST_BEGIN(test_nstime_compare) } TEST_END -TEST_BEGIN(test_nstime_add) -{ 
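/*
 * Aside: test_mtx_race above is the classic shared-counter lock check: N
 * threads each perform M locked increments, and the final count must be
 * exactly N*M -- any lost update means mutual exclusion failed.  The same
 * pattern with plain POSIX primitives (jemalloc's thd_t/mtx_t are thin
 * portability wrappers over these):
 */
#include <pthread.h>

#define N_INCRS 2000000

static pthread_mutex_t ctr_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long ctr;

static void *
incr_thread(void *arg) {
    (void)arg;
    for (unsigned i = 0; i < N_INCRS; i++) {
        pthread_mutex_lock(&ctr_lock);
        ctr++;
        pthread_mutex_unlock(&ctr_lock);
    }
    return NULL;
}
/* After pthread_create()ing N of these and joining them all, ctr must
 * equal N * N_INCRS. */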
+TEST_BEGIN(test_nstime_add) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); @@ -90,8 +85,24 @@ TEST_BEGIN(test_nstime_add) } TEST_END -TEST_BEGIN(test_nstime_subtract) -{ +TEST_BEGIN(test_nstime_iadd) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, BILLION - 1); + nstime_iadd(&nsta, 1); + nstime_init2(&nstb, 43, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); + + nstime_init2(&nsta, 42, 1); + nstime_iadd(&nsta, BILLION + 1); + nstime_init2(&nstb, 43, 2); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); +} +TEST_END + +TEST_BEGIN(test_nstime_subtract) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); @@ -110,8 +121,24 @@ TEST_BEGIN(test_nstime_subtract) } TEST_END -TEST_BEGIN(test_nstime_imultiply) -{ +TEST_BEGIN(test_nstime_isubtract) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_isubtract(&nsta, 42*BILLION + 43); + nstime_init(&nstb, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); + + nstime_init2(&nsta, 42, 43); + nstime_isubtract(&nsta, 41*BILLION + 44); + nstime_init2(&nstb, 0, BILLION - 1); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); +} +TEST_END + +TEST_BEGIN(test_nstime_imultiply) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); @@ -128,8 +155,7 @@ TEST_BEGIN(test_nstime_imultiply) } TEST_END -TEST_BEGIN(test_nstime_idivide) -{ +TEST_BEGIN(test_nstime_idivide) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); @@ -148,8 +174,7 @@ TEST_BEGIN(test_nstime_idivide) } TEST_END -TEST_BEGIN(test_nstime_divide) -{ +TEST_BEGIN(test_nstime_divide) { nstime_t nsta, nstb, nstc; nstime_init2(&nsta, 42, 43); @@ -176,15 +201,12 @@ TEST_BEGIN(test_nstime_divide) } TEST_END -TEST_BEGIN(test_nstime_monotonic) -{ - +TEST_BEGIN(test_nstime_monotonic) { nstime_monotonic(); } TEST_END -TEST_BEGIN(test_nstime_update) -{ +TEST_BEGIN(test_nstime_update) { nstime_t nst; nstime_init(&nst, 0); @@ -209,19 +231,19 @@ TEST_BEGIN(test_nstime_update) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_nstime_init, test_nstime_init2, test_nstime_copy, test_nstime_compare, test_nstime_add, + test_nstime_iadd, test_nstime_subtract, + test_nstime_isubtract, test_nstime_imultiply, test_nstime_idivide, test_nstime_divide, test_nstime_monotonic, - test_nstime_update)); + test_nstime_update); } diff --git a/test/unit/pack.c b/test/unit/pack.c index 991faa6..edfc548 100644 --- a/test/unit/pack.c +++ b/test/unit/pack.c @@ -4,21 +4,20 @@ * Size class that is a divisor of the page size, ideally 4+ regions per run. */ #if LG_PAGE <= 14 -#define SZ (ZU(1) << (LG_PAGE - 2)) +#define SZ (ZU(1) << (LG_PAGE - 2)) #else -#define SZ 4096 +#define SZ ZU(4096) #endif /* - * Number of chunks to consume at high water mark. Should be at least 2 so that + * Number of slabs to consume at high water mark. Should be at least 2 so that * if mmap()ed memory grows downward, downward growth of mmap()ed memory is * tested. 
*/ -#define NCHUNKS 8 +#define NSLABS 8 static unsigned -binind_compute(void) -{ +binind_compute(void) { size_t sz; unsigned nbins, i; @@ -38,17 +37,17 @@ binind_compute(void) sz = sizeof(size); assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); - if (size == SZ) - return (i); + if (size == SZ) { + return i; + } } test_fail("Unable to compute nregs_per_run"); - return (0); + return 0; } static size_t -nregs_per_run_compute(void) -{ +nregs_per_run_compute(void) { uint32_t nregs; size_t sz; unsigned binind = binind_compute(); @@ -61,57 +60,23 @@ nregs_per_run_compute(void) sz = sizeof(nregs); assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); - return (nregs); -} - -static size_t -npages_per_run_compute(void) -{ - size_t sz; - unsigned binind = binind_compute(); - size_t mib[4]; - size_t miblen = sizeof(mib)/sizeof(size_t); - size_t run_size; - - assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0, - "Unexpected mallctlnametomb failure"); - mib[2] = (size_t)binind; - sz = sizeof(run_size); - assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, NULL, - 0), 0, "Unexpected mallctlbymib failure"); - return (run_size >> LG_PAGE); -} - -static size_t -npages_per_chunk_compute(void) -{ - - return ((chunksize >> LG_PAGE) - map_bias); -} - -static size_t -nruns_per_chunk_compute(void) -{ - - return (npages_per_chunk_compute() / npages_per_run_compute()); + return nregs; } static unsigned -arenas_extend_mallctl(void) -{ +arenas_create_mallctl(void) { unsigned arena_ind; size_t sz; sz = sizeof(arena_ind); - assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), - 0, "Error in arenas.extend"); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Error in arenas.create"); - return (arena_ind); + return arena_ind; } static void -arena_reset_mallctl(unsigned arena_ind) -{ +arena_reset_mallctl(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); @@ -122,18 +87,15 @@ arena_reset_mallctl(unsigned arena_ind) "Unexpected mallctlbymib() failure"); } -TEST_BEGIN(test_pack) -{ - unsigned arena_ind = arenas_extend_mallctl(); +TEST_BEGIN(test_pack) { + unsigned arena_ind = arenas_create_mallctl(); size_t nregs_per_run = nregs_per_run_compute(); - size_t nruns_per_chunk = nruns_per_chunk_compute(); - size_t nruns = nruns_per_chunk * NCHUNKS; - size_t nregs = nregs_per_run * nruns; + size_t nregs = nregs_per_run * NSLABS; VARIABLE_ARRAY(void *, ptrs, nregs); size_t i, j, offset; /* Fill matrix. 
*/ - for (i = offset = 0; i < nruns; i++) { + for (i = offset = 0; i < NSLABS; i++) { for (j = 0; j < nregs_per_run; j++) { void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); @@ -152,12 +114,13 @@ TEST_BEGIN(test_pack) */ offset = 0; for (i = offset = 0; - i < nruns; + i < NSLABS; i++, offset = (offset + 1) % nregs_per_run) { for (j = 0; j < nregs_per_run; j++) { void *p = ptrs[(i * nregs_per_run) + j]; - if (offset == j) + if (offset == j) { continue; + } dallocx(p, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); } @@ -169,13 +132,14 @@ TEST_BEGIN(test_pack) */ offset = 0; for (i = offset = 0; - i < nruns; + i < NSLABS; i++, offset = (offset + 1) % nregs_per_run) { for (j = 0; j < nregs_per_run; j++) { void *p; - if (offset == j) + if (offset == j) { continue; + } p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j], @@ -190,9 +154,7 @@ TEST_BEGIN(test_pack) TEST_END int -main(void) -{ - - return (test( - test_pack)); +main(void) { + return test( + test_pack); } diff --git a/test/unit/pack.sh b/test/unit/pack.sh index a58151d..6f45148 100644 --- a/test/unit/pack.sh +++ b/test/unit/pack.sh @@ -1,5 +1,4 @@ #!/bin/sh -# Use smallest possible chunk size. Immediately purge to minimize -# fragmentation. -export MALLOC_CONF="lg_chunk:0,lg_dirty_mult:-1,decay_time:-1" +# Immediately purge to minimize fragmentation. +export MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0" diff --git a/test/unit/pages.c b/test/unit/pages.c index d31a35e..67dbb4c 100644 --- a/test/unit/pages.c +++ b/test/unit/pages.c @@ -1,27 +1,27 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_pages_huge) -{ +TEST_BEGIN(test_pages_huge) { + size_t alloc_size; bool commit; - void *pages; + void *pages, *hugepage; + alloc_size = HUGEPAGE * 2 - PAGE; commit = true; - pages = pages_map(NULL, PAGE, &commit); + pages = pages_map(NULL, alloc_size, PAGE, &commit); assert_ptr_not_null(pages, "Unexpected pages_map() error"); - assert_false(pages_huge(pages, PAGE), + hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE)); + assert_b_ne(pages_huge(hugepage, HUGEPAGE), config_thp, "Unexpected pages_huge() result"); - assert_false(pages_nohuge(pages, PAGE), + assert_false(pages_nohuge(hugepage, HUGEPAGE), "Unexpected pages_nohuge() result"); - pages_unmap(pages, PAGE); + pages_unmap(pages, alloc_size); } TEST_END int -main(void) -{ - - return (test( - test_pages_huge)); +main(void) { + return test( + test_pages_huge); } diff --git a/test/unit/ph.c b/test/unit/ph.c index da442f0..88bf56f 100644 --- a/test/unit/ph.c +++ b/test/unit/ph.c @@ -1,17 +1,18 @@ #include "test/jemalloc_test.h" +#include "jemalloc/internal/ph.h" + typedef struct node_s node_t; struct node_s { -#define NODE_MAGIC 0x9823af7e +#define NODE_MAGIC 0x9823af7e uint32_t magic; phn(node_t) link; uint64_t key; }; static int -node_cmp(const node_t *a, const node_t *b) -{ +node_cmp(const node_t *a, const node_t *b) { int ret; ret = (a->key > b->key) - (a->key < b->key); @@ -23,7 +24,7 @@ node_cmp(const node_t *a, const node_t *b) ret = (((uintptr_t)a) > ((uintptr_t)b)) - (((uintptr_t)a) < ((uintptr_t)b)); } - return (ret); + return ret; } static int @@ -32,25 +33,26 @@ node_cmp_magic(const node_t *a, const node_t *b) { assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); - return (node_cmp(a, b)); + return node_cmp(a, b); } typedef ph(node_t) heap_t; ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic); static void -node_print(const 
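/*
 * Aside: the reworked test_pages_huge above over-allocates (two huge pages
 * minus one regular page) and then rounds the base address up, because
 * pages_huge() needs a region naturally aligned to the huge page size.
 * ALIGNMENT_CEILING is the standard power-of-two round-up; restated in a
 * self-contained form (alignment must be a power of 2):
 */
#include <stdint.h>

static inline uintptr_t
align_ceiling(uintptr_t addr, uintptr_t alignment) {
    /* Add (alignment - 1), then clear the low bits. */
    return (addr + (alignment - 1)) & ~(alignment - 1);
}
/* E.g. for a 2 MiB huge page: align_ceiling(addr, (uintptr_t)2 << 20). */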
node_t *node, unsigned depth) -{ +node_print(const node_t *node, unsigned depth) { unsigned i; node_t *leftmost_child, *sibling; - for (i = 0; i < depth; i++) + for (i = 0; i < depth; i++) { malloc_printf("\t"); + } malloc_printf("%2"FMTu64"\n", node->key); leftmost_child = phn_lchild_get(node_t, link, node); - if (leftmost_child == NULL) + if (leftmost_child == NULL) { return; + } node_print(leftmost_child, depth + 1); for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != @@ -60,13 +62,13 @@ node_print(const node_t *node, unsigned depth) } static void -heap_print(const heap_t *heap) -{ +heap_print(const heap_t *heap) { node_t *auxelm; malloc_printf("vvv heap %p vvv\n", heap); - if (heap->ph_root == NULL) + if (heap->ph_root == NULL) { goto label_return; + } node_print(heap->ph_root, 0); @@ -83,8 +85,7 @@ label_return: } static unsigned -node_validate(const node_t *node, const node_t *parent) -{ +node_validate(const node_t *node, const node_t *parent) { unsigned nnodes = 1; node_t *leftmost_child, *sibling; @@ -94,8 +95,9 @@ node_validate(const node_t *node, const node_t *parent) } leftmost_child = phn_lchild_get(node_t, link, node); - if (leftmost_child == NULL) - return (nnodes); + if (leftmost_child == NULL) { + return nnodes; + } assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child), (void *)node, "Leftmost child does not link to node"); nnodes += node_validate(leftmost_child, node); @@ -107,17 +109,17 @@ node_validate(const node_t *node, const node_t *parent) "sibling's prev doesn't link to sibling"); nnodes += node_validate(sibling, node); } - return (nnodes); + return nnodes; } static unsigned -heap_validate(const heap_t *heap) -{ +heap_validate(const heap_t *heap) { unsigned nnodes = 0; node_t *auxelm; - if (heap->ph_root == NULL) + if (heap->ph_root == NULL) { goto label_return; + } nnodes += node_validate(heap->ph_root, NULL); @@ -130,43 +132,47 @@ heap_validate(const heap_t *heap) } label_return: - if (false) + if (false) { heap_print(heap); - return (nnodes); + } + return nnodes; } -TEST_BEGIN(test_ph_empty) -{ +TEST_BEGIN(test_ph_empty) { heap_t heap; heap_new(&heap); assert_true(heap_empty(&heap), "Heap should be empty"); assert_ptr_null(heap_first(&heap), "Unexpected node"); + assert_ptr_null(heap_any(&heap), "Unexpected node"); } TEST_END static void -node_remove(heap_t *heap, node_t *node) -{ - +node_remove(heap_t *heap, node_t *node) { heap_remove(heap, node); node->magic = 0; } static node_t * -node_remove_first(heap_t *heap) -{ +node_remove_first(heap_t *heap) { node_t *node = heap_remove_first(heap); node->magic = 0; - return (node); + return node; } -TEST_BEGIN(test_ph_random) -{ -#define NNODES 25 -#define NBAGS 250 -#define SEED 42 +static node_t * +node_remove_any(heap_t *heap) { + node_t *node = heap_remove_any(heap); + node->magic = 0; + return node; +} + +TEST_BEGIN(test_ph_random) { +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 sfmt_t *sfmt; uint64_t bag[NNODES]; heap_t heap; @@ -178,17 +184,20 @@ TEST_BEGIN(test_ph_random) switch (i) { case 0: /* Insert in order. */ - for (j = 0; j < NNODES; j++) + for (j = 0; j < NNODES; j++) { bag[j] = j; + } break; case 1: /* Insert in reverse order. 
*/ - for (j = 0; j < NNODES; j++) + for (j = 0; j < NNODES; j++) { bag[j] = NNODES - j - 1; + } break; default: - for (j = 0; j < NNODES; j++) + for (j = 0; j < NNODES; j++) { bag[j] = gen_rand64_range(sfmt, NNODES); + } } for (j = 1; j <= NNODES; j++) { @@ -205,6 +214,8 @@ TEST_BEGIN(test_ph_random) for (k = 0; k < j; k++) { heap_insert(&heap, &nodes[k]); if (i % 13 == 12) { + assert_ptr_not_null(heap_any(&heap), + "Heap should not be empty"); /* Trigger merging. */ assert_ptr_not_null(heap_first(&heap), "Heap should not be empty"); @@ -217,7 +228,7 @@ TEST_BEGIN(test_ph_random) "Heap should not be empty"); /* Remove nodes. */ - switch (i % 4) { + switch (i % 6) { case 0: for (k = 0; k < j; k++) { assert_u_eq(heap_validate(&heap), j - k, @@ -265,12 +276,31 @@ TEST_BEGIN(test_ph_random) prev = node; } break; + } case 4: { + for (k = 0; k < j; k++) { + node_remove_any(&heap); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + } + break; + } case 5: { + for (k = 0; k < j; k++) { + node_t *node = heap_any(&heap); + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + node_remove(&heap, node); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + } + break; } default: not_reached(); } assert_ptr_null(heap_first(&heap), "Heap should be empty"); + assert_ptr_null(heap_any(&heap), + "Heap should be empty"); assert_true(heap_empty(&heap), "Heap should be empty"); } } @@ -281,10 +311,8 @@ TEST_BEGIN(test_ph_random) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_ph_empty, - test_ph_random)); + test_ph_random); } diff --git a/test/unit/prng.c b/test/unit/prng.c index 80c9d73..b5795c2 100644 --- a/test/unit/prng.c +++ b/test/unit/prng.c @@ -1,33 +1,33 @@ #include "test/jemalloc_test.h" static void -test_prng_lg_range_u32(bool atomic) -{ - uint32_t sa, sb, ra, rb; +test_prng_lg_range_u32(bool atomic) { + atomic_u32_t sa, sb; + uint32_t ra, rb; unsigned lg_range; - sa = 42; + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_u32(&sa, 32, atomic); - sa = 42; + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); rb = prng_lg_range_u32(&sa, 32, atomic); assert_u32_eq(ra, rb, "Repeated generation should produce repeated results"); - sb = 42; + atomic_store_u32(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_u32(&sb, 32, atomic); assert_u32_eq(ra, rb, "Equivalent generation should produce equivalent results"); - sa = 42; + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_u32(&sa, 32, atomic); rb = prng_lg_range_u32(&sa, 32, atomic); assert_u32_ne(ra, rb, "Full-width results must not immediately repeat"); - sa = 42; + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_u32(&sa, 32, atomic); for (lg_range = 31; lg_range > 0; lg_range--) { - sb = 42; + atomic_store_u32(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_u32(&sb, lg_range, atomic); assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); @@ -38,8 +38,7 @@ test_prng_lg_range_u32(bool atomic) } static void -test_prng_lg_range_u64(void) -{ +test_prng_lg_range_u64(void) { uint64_t sa, sb, ra, rb; unsigned lg_range; @@ -75,34 +74,34 @@ test_prng_lg_range_u64(void) } static void -test_prng_lg_range_zu(bool atomic) -{ - size_t sa, sb, ra, rb; +test_prng_lg_range_zu(bool atomic) { + atomic_zu_t sa, sb; + size_t ra, rb; unsigned lg_range; - sa = 42; + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); - sa = 42; + 
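/*
 * Aside: node_cmp() in the pairing-heap test above breaks key ties by
 * comparing node addresses, turning a partial order over duplicate keys
 * into a total order so that removal is deterministic.  The comparator
 * shape in isolation, using the branchless (x > y) - (x < y) sign trick,
 * which avoids subtraction overflow:
 */
#include <stdint.h>

typedef struct {
    uint64_t key;
} knode_t;

static int
knode_cmp(const knode_t *a, const knode_t *b) {
    int ret = (a->key > b->key) - (a->key < b->key);
    if (ret == 0) {
        /* Equal keys: fall back to address order. */
        ret = ((uintptr_t)a > (uintptr_t)b) -
            ((uintptr_t)a < (uintptr_t)b);
    }
    return ret;
}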
atomic_store_zu(&sa, 42, ATOMIC_RELAXED); rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_eq(ra, rb, "Repeated generation should produce repeated results"); - sb = 42; + atomic_store_zu(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_eq(ra, rb, "Equivalent generation should produce equivalent results"); - sa = 42; + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_ne(ra, rb, "Full-width results must not immediately repeat"); - sa = 42; + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0; lg_range--) { - sb = 42; + atomic_store_zu(&sb, 42, ATOMIC_RELAXED); rb = prng_lg_range_zu(&sb, lg_range, atomic); assert_zu_eq((rb & (SIZE_T_MAX << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); @@ -112,54 +111,43 @@ test_prng_lg_range_zu(bool atomic) } } -TEST_BEGIN(test_prng_lg_range_u32_nonatomic) -{ - +TEST_BEGIN(test_prng_lg_range_u32_nonatomic) { test_prng_lg_range_u32(false); } TEST_END -TEST_BEGIN(test_prng_lg_range_u32_atomic) -{ - +TEST_BEGIN(test_prng_lg_range_u32_atomic) { test_prng_lg_range_u32(true); } TEST_END -TEST_BEGIN(test_prng_lg_range_u64_nonatomic) -{ - +TEST_BEGIN(test_prng_lg_range_u64_nonatomic) { test_prng_lg_range_u64(); } TEST_END -TEST_BEGIN(test_prng_lg_range_zu_nonatomic) -{ - +TEST_BEGIN(test_prng_lg_range_zu_nonatomic) { test_prng_lg_range_zu(false); } TEST_END -TEST_BEGIN(test_prng_lg_range_zu_atomic) -{ - +TEST_BEGIN(test_prng_lg_range_zu_atomic) { test_prng_lg_range_zu(true); } TEST_END static void -test_prng_range_u32(bool atomic) -{ +test_prng_range_u32(bool atomic) { uint32_t range; -#define MAX_RANGE 10000000 -#define RANGE_STEP 97 -#define NREPS 10 +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { - uint32_t s; + atomic_u32_t s; unsigned rep; - s = range; + atomic_store_u32(&s, range, ATOMIC_RELAXED); for (rep = 0; rep < NREPS; rep++) { uint32_t r = prng_range_u32(&s, range, atomic); @@ -169,12 +157,11 @@ test_prng_range_u32(bool atomic) } static void -test_prng_range_u64(void) -{ +test_prng_range_u64(void) { uint64_t range; -#define MAX_RANGE 10000000 -#define RANGE_STEP 97 -#define NREPS 10 +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { uint64_t s; @@ -190,18 +177,17 @@ test_prng_range_u64(void) } static void -test_prng_range_zu(bool atomic) -{ +test_prng_range_zu(bool atomic) { size_t range; -#define MAX_RANGE 10000000 -#define RANGE_STEP 97 -#define NREPS 10 +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { - size_t s; + atomic_zu_t s; unsigned rep; - s = range; + atomic_store_zu(&s, range, ATOMIC_RELAXED); for (rep = 0; rep < NREPS; rep++) { size_t r = prng_range_zu(&s, range, atomic); @@ -210,46 +196,34 @@ test_prng_range_zu(bool atomic) } } -TEST_BEGIN(test_prng_range_u32_nonatomic) -{ - +TEST_BEGIN(test_prng_range_u32_nonatomic) { test_prng_range_u32(false); } TEST_END -TEST_BEGIN(test_prng_range_u32_atomic) -{ - +TEST_BEGIN(test_prng_range_u32_atomic) { test_prng_range_u32(true); } TEST_END -TEST_BEGIN(test_prng_range_u64_nonatomic) -{ - 
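/*
 * Aside: the PRNG state above moves into jemalloc's atomic_u32_t /
 * atomic_zu_t wrappers, seeded with relaxed stores.  Relaxed ordering is
 * enough here because only atomicity of the state word matters, not
 * ordering against other memory operations.  Roughly equivalent seeding in
 * portable C11 (illustrative; the wrappers are assumed to reduce to
 * something like this on C11 builds):
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t prng_state;

static void
prng_seed(uint32_t seed) {
    atomic_store_explicit(&prng_state, seed, memory_order_relaxed);
}

static uint32_t
prng_state_peek(void) {
    return atomic_load_explicit(&prng_state, memory_order_relaxed);
}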
+TEST_BEGIN(test_prng_range_u64_nonatomic) { test_prng_range_u64(); } TEST_END -TEST_BEGIN(test_prng_range_zu_nonatomic) -{ - +TEST_BEGIN(test_prng_range_zu_nonatomic) { test_prng_range_zu(false); } TEST_END -TEST_BEGIN(test_prng_range_zu_atomic) -{ - +TEST_BEGIN(test_prng_range_zu_atomic) { test_prng_range_zu(true); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_prng_lg_range_u32_nonatomic, test_prng_lg_range_u32_atomic, test_prng_lg_range_u64_nonatomic, @@ -259,5 +233,5 @@ main(void) test_prng_range_u32_atomic, test_prng_range_u64_nonatomic, test_prng_range_zu_nonatomic, - test_prng_range_zu_atomic)); + test_prng_range_zu_atomic); } diff --git a/test/unit/prof_accum.c b/test/unit/prof_accum.c index 031f083..2522006 100644 --- a/test/unit/prof_accum.c +++ b/test/unit/prof_accum.c @@ -1,31 +1,27 @@ #include "test/jemalloc_test.h" -#define NTHREADS 4 -#define NALLOCS_PER_THREAD 50 -#define DUMP_INTERVAL 1 -#define BT_COUNT_CHECK_INTERVAL 5 +#define NTHREADS 4 +#define NALLOCS_PER_THREAD 50 +#define DUMP_INTERVAL 1 +#define BT_COUNT_CHECK_INTERVAL 5 static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ +prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); - return (fd); + return fd; } static void * -alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) -{ - - return (btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration)); +alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) { + return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration); } static void * -thd_start(void *varg) -{ +thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; size_t bt_count_prev, bt_count; unsigned i_prev, i; @@ -50,11 +46,10 @@ thd_start(void *varg) } } - return (NULL); + return NULL; } -TEST_BEGIN(test_idump) -{ +TEST_BEGIN(test_idump) { bool active; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; @@ -73,15 +68,14 @@ TEST_BEGIN(test_idump) thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } - for (i = 0; i < NTHREADS; i++) + for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); + } } TEST_END int -main(void) -{ - - return (test( - test_idump)); +main(void) { + return test_no_reentrancy( + test_idump); } diff --git a/test/unit/prof_active.c b/test/unit/prof_active.c index a906beb..850a24a 100644 --- a/test/unit/prof_active.c +++ b/test/unit/prof_active.c @@ -1,8 +1,7 @@ #include "test/jemalloc_test.h" static void -mallctl_bool_get(const char *name, bool expected, const char *func, int line) -{ +mallctl_bool_get(const char *name, bool expected, const char *func, int line) { bool old; size_t sz; @@ -15,8 +14,7 @@ mallctl_bool_get(const char *name, bool expected, const char *func, int line) static void mallctl_bool_set(const char *name, bool old_expected, bool val_new, - const char *func, int line) -{ + const char *func, int line) { bool old; size_t sz; @@ -31,50 +29,41 @@ mallctl_bool_set(const char *name, bool old_expected, bool val_new, static void mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func, - int line) -{ - + int line) { mallctl_bool_get("prof.active", prof_active_old_expected, func, line); } -#define mallctl_prof_active_get(a) \ +#define mallctl_prof_active_get(a) \ mallctl_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_prof_active_set_impl(bool prof_active_old_expected, - bool prof_active_new, const char *func, int line) -{ - + bool 
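/*
 * Aside: prof_accum.c above redirects profile dumps by swapping the
 * prof_dump_open function pointer for an interceptor that opens /dev/null,
 * so dump activity can be counted without writing real files.  The
 * save/replace/restore hook pattern in generic form (hypothetical hook
 * type, not jemalloc's actual one):
 */
typedef int (hook_fn_t)(const char *arg);

static hook_fn_t *active_hook;  /* the library's replaceable hook */
static int hook_calls;          /* observed by the test */

static int
counting_hook(const char *arg) {
    (void)arg;
    hook_calls++;
    return 0;   /* report success */
}

static void
with_counting_hook(void (*body)(void)) {
    hook_fn_t *orig = active_hook;  /* save */
    active_hook = counting_hook;    /* replace */
    body();
    active_hook = orig;             /* restore */
}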
prof_active_new, const char *func, int line) { mallctl_bool_set("prof.active", prof_active_old_expected, prof_active_new, func, line); } -#define mallctl_prof_active_set(a, b) \ +#define mallctl_prof_active_set(a, b) \ mallctl_prof_active_set_impl(a, b, __func__, __LINE__) static void mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected, - const char *func, int line) -{ - + const char *func, int line) { mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected, func, line); } -#define mallctl_thread_prof_active_get(a) \ +#define mallctl_thread_prof_active_get(a) \ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected, - bool thread_prof_active_new, const char *func, int line) -{ - + bool thread_prof_active_new, const char *func, int line) { mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected, thread_prof_active_new, func, line); } -#define mallctl_thread_prof_active_set(a, b) \ +#define mallctl_thread_prof_active_set(a, b) \ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__) static void -prof_sampling_probe_impl(bool expect_sample, const char *func, int line) -{ +prof_sampling_probe_impl(bool expect_sample, const char *func, int line) { void *p; size_t expected_backtraces = expect_sample ? 1 : 0; @@ -86,12 +75,10 @@ prof_sampling_probe_impl(bool expect_sample, const char *func, int line) "%s():%d: Unexpected backtrace count", func, line); dallocx(p, 0); } -#define prof_sampling_probe(a) \ +#define prof_sampling_probe(a) \ prof_sampling_probe_impl(a, __func__, __LINE__) -TEST_BEGIN(test_prof_active) -{ - +TEST_BEGIN(test_prof_active) { test_skip_if(!config_prof); mallctl_prof_active_get(true); @@ -124,9 +111,7 @@ TEST_BEGIN(test_prof_active) TEST_END int -main(void) -{ - - return (test( - test_prof_active)); +main(void) { + return test_no_reentrancy( + test_prof_active); } diff --git a/test/unit/prof_gdump.c b/test/unit/prof_gdump.c index b88a74c..fcb434c 100644 --- a/test/unit/prof_gdump.c +++ b/test/unit/prof_gdump.c @@ -3,8 +3,7 @@ static bool did_prof_dump_open; static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ +prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; @@ -12,11 +11,10 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); - return (fd); + return fd; } -TEST_BEGIN(test_gdump) -{ +TEST_BEGIN(test_gdump) { bool active, gdump, gdump_old; void *p, *q, *r, *s; size_t sz; @@ -31,12 +29,12 @@ TEST_BEGIN(test_gdump) prof_dump_open = prof_dump_open_intercept; did_prof_dump_open = false; - p = mallocx(chunksize, 0); + p = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); did_prof_dump_open = false; - q = mallocx(chunksize, 0); + q = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); @@ -47,7 +45,7 @@ TEST_BEGIN(test_gdump) "Unexpected mallctl failure while disabling prof.gdump"); assert(gdump_old); did_prof_dump_open = false; - r = mallocx(chunksize, 0); + r = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_false(did_prof_dump_open, "Unexpected profile dump"); @@ -58,7 +56,7 @@ 
TEST_BEGIN(test_gdump) "Unexpected mallctl failure while enabling prof.gdump"); assert(!gdump_old); did_prof_dump_open = false; - s = mallocx(chunksize, 0); + s = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); @@ -70,9 +68,7 @@ TEST_BEGIN(test_gdump) TEST_END int -main(void) -{ - - return (test( - test_gdump)); +main(void) { + return test_no_reentrancy( + test_gdump); } diff --git a/test/unit/prof_idump.c b/test/unit/prof_idump.c index 87734a4..1cc6c98 100644 --- a/test/unit/prof_idump.c +++ b/test/unit/prof_idump.c @@ -3,8 +3,7 @@ static bool did_prof_dump_open; static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ +prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; @@ -12,11 +11,10 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); - return (fd); + return fd; } -TEST_BEGIN(test_idump) -{ +TEST_BEGIN(test_idump) { bool active; void *p; @@ -38,9 +36,7 @@ TEST_BEGIN(test_idump) TEST_END int -main(void) -{ - - return (test( - test_idump)); +main(void) { + return test( + test_idump); } diff --git a/test/unit/prof_idump.sh b/test/unit/prof_idump.sh index 08a1b62..4dc599a 100644 --- a/test/unit/prof_idump.sh +++ b/test/unit/prof_idump.sh @@ -1,7 +1,8 @@ #!/bin/sh +export MALLOC_CONF="tcache:false" if [ "x${enable_prof}" = "x1" ] ; then - export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0" + export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0" fi diff --git a/test/unit/prof_reset.c b/test/unit/prof_reset.c index 87b0d0c..7cce42d 100644 --- a/test/unit/prof_reset.c +++ b/test/unit/prof_reset.c @@ -1,39 +1,34 @@ #include "test/jemalloc_test.h" static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ +prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); - return (fd); + return fd; } static void -set_prof_active(bool active) -{ - +set_prof_active(bool active) { assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure"); } static size_t -get_lg_prof_sample(void) -{ +get_lg_prof_sample(void) { size_t lg_prof_sample; size_t sz = sizeof(size_t); assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); - return (lg_prof_sample); + return lg_prof_sample; } static void -do_prof_reset(size_t lg_prof_sample) -{ +do_prof_reset(size_t lg_prof_sample) { assert_d_eq(mallctl("prof.reset", NULL, NULL, (void *)&lg_prof_sample, sizeof(size_t)), 0, "Unexpected mallctl failure while resetting profile data"); @@ -41,8 +36,7 @@ do_prof_reset(size_t lg_prof_sample) "Expected profile sample rate change"); } -TEST_BEGIN(test_prof_reset_basic) -{ +TEST_BEGIN(test_prof_reset_basic) { size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next; size_t sz; unsigned i; @@ -91,17 +85,14 @@ bool prof_dump_header_intercepted = false; prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; static bool prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err, - const prof_cnt_t *cnt_all) -{ - + const prof_cnt_t *cnt_all) { prof_dump_header_intercepted = true; 
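/*
 * Aside: do_prof_reset() above shows that "prof.reset" does double duty --
 * it discards accumulated profile data, and writing a size_t also switches
 * lg_prof_sample (the sample interval becomes 2^lg_prof_sample bytes) in
 * the same call.  Usage in isolation, assuming a jemalloc built with
 * --enable-prof and run with profiling enabled:
 */
#include <jemalloc/jemalloc.h>

static int
reset_profiling(size_t lg_sample) {
    /* Pass NULL/0 for newp/newlen instead to reset while leaving the
     * sampling rate unchanged. */
    return mallctl("prof.reset", NULL, NULL, (void *)&lg_sample,
        sizeof(lg_sample));
}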
memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t)); - return (false); + return false; } -TEST_BEGIN(test_prof_reset_cleanup) -{ +TEST_BEGIN(test_prof_reset_cleanup) { void *p; prof_dump_header_t *prof_dump_header_orig; @@ -139,14 +130,13 @@ TEST_BEGIN(test_prof_reset_cleanup) } TEST_END -#define NTHREADS 4 -#define NALLOCS_PER_THREAD (1U << 13) -#define OBJ_RING_BUF_COUNT 1531 -#define RESET_INTERVAL (1U << 10) -#define DUMP_INTERVAL 3677 +#define NTHREADS 4 +#define NALLOCS_PER_THREAD (1U << 13) +#define OBJ_RING_BUF_COUNT 1531 +#define RESET_INTERVAL (1U << 10) +#define DUMP_INTERVAL 3677 static void * -thd_start(void *varg) -{ +thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; unsigned i; void *objs[OBJ_RING_BUF_COUNT]; @@ -186,11 +176,10 @@ thd_start(void *varg) } } - return (NULL); + return NULL; } -TEST_BEGIN(test_prof_reset) -{ +TEST_BEGIN(test_prof_reset) { size_t lg_prof_sample_orig; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; @@ -213,8 +202,9 @@ TEST_BEGIN(test_prof_reset) thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } - for (i = 0; i < NTHREADS; i++) + for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); + } assert_zu_eq(prof_bt_count(), bt_count, "Unexpected bactrace count change"); @@ -233,9 +223,8 @@ TEST_END #undef DUMP_INTERVAL /* Test sampling at the same allocation site across resets. */ -#define NITER 10 -TEST_BEGIN(test_xallocx) -{ +#define NITER 10 +TEST_BEGIN(test_xallocx) { size_t lg_prof_sample_orig; unsigned i; void *ptrs[NITER]; @@ -285,15 +274,13 @@ TEST_END #undef NITER int -main(void) -{ - +main(void) { /* Intercept dumping prior to running any tests. */ prof_dump_open = prof_dump_open_intercept; - return (test( + return test_no_reentrancy( test_prof_reset_basic, test_prof_reset_cleanup, test_prof_reset, - test_xallocx)); + test_xallocx); } diff --git a/test/unit/prof_tctx.c b/test/unit/prof_tctx.c new file mode 100644 index 0000000..ff3b2b0 --- /dev/null +++ b/test/unit/prof_tctx.c @@ -0,0 +1,46 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_prof_realloc) { + tsdn_t *tsdn; + int flags; + void *p, *q; + prof_tctx_t *tctx_p, *tctx_q; + uint64_t curobjs_0, curobjs_1, curobjs_2, curobjs_3; + + test_skip_if(!config_prof); + + tsdn = tsdn_fetch(); + flags = MALLOCX_TCACHE_NONE; + + prof_cnt_all(&curobjs_0, NULL, NULL, NULL); + p = mallocx(1024, flags); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tctx_p = prof_tctx_get(tsdn, p, NULL); + assert_ptr_ne(tctx_p, (prof_tctx_t *)(uintptr_t)1U, + "Expected valid tctx"); + prof_cnt_all(&curobjs_1, NULL, NULL, NULL); + assert_u64_eq(curobjs_0 + 1, curobjs_1, + "Allocation should have increased sample size"); + + q = rallocx(p, 2048, flags); + assert_ptr_ne(p, q, "Expected move"); + assert_ptr_not_null(p, "Unexpected rmallocx() failure"); + tctx_q = prof_tctx_get(tsdn, q, NULL); + assert_ptr_ne(tctx_q, (prof_tctx_t *)(uintptr_t)1U, + "Expected valid tctx"); + prof_cnt_all(&curobjs_2, NULL, NULL, NULL); + assert_u64_eq(curobjs_1, curobjs_2, + "Reallocation should not have changed sample size"); + + dallocx(q, flags); + prof_cnt_all(&curobjs_3, NULL, NULL, NULL); + assert_u64_eq(curobjs_0, curobjs_3, + "Sample size should have returned to base level"); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_prof_realloc); +} diff --git a/test/unit/prof_thread_name.c b/test/unit/prof_thread_name.c index 3251853..c9c2a2b 100644 --- a/test/unit/prof_thread_name.c +++ b/test/unit/prof_thread_name.c @@ -2,8 +2,7 @@ static void 
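/*
 * Aside: the reset stress thread above bounds its live allocations with a
 * fixed ring of object slots (OBJ_RING_BUF_COUNT), freeing each slot
 * before reusing it, so the test churns through many sampled allocations
 * without unbounded memory growth.  The pattern in miniature, with plain
 * malloc/free as stand-ins:
 */
#include <stdlib.h>

#define RING_SLOTS 16

static void
churn(unsigned iterations) {
    void *ring[RING_SLOTS] = {0};

    for (unsigned i = 0; i < iterations; i++) {
        unsigned slot = i % RING_SLOTS;
        free(ring[slot]);           /* free(NULL) is a no-op */
        ring[slot] = malloc(64);    /* <= RING_SLOTS live at once */
    }
    for (unsigned s = 0; s < RING_SLOTS; s++) {
        free(ring[s]);
    }
}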
mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, - int line) -{ + int line) { const char *thread_name_old; size_t sz; @@ -15,25 +14,22 @@ mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, assert_str_eq(thread_name_old, thread_name_expected, "%s():%d: Unexpected thread.prof.name value", func, line); } -#define mallctl_thread_name_get(a) \ +#define mallctl_thread_name_get(a) \ mallctl_thread_name_get_impl(a, __func__, __LINE__) static void mallctl_thread_name_set_impl(const char *thread_name, const char *func, - int line) -{ - + int line) { assert_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&thread_name, sizeof(thread_name)), 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); mallctl_thread_name_get_impl(thread_name, func, line); } -#define mallctl_thread_name_set(a) \ +#define mallctl_thread_name_set(a) \ mallctl_thread_name_set_impl(a, __func__, __LINE__) -TEST_BEGIN(test_prof_thread_name_validation) -{ +TEST_BEGIN(test_prof_thread_name_validation) { const char *thread_name; test_skip_if(!config_prof); @@ -72,11 +68,10 @@ TEST_BEGIN(test_prof_thread_name_validation) } TEST_END -#define NTHREADS 4 -#define NRESET 25 +#define NTHREADS 4 +#define NRESET 25 static void * -thd_start(void *varg) -{ +thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; char thread_name[16] = ""; unsigned i; @@ -95,11 +90,10 @@ thd_start(void *varg) mallctl_thread_name_set(thread_name); mallctl_thread_name_set(""); - return (NULL); + return NULL; } -TEST_BEGIN(test_prof_thread_name_threaded) -{ +TEST_BEGIN(test_prof_thread_name_threaded) { thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; @@ -110,18 +104,17 @@ TEST_BEGIN(test_prof_thread_name_threaded) thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } - for (i = 0; i < NTHREADS; i++) + for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); + } } TEST_END #undef NTHREADS #undef NRESET int -main(void) -{ - - return (test( +main(void) { + return test( test_prof_thread_name_validation, - test_prof_thread_name_threaded)); + test_prof_thread_name_threaded); } diff --git a/test/unit/ql.c b/test/unit/ql.c index 05fad45..b76c24c 100644 --- a/test/unit/ql.c +++ b/test/unit/ql.c @@ -1,7 +1,9 @@ #include "test/jemalloc_test.h" +#include "jemalloc/internal/ql.h" + /* Number of ring entries, in [2..26]. 
*/ -#define NENTRIES 9 +#define NENTRIES 9 typedef struct list_s list_t; typedef ql_head(list_t) list_head_t; @@ -12,8 +14,7 @@ struct list_s { }; static void -test_empty_list(list_head_t *head) -{ +test_empty_list(list_head_t *head) { list_t *t; unsigned i; @@ -34,8 +35,7 @@ test_empty_list(list_head_t *head) assert_u_eq(i, 0, "Unexpected element for empty list"); } -TEST_BEGIN(test_ql_empty) -{ +TEST_BEGIN(test_ql_empty) { list_head_t head; ql_new(&head); @@ -44,8 +44,7 @@ TEST_BEGIN(test_ql_empty) TEST_END static void -init_entries(list_t *entries, unsigned nentries) -{ +init_entries(list_t *entries, unsigned nentries) { unsigned i; for (i = 0; i < nentries; i++) { @@ -55,8 +54,7 @@ init_entries(list_t *entries, unsigned nentries) } static void -test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) -{ +test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) { list_t *t; unsigned i; @@ -91,31 +89,31 @@ test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) } } -TEST_BEGIN(test_ql_tail_insert) -{ +TEST_BEGIN(test_ql_tail_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) + for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); + } test_entries_list(&head, entries, NENTRIES); } TEST_END -TEST_BEGIN(test_ql_tail_remove) -{ +TEST_BEGIN(test_ql_tail_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) + for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); + } for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, entries, NENTRIES-i); @@ -125,31 +123,31 @@ TEST_BEGIN(test_ql_tail_remove) } TEST_END -TEST_BEGIN(test_ql_head_insert) -{ +TEST_BEGIN(test_ql_head_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) + for (i = 0; i < NENTRIES; i++) { ql_head_insert(&head, &entries[NENTRIES-i-1], link); + } test_entries_list(&head, entries, NENTRIES); } TEST_END -TEST_BEGIN(test_ql_head_remove) -{ +TEST_BEGIN(test_ql_head_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) + for (i = 0; i < NENTRIES; i++) { ql_head_insert(&head, &entries[NENTRIES-i-1], link); + } for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, &entries[i], NENTRIES-i); @@ -159,8 +157,7 @@ TEST_BEGIN(test_ql_head_remove) } TEST_END -TEST_BEGIN(test_ql_insert) -{ +TEST_BEGIN(test_ql_insert) { list_head_t head; list_t entries[8]; list_t *a, *b, *c, *d, *e, *f, *g, *h; @@ -196,14 +193,12 @@ TEST_BEGIN(test_ql_insert) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_ql_empty, test_ql_tail_insert, test_ql_tail_remove, test_ql_head_insert, test_ql_head_remove, - test_ql_insert)); + test_ql_insert); } diff --git a/test/unit/qr.c b/test/unit/qr.c index a2a2d90..271a109 100644 --- a/test/unit/qr.c +++ b/test/unit/qr.c @@ -1,9 +1,11 @@ #include "test/jemalloc_test.h" +#include "jemalloc/internal/qr.h" + /* Number of ring entries, in [2..26]. */ -#define NENTRIES 9 +#define NENTRIES 9 /* Split index, in [1..NENTRIES). 
*/ -#define SPLIT_INDEX 5 +#define SPLIT_INDEX 5 typedef struct ring_s ring_t; @@ -13,8 +15,7 @@ struct ring_s { }; static void -init_entries(ring_t *entries) -{ +init_entries(ring_t *entries) { unsigned i; for (i = 0; i < NENTRIES; i++) { @@ -24,8 +25,7 @@ init_entries(ring_t *entries) } static void -test_independent_entries(ring_t *entries) -{ +test_independent_entries(ring_t *entries) { ring_t *t; unsigned i, j; @@ -61,8 +61,7 @@ test_independent_entries(ring_t *entries) } } -TEST_BEGIN(test_qr_one) -{ +TEST_BEGIN(test_qr_one) { ring_t entries[NENTRIES]; init_entries(entries); @@ -71,8 +70,7 @@ TEST_BEGIN(test_qr_one) TEST_END static void -test_entries_ring(ring_t *entries) -{ +test_entries_ring(ring_t *entries) { ring_t *t; unsigned i, j; @@ -104,27 +102,27 @@ test_entries_ring(ring_t *entries) } } -TEST_BEGIN(test_qr_after_insert) -{ +TEST_BEGIN(test_qr_after_insert) { ring_t entries[NENTRIES]; unsigned i; init_entries(entries); - for (i = 1; i < NENTRIES; i++) + for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); + } test_entries_ring(entries); } TEST_END -TEST_BEGIN(test_qr_remove) -{ +TEST_BEGIN(test_qr_remove) { ring_t entries[NENTRIES]; ring_t *t; unsigned i, j; init_entries(entries); - for (i = 1; i < NENTRIES; i++) + for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); + } for (i = 0; i < NENTRIES; i++) { j = 0; @@ -145,15 +143,15 @@ TEST_BEGIN(test_qr_remove) } TEST_END -TEST_BEGIN(test_qr_before_insert) -{ +TEST_BEGIN(test_qr_before_insert) { ring_t entries[NENTRIES]; ring_t *t; unsigned i, j; init_entries(entries); - for (i = 1; i < NENTRIES; i++) + for (i = 1; i < NENTRIES; i++) { qr_before_insert(&entries[i - 1], &entries[i], link); + } for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { @@ -184,8 +182,7 @@ TEST_BEGIN(test_qr_before_insert) TEST_END static void -test_split_entries(ring_t *entries) -{ +test_split_entries(ring_t *entries) { ring_t *t; unsigned i, j; @@ -206,43 +203,41 @@ test_split_entries(ring_t *entries) } } -TEST_BEGIN(test_qr_meld_split) -{ +TEST_BEGIN(test_qr_meld_split) { ring_t entries[NENTRIES]; unsigned i; init_entries(entries); - for (i = 1; i < NENTRIES; i++) + for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); + } - qr_split(&entries[0], &entries[SPLIT_INDEX], link); + qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_split_entries(entries); - qr_meld(&entries[0], &entries[SPLIT_INDEX], link); + qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_entries_ring(entries); - qr_meld(&entries[0], &entries[SPLIT_INDEX], link); + qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_split_entries(entries); - qr_split(&entries[0], &entries[SPLIT_INDEX], link); + qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_entries_ring(entries); - qr_split(&entries[0], &entries[0], link); + qr_split(&entries[0], &entries[0], ring_t, link); test_entries_ring(entries); - qr_meld(&entries[0], &entries[0], link); + qr_meld(&entries[0], &entries[0], ring_t, link); test_entries_ring(entries); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_qr_one, test_qr_after_insert, test_qr_remove, test_qr_before_insert, - test_qr_meld_split)); + test_qr_meld_split); } diff --git a/test/unit/quarantine.c b/test/unit/quarantine.c deleted file mode 100644 index 6068768..0000000 --- a/test/unit/quarantine.c +++ /dev/null @@ -1,102 +0,0 @@ -#include "test/jemalloc_test.h" - -/* Keep 
in sync with definition in quarantine.sh. */ -#define QUARANTINE_SIZE 8192 - -void -quarantine_clear(void) -{ - void *p; - - p = mallocx(QUARANTINE_SIZE*2, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - dallocx(p, 0); -} - -TEST_BEGIN(test_quarantine) -{ -#define SZ ZU(256) -#define NQUARANTINED (QUARANTINE_SIZE/SZ) - void *quarantined[NQUARANTINED+1]; - size_t i, j; - - test_skip_if(!config_fill); - - assert_zu_eq(nallocx(SZ, 0), SZ, - "SZ=%zu does not precisely equal a size class", SZ); - - quarantine_clear(); - - /* - * Allocate enough regions to completely fill the quarantine, plus one - * more. The last iteration occurs with a completely full quarantine, - * but no regions should be drained from the quarantine until the last - * deallocation occurs. Therefore no region recycling should occur - * until after this loop completes. - */ - for (i = 0; i < NQUARANTINED+1; i++) { - void *p = mallocx(SZ, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - quarantined[i] = p; - dallocx(p, 0); - for (j = 0; j < i; j++) { - assert_ptr_ne(p, quarantined[j], - "Quarantined region recycled too early; " - "i=%zu, j=%zu", i, j); - } - } -#undef NQUARANTINED -#undef SZ -} -TEST_END - -static bool detected_redzone_corruption; - -static void -arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, - size_t offset, uint8_t byte) -{ - - detected_redzone_corruption = true; -} - -TEST_BEGIN(test_quarantine_redzone) -{ - char *s; - arena_redzone_corruption_t *arena_redzone_corruption_orig; - - test_skip_if(!config_fill); - - arena_redzone_corruption_orig = arena_redzone_corruption; - arena_redzone_corruption = arena_redzone_corruption_replacement; - - /* Test underflow. */ - detected_redzone_corruption = false; - s = (char *)mallocx(1, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - s[-1] = 0xbb; - dallocx(s, 0); - assert_true(detected_redzone_corruption, - "Did not detect redzone corruption"); - - /* Test overflow. */ - detected_redzone_corruption = false; - s = (char *)mallocx(1, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - s[sallocx(s, 0)] = 0xbb; - dallocx(s, 0); - assert_true(detected_redzone_corruption, - "Did not detect redzone corruption"); - - arena_redzone_corruption = arena_redzone_corruption_orig; -} -TEST_END - -int -main(void) -{ - - return (test( - test_quarantine, - test_quarantine_redzone)); -} diff --git a/test/unit/quarantine.sh b/test/unit/quarantine.sh deleted file mode 100644 index e3c6932..0000000 --- a/test/unit/quarantine.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -# Keep in sync with definition in quarantine.c. 
-export QUARANTINE_SIZE=8192 - -if [ "x${enable_fill}" = "x1" ] ; then - export MALLOC_CONF="abort:false,junk:true,redzone:true,quarantine:${QUARANTINE_SIZE}" -fi diff --git a/test/unit/rb.c b/test/unit/rb.c index cf3d3a7..65c0492 100644 --- a/test/unit/rb.c +++ b/test/unit/rb.c @@ -1,20 +1,22 @@ #include "test/jemalloc_test.h" -#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ - a_type *rbp_bh_t; \ - for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \ - rbp_bh_t != NULL; \ - rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \ - if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ - (r_height)++; \ +#include "jemalloc/internal/rb.h" + +#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ + a_type *rbp_bh_t; \ + for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t != \ + NULL; rbp_bh_t = rbtn_left_get(a_type, a_field, \ + rbp_bh_t)) { \ + if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ + (r_height)++; \ + } \ } \ - } \ } while (0) typedef struct node_s node_t; struct node_s { -#define NODE_MAGIC 0x9823af7e +#define NODE_MAGIC 0x9823af7e uint32_t magic; rb_node(node_t) link; uint64_t key; @@ -36,14 +38,13 @@ node_cmp(const node_t *a, const node_t *b) { ret = (((uintptr_t)a) > ((uintptr_t)b)) - (((uintptr_t)a) < ((uintptr_t)b)); } - return (ret); + return ret; } typedef rb_tree(node_t) tree_t; rb_gen(static, tree_, tree_t, node_t, link, node_cmp); -TEST_BEGIN(test_rb_empty) -{ +TEST_BEGIN(test_rb_empty) { tree_t tree; node_t key; @@ -68,52 +69,56 @@ TEST_BEGIN(test_rb_empty) TEST_END static unsigned -tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) -{ +tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) { unsigned ret = 0; node_t *left_node; node_t *right_node; - if (node == NULL) - return (ret); + if (node == NULL) { + return ret; + } left_node = rbtn_left_get(node_t, link, node); right_node = rbtn_right_get(node_t, link, node); - if (!rbtn_red_get(node_t, link, node)) + if (!rbtn_red_get(node_t, link, node)) { black_depth++; + } /* Red nodes must be interleaved with black nodes. */ if (rbtn_red_get(node_t, link, node)) { - if (left_node != NULL) + if (left_node != NULL) { assert_false(rbtn_red_get(node_t, link, left_node), "Node should be black"); - if (right_node != NULL) + } + if (right_node != NULL) { assert_false(rbtn_red_get(node_t, link, right_node), "Node should be black"); + } } /* Self. */ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Left subtree. */ - if (left_node != NULL) + if (left_node != NULL) { ret += tree_recurse(left_node, black_height, black_depth); - else + } else { ret += (black_depth != black_height); + } /* Right subtree. 
*/ - if (right_node != NULL) + if (right_node != NULL) { ret += tree_recurse(right_node, black_height, black_depth); - else + } else { ret += (black_depth != black_height); + } - return (ret); + return ret; } static node_t * -tree_iterate_cb(tree_t *tree, node_t *node, void *data) -{ +tree_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *i = (unsigned *)data; node_t *search_node; @@ -136,34 +141,31 @@ tree_iterate_cb(tree_t *tree, node_t *node, void *data) (*i)++; - return (NULL); + return NULL; } static unsigned -tree_iterate(tree_t *tree) -{ +tree_iterate(tree_t *tree) { unsigned i; i = 0; tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); - return (i); + return i; } static unsigned -tree_iterate_reverse(tree_t *tree) -{ +tree_iterate_reverse(tree_t *tree) { unsigned i; i = 0; tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); - return (i); + return i; } static void -node_remove(tree_t *tree, node_t *node, unsigned nnodes) -{ +node_remove(tree_t *tree, node_t *node, unsigned nnodes) { node_t *search_node; unsigned black_height, imbalances; @@ -195,41 +197,37 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes) } static node_t * -remove_iterate_cb(tree_t *tree, node_t *node, void *data) -{ +remove_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_next(tree, node); node_remove(tree, node, *nnodes); - return (ret); + return ret; } static node_t * -remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) -{ +remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_prev(tree, node); node_remove(tree, node, *nnodes); - return (ret); + return ret; } static void -destroy_cb(node_t *node, void *data) -{ +destroy_cb(node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; assert_u_gt(*nnodes, 0, "Destruction removed too many nodes"); (*nnodes)--; } -TEST_BEGIN(test_rb_random) -{ -#define NNODES 25 -#define NBAGS 250 -#define SEED 42 +TEST_BEGIN(test_rb_random) { +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 sfmt_t *sfmt; uint64_t bag[NNODES]; tree_t tree; @@ -241,17 +239,20 @@ TEST_BEGIN(test_rb_random) switch (i) { case 0: /* Insert in order. */ - for (j = 0; j < NNODES; j++) + for (j = 0; j < NNODES; j++) { bag[j] = j; + } break; case 1: /* Insert in reverse order. */ - for (j = 0; j < NNODES; j++) + for (j = 0; j < NNODES; j++) { bag[j] = NNODES - j - 1; + } break; default: - for (j = 0; j < NNODES; j++) + for (j = 0; j < NNODES; j++) { bag[j] = gen_rand64_range(sfmt, NNODES); + } } for (j = 1; j <= NNODES; j++) { @@ -292,12 +293,14 @@ TEST_BEGIN(test_rb_random) /* Remove nodes. 
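Keyed on i % 5, each pass removes via a different strategy: in insertion order, in reverse order, and through the remove_iterate_cb()/remove_reverse_iterate_cb()/destroy_cb() helpers defined above, so rebalancing is driven from several directions.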
*/ switch (i % 5) { case 0: - for (k = 0; k < j; k++) + for (k = 0; k < j; k++) { node_remove(&tree, &nodes[k], j - k); + } break; case 1: - for (k = j; k > 0; k--) + for (k = j; k > 0; k--) { node_remove(&tree, &nodes[k-1], k); + } break; case 2: { node_t *start; @@ -345,10 +348,8 @@ TEST_BEGIN(test_rb_random) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_rb_empty, - test_rb_random)); + test_rb_random); } diff --git a/test/unit/retained.c b/test/unit/retained.c new file mode 100644 index 0000000..d51a598 --- /dev/null +++ b/test/unit/retained.c @@ -0,0 +1,181 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/spin.h" + +static unsigned arena_ind; +static size_t sz; +static size_t esz; +#define NEPOCHS 8 +#define PER_THD_NALLOCS 1 +static atomic_u_t epoch; +static atomic_u_t nfinished; + +static unsigned +do_arena_create(extent_hooks_t *h) { + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, + (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, + "Unexpected mallctl() failure"); + return arena_ind; +} + +static void +do_arena_destroy(unsigned arena_ind) { + size_t mib[3]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +static void +do_refresh(void) { + uint64_t epoch = 1; + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)), 0, "Unexpected mallctl() failure"); +} + +static size_t +do_get_size_impl(const char *cmd, unsigned arena_ind) { + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); + size_t z = sizeof(size_t); + + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = arena_ind; + size_t size; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd); + + return size; +} + +static size_t +do_get_active(unsigned arena_ind) { + return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE; +} + +static size_t +do_get_mapped(unsigned arena_ind) { + return do_get_size_impl("stats.arenas.0.mapped", arena_ind); +} + +static void * +thd_start(void *arg) { + for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) { + /* Busy-wait for next epoch. */ + unsigned cur_epoch; + spin_t spinner = SPIN_INITIALIZER; + while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) != + next_epoch) { + spin_adaptive(&spinner); + } + assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch"); + + /* + * Allocate. The main thread will reset the arena, so there's + * no need to deallocate. + */ + for (unsigned i = 0; i < PER_THD_NALLOCS; i++) { + void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE + ); + assert_ptr_not_null(p, + "Unexpected mallocx() failure\n"); + } + + /* Let the main thread know we've finished this iteration. 
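The ATOMIC_RELEASE increment pairs with the main thread's ATOMIC_ACQUIRE load in its spin loop (while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) spin_adaptive(&spinner);), so the allocations performed above happen before the main thread samples the arena statistics.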
*/ + atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE); + } + + return NULL; +} + +TEST_BEGIN(test_retained) { + test_skip_if(!config_stats); + + arena_ind = do_arena_create(NULL); + sz = nallocx(HUGEPAGE, 0); + esz = sz + sz_large_pad; + + atomic_store_u(&epoch, 0, ATOMIC_RELAXED); + + unsigned nthreads = ncpus * 2; + VARIABLE_ARRAY(thd_t, threads, nthreads); + for (unsigned i = 0; i < nthreads; i++) { + thd_create(&threads[i], thd_start, NULL); + } + + for (unsigned e = 1; e < NEPOCHS; e++) { + atomic_store_u(&nfinished, 0, ATOMIC_RELEASE); + atomic_store_u(&epoch, e, ATOMIC_RELEASE); + + /* Wait for threads to finish allocating. */ + spin_t spinner = SPIN_INITIALIZER; + while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) { + spin_adaptive(&spinner); + } + + /* + * Assert that retained is no more than the sum of size classes + * that should have been used to satisfy the worker threads' + * requests, discounting per-growth fragmentation. + */ + do_refresh(); + + size_t allocated = esz * nthreads * PER_THD_NALLOCS; + size_t active = do_get_active(arena_ind); + assert_zu_le(allocated, active, "Unexpected active memory"); + size_t mapped = do_get_mapped(arena_ind); + assert_zu_le(active, mapped, "Unexpected mapped memory"); + + arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false); + size_t usable = 0; + size_t fragmented = 0; + for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind < + arena->extent_grow_next; pind++) { + size_t psz = sz_pind2sz(pind); + size_t psz_fragmented = psz % esz; + size_t psz_usable = psz - psz_fragmented; + /* + * Only consider size classes that wouldn't be skipped. + */ + if (psz_usable > 0) { + assert_zu_lt(usable, allocated, + "Excessive retained memory " + "(%#zx[+%#zx] > %#zx)", usable, psz_usable, + allocated); + fragmented += psz_fragmented; + usable += psz_usable; + } + } + + /* + * Clean up arena. Destroying and recreating the arena + * is simpler than specifying extent hooks that deallocate + * (rather than retaining) during reset. + */ + do_arena_destroy(arena_ind); + assert_u_eq(do_arena_create(NULL), arena_ind, + "Unexpected arena index"); + } + + for (unsigned i = 0; i < nthreads; i++) { + thd_join(threads[i], NULL); + } + + do_arena_destroy(arena_ind); +} +TEST_END + +int +main(void) { + return test( + test_retained); +} diff --git a/test/unit/rtree.c b/test/unit/rtree.c index b54b3e8..814837b 100644 --- a/test/unit/rtree.c +++ b/test/unit/rtree.c @@ -1,138 +1,207 @@ #include "test/jemalloc_test.h" +#include "jemalloc/internal/rtree.h" + +rtree_node_alloc_t *rtree_node_alloc_orig; +rtree_node_dalloc_t *rtree_node_dalloc_orig; +rtree_leaf_alloc_t *rtree_leaf_alloc_orig; +rtree_leaf_dalloc_t *rtree_leaf_dalloc_orig; + +/* Potentially too large to safely place on the stack. 
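Keeping it in static storage also lets the intercepts below distinguish the test tree from jemalloc's internal rtree and fall through to the saved *_orig hooks for the latter.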
*/ +rtree_t test_rtree; + static rtree_node_elm_t * -node_alloc(size_t nelms) -{ +rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + rtree_node_elm_t *node; + + if (rtree != &test_rtree) { + return rtree_node_alloc_orig(tsdn, rtree, nelms); + } + + malloc_mutex_unlock(tsdn, &rtree->init_lock); + node = (rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)); + assert_ptr_not_null(node, "Unexpected calloc() failure"); + malloc_mutex_lock(tsdn, &rtree->init_lock); - return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t))); + return node; } static void -node_dalloc(rtree_node_elm_t *node) -{ +rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, + rtree_node_elm_t *node) { + if (rtree != &test_rtree) { + rtree_node_dalloc_orig(tsdn, rtree, node); + return; + } free(node); } -TEST_BEGIN(test_rtree_get_empty) -{ - unsigned i; - - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - rtree_t rtree; - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), - "Unexpected rtree_new() failure"); - assert_ptr_null(rtree_get(&rtree, 0, false), - "rtree_get() should return NULL for empty tree"); - rtree_delete(&rtree); +static rtree_leaf_elm_t * +rtree_leaf_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + rtree_leaf_elm_t *leaf; + + if (rtree != &test_rtree) { + return rtree_leaf_alloc_orig(tsdn, rtree, nelms); } + + malloc_mutex_unlock(tsdn, &rtree->init_lock); + leaf = (rtree_leaf_elm_t *)calloc(nelms, sizeof(rtree_leaf_elm_t)); + assert_ptr_not_null(leaf, "Unexpected calloc() failure"); + malloc_mutex_lock(tsdn, &rtree->init_lock); + + return leaf; } -TEST_END -TEST_BEGIN(test_rtree_extrema) -{ - unsigned i; - extent_node_t node_a, node_b; +static void +rtree_leaf_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *leaf) { + if (rtree != &test_rtree) { + rtree_leaf_dalloc_orig(tsdn, rtree, leaf); + return; + } - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - rtree_t rtree; - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), - "Unexpected rtree_new() failure"); + free(leaf); +} - assert_false(rtree_set(&rtree, 0, &node_a), - "Unexpected rtree_set() failure"); - assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a, - "rtree_get() should return previously set value"); +TEST_BEGIN(test_rtree_read_empty) { + tsdn_t *tsdn; - assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b), - "Unexpected rtree_set() failure"); - assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b, - "rtree_get() should return previously set value"); + tsdn = tsdn_fetch(); - rtree_delete(&rtree); - } + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, + false), "rtree_extent_read() should return NULL for empty tree"); + rtree_delete(tsdn, rtree); } TEST_END -TEST_BEGIN(test_rtree_bits) -{ - unsigned i, j, k; - - for (i = 1; i < (sizeof(uintptr_t) << 3); i++) { - uintptr_t keys[] = {0, 1, - (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1}; - extent_node_t node; - rtree_t rtree; - - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), - "Unexpected rtree_new() failure"); - - for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { - assert_false(rtree_set(&rtree, keys[j], &node), - "Unexpected rtree_set() failure"); - for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) { - assert_ptr_eq(rtree_get(&rtree, keys[k], true), - &node, "rtree_get() should 
return " - "previously set value and ignore " - "insignificant key bits; i=%u, j=%u, k=%u, " - "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, - j, k, keys[j], keys[k]); - } - assert_ptr_null(rtree_get(&rtree, - (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false), - "Only leftmost rtree leaf should be set; " - "i=%u, j=%u", i, j); - assert_false(rtree_set(&rtree, keys[j], NULL), - "Unexpected rtree_set() failure"); - } +#undef NTHREADS +#undef NITERS +#undef SEED - rtree_delete(&rtree); - } +TEST_BEGIN(test_rtree_extrema) { + extent_t extent_a, extent_b; + extent_init(&extent_a, NULL, NULL, LARGE_MINCLASS, false, + sz_size2index(LARGE_MINCLASS), 0, extent_state_active, false, + false); + extent_init(&extent_b, NULL, NULL, 0, false, NSIZES, 0, + extent_state_active, false, false); + + tsdn_t *tsdn = tsdn_fetch(); + + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + + assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a, + extent_szind_get(&extent_a), extent_slab_get(&extent_a)), + "Unexpected rtree_write() failure"); + rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE, + extent_szind_get(&extent_a), extent_slab_get(&extent_a)); + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, true), + &extent_a, + "rtree_extent_read() should return previously set value"); + + assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0), + &extent_b, extent_szind_get_maybe_invalid(&extent_b), + extent_slab_get(&extent_b)), "Unexpected rtree_write() failure"); + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + ~((uintptr_t)0), true), &extent_b, + "rtree_extent_read() should return previously set value"); + + rtree_delete(tsdn, rtree); } TEST_END -TEST_BEGIN(test_rtree_random) -{ - unsigned i; - sfmt_t *sfmt; -#define NSET 16 -#define SEED 42 - - sfmt = init_gen_rand(SEED); - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - uintptr_t keys[NSET]; - extent_node_t node; - unsigned j; - rtree_t rtree; - - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), - "Unexpected rtree_new() failure"); - - for (j = 0; j < NSET; j++) { - keys[j] = (uintptr_t)gen_rand64(sfmt); - assert_false(rtree_set(&rtree, keys[j], &node), - "Unexpected rtree_set() failure"); - assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node, - "rtree_get() should return previously set value"); - } - for (j = 0; j < NSET; j++) { - assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node, - "rtree_get() should return previously set value"); +TEST_BEGIN(test_rtree_bits) { + tsdn_t *tsdn = tsdn_fetch(); + + uintptr_t keys[] = {PAGE, PAGE + 1, + PAGE + (((uintptr_t)1) << LG_PAGE) - 1}; + + extent_t extent; + extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0, + extent_state_active, false, false); + + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + + for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) { + assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i], + &extent, NSIZES, false), + "Unexpected rtree_write() failure"); + for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[j], true), &extent, + "rtree_extent_read() should return previously set " + "value and ignore insignificant key bits; i=%u, " + "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, + j, keys[i], keys[j]); } 
+ assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, + (((uintptr_t)2) << LG_PAGE), false), + "Only leftmost rtree leaf should be set; i=%u", i); + rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); + } - for (j = 0; j < NSET; j++) { - assert_false(rtree_set(&rtree, keys[j], NULL), - "Unexpected rtree_set() failure"); - assert_ptr_null(rtree_get(&rtree, keys[j], true), - "rtree_get() should return previously set value"); - } - for (j = 0; j < NSET; j++) { - assert_ptr_null(rtree_get(&rtree, keys[j], true), - "rtree_get() should return previously set value"); - } + rtree_delete(tsdn, rtree); +} +TEST_END + +TEST_BEGIN(test_rtree_random) { +#define NSET 16 +#define SEED 42 + sfmt_t *sfmt = init_gen_rand(SEED); + tsdn_t *tsdn = tsdn_fetch(); + uintptr_t keys[NSET]; + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + + extent_t extent; + extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0, + extent_state_active, false, false); + + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + + for (unsigned i = 0; i < NSET; i++) { + keys[i] = (uintptr_t)gen_rand64(sfmt); + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, + &rtree_ctx, keys[i], false, true); + assert_ptr_not_null(elm, + "Unexpected rtree_leaf_elm_lookup() failure"); + rtree_leaf_elm_write(tsdn, rtree, elm, &extent, NSIZES, false); + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), &extent, + "rtree_extent_read() should return previously set value"); + } + for (unsigned i = 0; i < NSET; i++) { + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), &extent, + "rtree_extent_read() should return previously set value, " + "i=%u", i); + } - rtree_delete(&rtree); + for (unsigned i = 0; i < NSET; i++) { + rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), + "rtree_extent_read() should return previously set value"); } + for (unsigned i = 0; i < NSET; i++) { + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), + "rtree_extent_read() should return previously set value"); + } + + rtree_delete(tsdn, rtree); fini_gen_rand(sfmt); #undef NSET #undef SEED @@ -140,12 +209,19 @@ TEST_BEGIN(test_rtree_random) TEST_END int -main(void) -{ - - return (test( - test_rtree_get_empty, +main(void) { + rtree_node_alloc_orig = rtree_node_alloc; + rtree_node_alloc = rtree_node_alloc_intercept; + rtree_node_dalloc_orig = rtree_node_dalloc; + rtree_node_dalloc = rtree_node_dalloc_intercept; + rtree_leaf_alloc_orig = rtree_leaf_alloc; + rtree_leaf_alloc = rtree_leaf_alloc_intercept; + rtree_leaf_dalloc_orig = rtree_leaf_dalloc; + rtree_leaf_dalloc = rtree_leaf_dalloc_intercept; + + return test( + test_rtree_read_empty, test_rtree_extrema, test_rtree_bits, - test_rtree_random)); + test_rtree_random); } diff --git a/test/unit/run_quantize.c b/test/unit/run_quantize.c deleted file mode 100644 index 089176f..0000000 --- a/test/unit/run_quantize.c +++ /dev/null @@ -1,149 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_small_run_size) -{ - unsigned nbins, i; - size_t sz, run_size; - size_t mib[4]; - size_t miblen = sizeof(mib) / sizeof(size_t); - - /* - * Iterate over all small size classes, get their run sizes, and verify - * that the quantized size is the same as the run size. 
- */ - - sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, - "Unexpected mallctl failure"); - - assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0, - "Unexpected mallctlnametomib failure"); - for (i = 0; i < nbins; i++) { - mib[2] = i; - sz = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, - NULL, 0), 0, "Unexpected mallctlbymib failure"); - assert_zu_eq(run_size, run_quantize_floor(run_size), - "Small run quantization should be a no-op (run_size=%zu)", - run_size); - assert_zu_eq(run_size, run_quantize_ceil(run_size), - "Small run quantization should be a no-op (run_size=%zu)", - run_size); - } -} -TEST_END - -TEST_BEGIN(test_large_run_size) -{ - bool cache_oblivious; - unsigned nlruns, i; - size_t sz, run_size_prev, ceil_prev; - size_t mib[4]; - size_t miblen = sizeof(mib) / sizeof(size_t); - - /* - * Iterate over all large size classes, get their run sizes, and verify - * that the quantized size is the same as the run size. - */ - - sz = sizeof(bool); - assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious, - &sz, NULL, 0), 0, "Unexpected mallctl failure"); - - sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0, - "Unexpected mallctl failure"); - - assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0, - "Unexpected mallctlnametomib failure"); - for (i = 0; i < nlruns; i++) { - size_t lrun_size, run_size, floor, ceil; - - mib[2] = i; - sz = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, (void *)&lrun_size, &sz, - NULL, 0), 0, "Unexpected mallctlbymib failure"); - run_size = cache_oblivious ? lrun_size + PAGE : lrun_size; - floor = run_quantize_floor(run_size); - ceil = run_quantize_ceil(run_size); - - assert_zu_eq(run_size, floor, - "Large run quantization should be a no-op for precise " - "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size); - assert_zu_eq(run_size, ceil, - "Large run quantization should be a no-op for precise " - "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size); - - if (i > 0) { - assert_zu_eq(run_size_prev, run_quantize_floor(run_size - - PAGE), "Floor should be a precise size"); - if (run_size_prev < ceil_prev) { - assert_zu_eq(ceil_prev, run_size, - "Ceiling should be a precise size " - "(run_size_prev=%zu, ceil_prev=%zu, " - "run_size=%zu)", run_size_prev, ceil_prev, - run_size); - } - } - run_size_prev = floor; - ceil_prev = run_quantize_ceil(run_size + PAGE); - } -} -TEST_END - -TEST_BEGIN(test_monotonic) -{ - unsigned nbins, nlruns, i; - size_t sz, floor_prev, ceil_prev; - - /* - * Iterate over all run sizes and verify that - * run_quantize_{floor,ceil}() are monotonic. 
- */ - - sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, - "Unexpected mallctl failure"); - - sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0, - "Unexpected mallctl failure"); - - floor_prev = 0; - ceil_prev = 0; - for (i = 1; i <= chunksize >> LG_PAGE; i++) { - size_t run_size, floor, ceil; - - run_size = i << LG_PAGE; - floor = run_quantize_floor(run_size); - ceil = run_quantize_ceil(run_size); - - assert_zu_le(floor, run_size, - "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)", - floor, run_size, ceil); - assert_zu_ge(ceil, run_size, - "Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)", - floor, run_size, ceil); - - assert_zu_le(floor_prev, floor, "Floor should be monotonic " - "(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)", - floor_prev, floor, run_size, ceil); - assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic " - "(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)", - floor, run_size, ceil_prev, ceil); - - floor_prev = floor; - ceil_prev = ceil; - } -} -TEST_END - -int -main(void) -{ - - return (test( - test_small_run_size, - test_large_run_size, - test_monotonic)); -} diff --git a/test/unit/size_classes.c b/test/unit/size_classes.c index 81cc606..bcff560 100644 --- a/test/unit/size_classes.c +++ b/test/unit/size_classes.c @@ -1,39 +1,37 @@ #include "test/jemalloc_test.h" static size_t -get_max_size_class(void) -{ - unsigned nhchunks; +get_max_size_class(void) { + unsigned nlextents; size_t mib[4]; size_t sz, miblen, max_size_class; sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0), - 0, "Unexpected mallctl() error"); + assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, + 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); - assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); - mib[2] = nhchunks - 1; + mib[2] = nlextents - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, NULL, 0), 0, "Unexpected mallctlbymib() error"); - return (max_size_class); + return max_size_class; } -TEST_BEGIN(test_size_classes) -{ +TEST_BEGIN(test_size_classes) { size_t size_class, max_size_class; szind_t index, max_index; max_size_class = get_max_size_class(); - max_index = size2index(max_size_class); + max_index = sz_size2index(max_size_class); - for (index = 0, size_class = index2size(index); index < max_index || + for (index = 0, size_class = sz_index2size(index); index < max_index || size_class < max_size_class; index++, size_class = - index2size(index)) { + sz_index2size(index)) { assert_true(index < max_index, "Loop conditionals should be equivalent; index=%u, " "size_class=%zu (%#zx)", index, size_class, size_class); @@ -41,144 +39,145 @@ TEST_BEGIN(test_size_classes) "Loop conditionals should be equivalent; index=%u, " "size_class=%zu (%#zx)", index, size_class, size_class); - assert_u_eq(index, size2index(size_class), - "size2index() does not reverse index2size(): index=%u -->" - " size_class=%zu --> index=%u --> size_class=%zu", index, - size_class, size2index(size_class), - index2size(size2index(size_class))); - assert_zu_eq(size_class, index2size(size2index(size_class)), - "index2size() does not reverse size2index(): index=%u -->" - " size_class=%zu --> index=%u --> size_class=%zu", index, - size_class, 
size2index(size_class), - index2size(size2index(size_class))); - - assert_u_eq(index+1, size2index(size_class+1), + assert_u_eq(index, sz_size2index(size_class), + "sz_size2index() does not reverse sz_index2size(): index=%u" + " --> size_class=%zu --> index=%u --> size_class=%zu", + index, size_class, sz_size2index(size_class), + sz_index2size(sz_size2index(size_class))); + assert_zu_eq(size_class, + sz_index2size(sz_size2index(size_class)), + "sz_index2size() does not reverse sz_size2index(): index=%u" + " --> size_class=%zu --> index=%u --> size_class=%zu", + index, size_class, sz_size2index(size_class), + sz_index2size(sz_size2index(size_class))); + + assert_u_eq(index+1, sz_size2index(size_class+1), "Next size_class does not round up properly"); assert_zu_eq(size_class, (index > 0) ? - s2u(index2size(index-1)+1) : s2u(1), - "s2u() does not round up to size class"); - assert_zu_eq(size_class, s2u(size_class-1), - "s2u() does not round up to size class"); - assert_zu_eq(size_class, s2u(size_class), - "s2u() does not compute same size class"); - assert_zu_eq(s2u(size_class+1), index2size(index+1), - "s2u() does not round up to next size class"); + sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class-1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class), + "sz_s2u() does not compute same size class"); + assert_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1), + "sz_s2u() does not round up to next size class"); } - assert_u_eq(index, size2index(index2size(index)), - "size2index() does not reverse index2size()"); - assert_zu_eq(max_size_class, index2size(size2index(max_size_class)), - "index2size() does not reverse size2index()"); - - assert_zu_eq(size_class, s2u(index2size(index-1)+1), - "s2u() does not round up to size class"); - assert_zu_eq(size_class, s2u(size_class-1), - "s2u() does not round up to size class"); - assert_zu_eq(size_class, s2u(size_class), - "s2u() does not compute same size class"); + assert_u_eq(index, sz_size2index(sz_index2size(index)), + "sz_size2index() does not reverse sz_index2size()"); + assert_zu_eq(max_size_class, sz_index2size( + sz_size2index(max_size_class)), + "sz_index2size() does not reverse sz_size2index()"); + + assert_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class-1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class), + "sz_s2u() does not compute same size class"); } TEST_END -TEST_BEGIN(test_psize_classes) -{ - size_t size_class, max_size_class; +TEST_BEGIN(test_psize_classes) { + size_t size_class, max_psz; pszind_t pind, max_pind; - max_size_class = get_max_size_class(); - max_pind = psz2ind(max_size_class); + max_psz = get_max_size_class() + PAGE; + max_pind = sz_psz2ind(max_psz); - for (pind = 0, size_class = pind2sz(pind); pind < max_pind || - size_class < max_size_class; pind++, size_class = - pind2sz(pind)) { + for (pind = 0, size_class = sz_pind2sz(pind); + pind < max_pind || size_class < max_psz; + pind++, size_class = sz_pind2sz(pind)) { assert_true(pind < max_pind, "Loop conditionals should be equivalent; pind=%u, " "size_class=%zu (%#zx)", pind, size_class, size_class); - assert_true(size_class < max_size_class, + assert_true(size_class < max_psz, "Loop conditionals should be equivalent; pind=%u, " "size_class=%zu (%#zx)", pind, size_class, size_class); - assert_u_eq(pind, 
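/* pind -> size -> pind must round-trip exactly on class boundaries (these are the pre-rename spellings of sz_psz2ind()/sz_pind2sz()) */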
psz2ind(size_class), - "psz2ind() does not reverse pind2sz(): pind=%u -->" + assert_u_eq(pind, sz_psz2ind(size_class), + "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->" " size_class=%zu --> pind=%u --> size_class=%zu", pind, - size_class, psz2ind(size_class), - pind2sz(psz2ind(size_class))); - assert_zu_eq(size_class, pind2sz(psz2ind(size_class)), - "pind2sz() does not reverse psz2ind(): pind=%u -->" + size_class, sz_psz2ind(size_class), + sz_pind2sz(sz_psz2ind(size_class))); + assert_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)), + "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->" " size_class=%zu --> pind=%u --> size_class=%zu", pind, - size_class, psz2ind(size_class), - pind2sz(psz2ind(size_class))); + size_class, sz_psz2ind(size_class), + sz_pind2sz(sz_psz2ind(size_class))); - assert_u_eq(pind+1, psz2ind(size_class+1), + assert_u_eq(pind+1, sz_psz2ind(size_class+1), "Next size_class does not round up properly"); assert_zu_eq(size_class, (pind > 0) ? - psz2u(pind2sz(pind-1)+1) : psz2u(1), - "psz2u() does not round up to size class"); - assert_zu_eq(size_class, psz2u(size_class-1), - "psz2u() does not round up to size class"); - assert_zu_eq(size_class, psz2u(size_class), - "psz2u() does not compute same size class"); - assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1), - "psz2u() does not round up to next size class"); + sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class-1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class), + "sz_psz2u() does not compute same size class"); + assert_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1), + "sz_psz2u() does not round up to next size class"); } - assert_u_eq(pind, psz2ind(pind2sz(pind)), - "psz2ind() does not reverse pind2sz()"); - assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)), - "pind2sz() does not reverse psz2ind()"); - - assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1), - "psz2u() does not round up to size class"); - assert_zu_eq(size_class, psz2u(size_class-1), - "psz2u() does not round up to size class"); - assert_zu_eq(size_class, psz2u(size_class), - "psz2u() does not compute same size class"); + assert_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)), + "sz_psz2ind() does not reverse sz_pind2sz()"); + assert_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)), + "sz_pind2sz() does not reverse sz_psz2ind()"); + + assert_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class-1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class), + "sz_psz2u() does not compute same size class"); } TEST_END -TEST_BEGIN(test_overflow) -{ - size_t max_size_class; +TEST_BEGIN(test_overflow) { + size_t max_size_class, max_psz; max_size_class = get_max_size_class(); - - assert_u_eq(size2index(max_size_class+1), NSIZES, - "size2index() should return NSIZES on overflow"); - assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES, - "size2index() should return NSIZES on overflow"); - assert_u_eq(size2index(SIZE_T_MAX), NSIZES, - "size2index() should return NSIZES on overflow"); - - assert_zu_eq(s2u(max_size_class+1), 0, - "s2u() should return 0 for unsupported size"); - assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0, - "s2u() should return 0 for unsupported size"); - assert_zu_eq(s2u(SIZE_T_MAX), 0, - "s2u() should return 0 on overflow"); - - assert_u_eq(psz2ind(max_size_class+1), 
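/* one byte past the largest class must map to the overflow sentinel */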
NPSIZES, - "psz2ind() should return NPSIZES on overflow"); - assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES, - "psz2ind() should return NPSIZES on overflow"); - assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES, - "psz2ind() should return NPSIZES on overflow"); - - assert_zu_eq(psz2u(max_size_class+1), 0, - "psz2u() should return 0 for unsupported size"); - assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0, - "psz2u() should return 0 for unsupported size"); - assert_zu_eq(psz2u(SIZE_T_MAX), 0, - "psz2u() should return 0 on overflow"); + max_psz = max_size_class + PAGE; + + assert_u_eq(sz_size2index(max_size_class+1), NSIZES, + "sz_size2index() should return NSIZES on overflow"); + assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), NSIZES, + "sz_size2index() should return NSIZES on overflow"); + assert_u_eq(sz_size2index(SIZE_T_MAX), NSIZES, + "sz_size2index() should return NSIZES on overflow"); + + assert_zu_eq(sz_s2u(max_size_class+1), 0, + "sz_s2u() should return 0 for unsupported size"); + assert_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0, + "sz_s2u() should return 0 for unsupported size"); + assert_zu_eq(sz_s2u(SIZE_T_MAX), 0, + "sz_s2u() should return 0 on overflow"); + + assert_u_eq(sz_psz2ind(max_size_class+1), NPSIZES, + "sz_psz2ind() should return NPSIZES on overflow"); + assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES, + "sz_psz2ind() should return NPSIZES on overflow"); + assert_u_eq(sz_psz2ind(SIZE_T_MAX), NPSIZES, + "sz_psz2ind() should return NPSIZES on overflow"); + + assert_zu_eq(sz_psz2u(max_size_class+1), max_psz, + "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported" + " size"); + assert_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz, + "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported " + "size"); + assert_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz, + "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow"); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_size_classes, test_psize_classes, - test_overflow)); + test_overflow); } diff --git a/test/unit/slab.c b/test/unit/slab.c new file mode 100644 index 0000000..6f40aee --- /dev/null +++ b/test/unit/slab.c @@ -0,0 +1,32 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_arena_slab_regind) { + szind_t binind; + + for (binind = 0; binind < NBINS; binind++) { + size_t regind; + extent_t slab; + const arena_bin_info_t *bin_info = &arena_bin_info[binind]; + extent_init(&slab, NULL, mallocx(bin_info->slab_size, + MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true, + binind, 0, extent_state_active, false, true); + assert_ptr_not_null(extent_addr_get(&slab), + "Unexpected malloc() failure"); + for (regind = 0; regind < bin_info->nregs; regind++) { + void *reg = (void *)((uintptr_t)extent_addr_get(&slab) + + (bin_info->reg_size * regind)); + assert_zu_eq(arena_slab_regind(&slab, binind, reg), + regind, + "Incorrect region index computed for size %zu", + bin_info->reg_size); + } + free(extent_addr_get(&slab)); + } +} +TEST_END + +int +main(void) { + return test( + test_arena_slab_regind); +} diff --git a/test/unit/smoothstep.c b/test/unit/smoothstep.c index 4cfb213..7c5dbb7 100644 --- a/test/unit/smoothstep.c +++ b/test/unit/smoothstep.c @@ -1,14 +1,13 @@ #include "test/jemalloc_test.h" static const uint64_t smoothstep_tab[] = { -#define STEP(step, h, x, y) \ +#define STEP(step, h, x, y) \ h, SMOOTHSTEP #undef STEP }; -TEST_BEGIN(test_smoothstep_integral) -{ +TEST_BEGIN(test_smoothstep_integral) { uint64_t sum, min, max; unsigned i; @@ -20,8 +19,9 @@ TEST_BEGIN(test_smoothstep_integral) * 
integral may be off by as much as SMOOTHSTEP_NSTEPS ulps. */ sum = 0; - for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { sum += smoothstep_tab[i]; + } max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1); min = max - SMOOTHSTEP_NSTEPS; @@ -36,8 +36,7 @@ TEST_BEGIN(test_smoothstep_integral) } TEST_END -TEST_BEGIN(test_smoothstep_monotonic) -{ +TEST_BEGIN(test_smoothstep_monotonic) { uint64_t prev_h; unsigned i; @@ -58,8 +57,7 @@ TEST_BEGIN(test_smoothstep_monotonic) } TEST_END -TEST_BEGIN(test_smoothstep_slope) -{ +TEST_BEGIN(test_smoothstep_slope) { uint64_t prev_h, prev_delta; unsigned i; @@ -96,11 +94,9 @@ TEST_BEGIN(test_smoothstep_slope) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_smoothstep_integral, test_smoothstep_monotonic, - test_smoothstep_slope)); + test_smoothstep_slope); } diff --git a/test/unit/spin.c b/test/unit/spin.c new file mode 100644 index 0000000..b965f74 --- /dev/null +++ b/test/unit/spin.c @@ -0,0 +1,18 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/spin.h" + +TEST_BEGIN(test_spin) { + spin_t spinner = SPIN_INITIALIZER; + + for (unsigned i = 0; i < 100; i++) { + spin_adaptive(&spinner); + } +} +TEST_END + +int +main(void) { + return test( + test_spin); +} diff --git a/test/unit/stats.c b/test/unit/stats.c index 315717d..d9849d8 100644 --- a/test/unit/stats.c +++ b/test/unit/stats.c @@ -1,15 +1,9 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_stats_summary) -{ - size_t *cactive; +TEST_BEGIN(test_stats_summary) { size_t sz, allocated, active, resident, mapped; int expected = config_stats ? 0 : ENOENT; - sz = sizeof(cactive); - assert_d_eq(mallctl("stats.cactive", (void *)&cactive, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - sz = sizeof(size_t); assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); @@ -21,8 +15,6 @@ TEST_BEGIN(test_stats_summary) expected, "Unexpected mallctl() result"); if (config_stats) { - assert_zu_le(active, *cactive, - "active should be no larger than cactive"); assert_zu_le(allocated, active, "allocated should be no larger than active"); assert_zu_lt(active, resident, @@ -33,8 +25,7 @@ TEST_BEGIN(test_stats_summary) } TEST_END -TEST_BEGIN(test_stats_huge) -{ +TEST_BEGIN(test_stats_large) { void *p; uint64_t epoch; size_t allocated; @@ -42,22 +33,24 @@ TEST_BEGIN(test_stats_huge) size_t sz; int expected = config_stats ? 
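/* stats mallctls are expected to read back 0 when stats collection is compiled in, and to fail with ENOENT otherwise */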
0 : ENOENT; - p = mallocx(large_maxclass+1, 0); + p = mallocx(SMALL_MAXCLASS+1, MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.allocated", + (void *)&allocated, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc, + assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", (void *)&nrequests, + assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.nrequests", + (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, @@ -72,31 +65,25 @@ TEST_BEGIN(test_stats_huge) } TEST_END -TEST_BEGIN(test_stats_arenas_summary) -{ - unsigned arena; - void *little, *large, *huge; +TEST_BEGIN(test_stats_arenas_summary) { + void *little, *large; uint64_t epoch; size_t sz; int expected = config_stats ? 0 : ENOENT; size_t mapped; - uint64_t npurge, nmadvise, purged; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, - sizeof(arena)), 0, "Unexpected mallctl() failure"); + uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; + uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; - little = mallocx(SMALL_MAXCLASS, 0); + little = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0)); assert_ptr_not_null(little, "Unexpected mallocx() failure"); - large = mallocx(large_maxclass, 0); + large = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0)); assert_ptr_not_null(large, "Unexpected mallocx() failure"); - huge = mallocx(chunksize, 0); - assert_ptr_not_null(huge, "Unexpected mallocx() failure"); dallocx(little, 0); dallocx(large, 0); - dallocx(huge, 0); + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + opt_tcache ? 
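/* thread.tcache.flush is expected to fail with EFAULT when the thread cache is disabled */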
0 : EFAULT, "Unexpected mallctl() result"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); @@ -106,42 +93,54 @@ TEST_BEGIN(test_stats_arenas_summary) sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz, NULL, - 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz, NULL, - 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.dirty_npurge", + (void *)&dirty_npurge, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.dirty_nmadvise", + (void *)&dirty_nmadvise, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.dirty_purged", + (void *)&dirty_purged, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.muzzy_npurge", + (void *)&muzzy_npurge, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise", + (void *)&muzzy_nmadvise, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.muzzy_purged", + (void *)&muzzy_purged, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); if (config_stats) { - assert_u64_gt(npurge, 0, - "At least one purge should have occurred"); - assert_u64_le(nmadvise, purged, - "nmadvise should be no greater than purged"); + if (!background_thread_enabled()) { + assert_u64_gt(dirty_npurge + muzzy_npurge, 0, + "At least one purge should have occurred"); + } + assert_u64_le(dirty_nmadvise, dirty_purged, + "dirty_nmadvise should be no greater than dirty_purged"); + assert_u64_le(muzzy_nmadvise, muzzy_purged, + "muzzy_nmadvise should be no greater than muzzy_purged"); } } TEST_END void * -thd_start(void *arg) -{ - - return (NULL); +thd_start(void *arg) { + return NULL; } static void -no_lazy_lock(void) -{ +no_lazy_lock(void) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } -TEST_BEGIN(test_stats_arenas_small) -{ - unsigned arena; +TEST_BEGIN(test_stats_arenas_small) { void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc, nrequests; @@ -149,15 +148,11 @@ TEST_BEGIN(test_stats_arenas_small) no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, - sizeof(arena)), 0, "Unexpected mallctl() failure"); - - p = mallocx(SMALL_MAXCLASS, 0); + p = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), - config_tcache ? 0 : ENOENT, "Unexpected mallctl() result"); + opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); @@ -190,19 +185,13 @@ TEST_BEGIN(test_stats_arenas_small) } TEST_END -TEST_BEGIN(test_stats_arenas_large) -{ - unsigned arena; +TEST_BEGIN(test_stats_arenas_large) { void *p; size_t sz, allocated; - uint64_t epoch, nmalloc, ndalloc, nrequests; + uint64_t epoch, nmalloc, ndalloc; int expected = config_stats ? 
0 : ENOENT; - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, - sizeof(arena)), 0, "Unexpected mallctl() failure"); - - p = mallocx(large_maxclass, 0); + p = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), @@ -217,9 +206,6 @@ TEST_BEGIN(test_stats_arenas_large) &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.large.nrequests", - (void *)&nrequests, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, @@ -228,103 +214,80 @@ TEST_BEGIN(test_stats_arenas_large) "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(nrequests, 0, - "nrequests should be greater than zero"); } dallocx(p, 0); } TEST_END -TEST_BEGIN(test_stats_arenas_huge) -{ - unsigned arena; - void *p; - size_t sz, allocated; - uint64_t epoch, nmalloc, ndalloc; - int expected = config_stats ? 0 : ENOENT; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, - sizeof(arena)), 0, "Unexpected mallctl() failure"); - - p = mallocx(chunksize, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), - 0, "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_gt(allocated, 0, - "allocated should be greater than zero"); - assert_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - } - - dallocx(p, 0); +static void +gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) { + sprintf(cmd, "stats.arenas.%u.bins.0.%s", arena_ind, name); } -TEST_END -TEST_BEGIN(test_stats_arenas_bins) -{ - unsigned arena; +TEST_BEGIN(test_stats_arenas_bins) { void *p; - size_t sz, curruns, curregs; + size_t sz, curslabs, curregs; uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t nruns, nreruns; + uint64_t nslabs, nreslabs; int expected = config_stats ? 0 : ENOENT; - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, - sizeof(arena)), 0, "Unexpected mallctl() failure"); + /* Make sure allocation below isn't satisfied by tcache. */ + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + opt_tcache ? 
0 : EFAULT, "Unexpected mallctl() result"); + + unsigned arena_ind, old_arena_ind; + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Arena creation failure"); + sz = sizeof(arena_ind); + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&arena_ind, sizeof(arena_ind)), 0, + "Unexpected mallctl() failure"); - p = mallocx(arena_bin_info[0].reg_size, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); + p = malloc(arena_bin_info[0].reg_size); + assert_ptr_not_null(p, "Unexpected malloc() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), - config_tcache ? 0 : ENOENT, "Unexpected mallctl() result"); + opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); + char cmd[128]; sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", (void *)&nmalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", (void *)&ndalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", - (void *)&nrequests, &sz, NULL, 0), expected, + gen_mallctl_str(cmd, "nmalloc", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "ndalloc", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nrequests", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", (void *)&curregs, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "curregs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", (void *)&nfills, - &sz, NULL, 0), config_tcache ? expected : ENOENT, + gen_mallctl_str(cmd, "nfills", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", (void *)&nflushes, - &sz, NULL, 0), config_tcache ? 
expected : ENOENT, + gen_mallctl_str(cmd, "nflushes", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", (void *)&nruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", (void *)&nreruns, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nslabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nreslabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", (void *)&curruns, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "curslabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, @@ -335,95 +298,48 @@ TEST_BEGIN(test_stats_arenas_bins) "nrequests should be greater than zero"); assert_zu_gt(curregs, 0, "allocated should be greater than zero"); - if (config_tcache) { + if (opt_tcache) { assert_u64_gt(nfills, 0, "At least one fill should have occurred"); assert_u64_gt(nflushes, 0, "At least one flush should have occurred"); } - assert_u64_gt(nruns, 0, - "At least one run should have been allocated"); - assert_zu_gt(curruns, 0, - "At least one run should be currently allocated"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_lruns) -{ - unsigned arena; - void *p; - uint64_t epoch, nmalloc, ndalloc, nrequests; - size_t curruns, sz; - int expected = config_stats ? 0 : ENOENT; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, - sizeof(arena)), 0, "Unexpected mallctl() failure"); - - p = mallocx(LARGE_MINCLASS, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), - 0, "Unexpected mallctl() failure"); - - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", (void *)&nmalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", (void *)&ndalloc, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", - (void *)&nrequests, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", (void *)&curruns, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(nrequests, 0, - "nrequests should be greater than zero"); - assert_u64_gt(curruns, 0, - "At least one run should be currently allocated"); + assert_u64_gt(nslabs, 0, + "At least one slab should have been allocated"); + assert_zu_gt(curslabs, 0, + "At least one slab should be currently allocated"); } dallocx(p, 0); } TEST_END -TEST_BEGIN(test_stats_arenas_hchunks) -{ - unsigned arena; +TEST_BEGIN(test_stats_arenas_lextents) { void *p; uint64_t epoch, nmalloc, ndalloc; - size_t curhchunks, sz; + size_t curlextents, sz, hsize; int expected = config_stats ? 
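/* lextents.0 is the smallest large size class; the test below reads arenas.lextent.0.size and allocates exactly that many bytes, so this is the bucket that moves */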
0 : ENOENT; - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, - sizeof(arena)), 0, "Unexpected mallctl() failure"); + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); - p = mallocx(chunksize, 0); + p = mallocx(hsize, MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", + assert_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", + assert_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", - (void *)&curhchunks, &sz, NULL, 0), expected, + assert_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents", + (void *)&curlextents, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { @@ -431,8 +347,8 @@ TEST_BEGIN(test_stats_arenas_hchunks) "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(curhchunks, 0, - "At least one chunk should be currently allocated"); + assert_u64_gt(curlextents, 0, + "At least one extent should be currently allocated"); } dallocx(p, 0); @@ -440,17 +356,13 @@ TEST_BEGIN(test_stats_arenas_hchunks) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test_no_reentrancy( test_stats_summary, - test_stats_huge, + test_stats_large, test_stats_arenas_summary, test_stats_arenas_small, test_stats_arenas_large, - test_stats_arenas_huge, test_stats_arenas_bins, - test_stats_arenas_lruns, - test_stats_arenas_hchunks)); + test_stats_arenas_lextents); } diff --git a/test/unit/stats_print.c b/test/unit/stats_print.c index 4f412dc..acb26b0 100644 --- a/test/unit/stats_print.c +++ b/test/unit/stats_print.c @@ -1,5 +1,7 @@ #include "test/jemalloc_test.h" +#include "jemalloc/internal/util.h" + typedef enum { TOKEN_TYPE_NONE, TOKEN_TYPE_ERROR, @@ -39,8 +41,7 @@ struct parser_s { static void token_init(token_t *token, parser_t *parser, token_type_t token_type, - size_t pos, size_t len, size_t line, size_t col) -{ + size_t pos, size_t len, size_t line, size_t col) { token->parser = parser; token->token_type = token_type; token->pos = pos; @@ -50,8 +51,7 @@ token_init(token_t *token, parser_t *parser, token_type_t token_type, } static void -token_error(token_t *token) -{ +token_error(token_t *token) { if (!token->parser->verbose) { return; } @@ -67,16 +67,13 @@ token_error(token_t *token) token->col); break; } - { - UNUSED ssize_t err = write(STDERR_FILENO, - &token->parser->buf[token->pos], token->len); - } + UNUSED ssize_t err = write(STDERR_FILENO, + &token->parser->buf[token->pos], token->len); malloc_printf("\n"); } static void -parser_init(parser_t *parser, bool verbose) -{ +parser_init(parser_t *parser, bool verbose) { parser->verbose = verbose; parser->buf = NULL; parser->len = 0; @@ -86,16 +83,14 @@ parser_init(parser_t *parser, bool verbose) } static void -parser_fini(parser_t *parser) -{ +parser_fini(parser_t *parser) { if (parser->buf != NULL) { dallocx(parser->buf, MALLOCX_TCACHE_NONE); } } static bool -parser_append(parser_t *parser, const char *str) -{ 
+parser_append(parser_t *parser, const char *str) {
 	size_t len = strlen(str);
 	char *buf = (parser->buf == NULL) ? mallocx(len + 1,
 	    MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1,
@@ -110,8 +105,7 @@ parser_append(parser_t *parser, const char *str)
 }
 
 static bool
-parser_tokenize(parser_t *parser)
-{
+parser_tokenize(parser_t *parser) {
 	enum {
 		STATE_START,
 		STATE_EOI,
@@ -672,8 +666,7 @@ static bool	parser_parse_array(parser_t *parser);
 static bool	parser_parse_object(parser_t *parser);
 
 static bool
-parser_parse_value(parser_t *parser)
-{
+parser_parse_value(parser_t *parser) {
 	switch (parser->token.token_type) {
 	case TOKEN_TYPE_NULL:
 	case TOKEN_TYPE_FALSE:
@@ -692,8 +685,7 @@ parser_parse_value(parser_t *parser)
 }
 
 static bool
-parser_parse_pair(parser_t *parser)
-{
+parser_parse_pair(parser_t *parser) {
 	assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
 	    "Pair should start with string");
 	if (parser_tokenize(parser)) {
@@ -711,8 +703,7 @@ parser_parse_pair(parser_t *parser)
 }
 
 static bool
-parser_parse_values(parser_t *parser)
-{
+parser_parse_values(parser_t *parser) {
 	if (parser_parse_value(parser)) {
 		return true;
 	}
@@ -739,8 +730,7 @@ parser_parse_values(parser_t *parser)
 }
 
 static bool
-parser_parse_array(parser_t *parser)
-{
+parser_parse_array(parser_t *parser) {
 	assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
 	    "Array should start with [");
 	if (parser_tokenize(parser)) {
@@ -756,8 +746,7 @@ parser_parse_array(parser_t *parser)
 }
 
 static bool
-parser_parse_pairs(parser_t *parser)
-{
+parser_parse_pairs(parser_t *parser) {
 	assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
 	    "Object should start with string");
 	if (parser_parse_pair(parser)) {
@@ -792,8 +781,7 @@ parser_parse_pairs(parser_t *parser)
 }
 
 static bool
-parser_parse_object(parser_t *parser)
-{
+parser_parse_object(parser_t *parser) {
 	assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
 	    "Object should start with {");
 	if (parser_tokenize(parser)) {
@@ -811,8 +799,7 @@ parser_parse_object(parser_t *parser)
 }
 
 static bool
-parser_parse(parser_t *parser)
-{
+parser_parse(parser_t *parser) {
 	if (parser_tokenize(parser)) {
 		goto label_error;
 	}
@@ -836,8 +823,7 @@ label_error:
 	return true;
 }
 
-TEST_BEGIN(test_json_parser)
-{
+TEST_BEGIN(test_json_parser) {
 	size_t i;
 	const char *invalid_inputs[] = {
 		/* Tokenizer error case tests. */
@@ -934,41 +920,40 @@ TEST_BEGIN(test_json_parser)
 TEST_END
 
 void
-write_cb(void *opaque, const char *str)
-{
+write_cb(void *opaque, const char *str) {
 	parser_t *parser = (parser_t *)opaque;
 
 	if (parser_append(parser, str)) {
 		test_fail("Unexpected input appending failure");
 	}
 }
 
-TEST_BEGIN(test_stats_print_json)
-{
+TEST_BEGIN(test_stats_print_json) {
 	const char *opts[] = {
 		"J",
 		"Jg",
 		"Jm",
+		"Jd",
+		"Jmd",
+		"Jgd",
 		"Jgm",
+		"Jgmd",
 		"Ja",
 		"Jb",
-		"Jab",
 		"Jl",
-		"Jal",
+		"Jx",
 		"Jbl",
+		"Jal",
+		"Jab",
 		"Jabl",
-		"Jh",
-		"Jah",
-		"Jbh",
-		"Jabh",
-		"Jlh",
-		"Jalh",
-		"Jblh",
-		"Jablh",
-		"Jgmablh",
+		"Jax",
+		"Jbx",
+		"Jlx",
+		"Jablx",
+		"Jgmdablx",
 	};
 	unsigned arena_ind, i;
 
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < 3; i++) {
 		unsigned j;
 
 		switch (i) {
@@ -976,9 +961,19 @@ TEST_BEGIN(test_stats_print_json)
 			break;
 		case 1: {
 			size_t sz = sizeof(arena_ind);
-			assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind,
+			assert_d_eq(mallctl("arenas.create", (void *)&arena_ind,
 			    &sz, NULL, 0), 0, "Unexpected mallctl failure");
 			break;
+		} case 2: {
+			size_t mib[3];
+			size_t miblen = sizeof(mib)/sizeof(size_t);
+			assert_d_eq(mallctlnametomib("arena.0.destroy",
+			    mib, &miblen), 0,
+			    "Unexpected mallctlnametomib failure");
+			mib[1] = arena_ind;
+			assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL,
+			    0), 0, "Unexpected mallctlbymib failure");
+			break;
 		} default:
 			not_reached();
 		}
@@ -997,9 +992,8 @@ TEST_BEGIN(test_stats_print_json)
 TEST_END
 
 int
-main(void)
-{
-	return (test(
+main(void) {
+	return test(
 	    test_json_parser,
-	    test_stats_print_json));
+	    test_stats_print_json);
 }
diff --git a/test/unit/ticker.c b/test/unit/ticker.c
index e737020..e5790a3 100644
--- a/test/unit/ticker.c
+++ b/test/unit/ticker.c
@@ -1,9 +1,10 @@
 #include "test/jemalloc_test.h"
 
-TEST_BEGIN(test_ticker_tick)
-{
-#define	NREPS 2
-#define	NTICKS 3
+#include "jemalloc/internal/ticker.h"
+
+TEST_BEGIN(test_ticker_tick) {
+#define NREPS 2
+#define NTICKS 3
 	ticker_t ticker;
 	int32_t i, j;
 
@@ -26,9 +27,8 @@ TEST_BEGIN(test_ticker_tick)
 }
 TEST_END
 
-TEST_BEGIN(test_ticker_ticks)
-{
-#define	NTICKS 3
+TEST_BEGIN(test_ticker_ticks) {
+#define NTICKS 3
 	ticker_t ticker;
 
 	ticker_init(&ticker, NTICKS);
@@ -45,9 +45,8 @@ TEST_BEGIN(test_ticker_ticks)
 }
 TEST_END
 
-TEST_BEGIN(test_ticker_copy)
-{
-#define	NTICKS 3
+TEST_BEGIN(test_ticker_copy) {
+#define NTICKS 3
 	ticker_t ta, tb;
 
 	ticker_init(&ta, NTICKS);
@@ -66,11 +65,9 @@ TEST_BEGIN(test_ticker_copy)
 TEST_END
 
 int
-main(void)
-{
-
-	return (test(
+main(void) {
+	return test(
 	    test_ticker_tick,
 	    test_ticker_ticks,
-	    test_ticker_copy));
+	    test_ticker_copy);
 }
diff --git a/test/unit/tsd.c b/test/unit/tsd.c
index d5f96ac..6c47913 100644
--- a/test/unit/tsd.c
+++ b/test/unit/tsd.c
@@ -1,38 +1,29 @@
 #include "test/jemalloc_test.h"
 
-#define	THREAD_DATA 0x72b65c10
-
-typedef unsigned int data_t;
-
-static bool data_cleanup_executed;
-
-malloc_tsd_types(data_, data_t)
-malloc_tsd_protos(, data_, data_t)
+static int data_cleanup_count;
 
 void
-data_cleanup(void *arg)
-{
-	data_t *data = (data_t *)arg;
-
-	if (!data_cleanup_executed) {
-		assert_x_eq(*data, THREAD_DATA,
+data_cleanup(int *data) {
+	if (data_cleanup_count == 0) {
+		assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
 		    "Argument passed into cleanup function should match tsd "
 		    "value");
 	}
-	data_cleanup_executed = true;
+	++data_cleanup_count;
 
 	/*
 	 * Allocate during cleanup for two rounds, in order to assure that
 	 * jemalloc's internal tsd reinitialization happens.
 	 */
+	bool reincarnate = false;
 	switch (*data) {
-	case THREAD_DATA:
+	case MALLOC_TSD_TEST_DATA_INIT:
 		*data = 1;
-		data_tsd_set(data);
+		reincarnate = true;
 		break;
 	case 1:
 		*data = 2;
-		data_tsd_set(data);
+		reincarnate = true;
 		break;
 	case 2:
 		return;
@@ -40,73 +31,109 @@ data_cleanup(void *arg)
 		not_reached();
 	}
 
-	{
+	if (reincarnate) {
 		void *p = mallocx(1, 0);
 		assert_ptr_not_null(p, "Unexpeced mallocx() failure");
 		dallocx(p, 0);
 	}
 }
 
-malloc_tsd_externs(data_, data_t)
-#define	DATA_INIT 0x12345678
-malloc_tsd_data(, data_, data_t, DATA_INIT)
-malloc_tsd_funcs(, data_, data_t, DATA_INIT, data_cleanup)
-
 static void *
-thd_start(void *arg)
-{
-	data_t d = (data_t)(uintptr_t)arg;
+thd_start(void *arg) {
+	int d = (int)(uintptr_t)arg;
 	void *p;
 
-	assert_x_eq(*data_tsd_get(true), DATA_INIT,
+	tsd_t *tsd = tsd_fetch();
+	assert_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
 	    "Initial tsd get should return initialization value");
 
 	p = malloc(1);
 	assert_ptr_not_null(p, "Unexpected malloc() failure");
 
-	data_tsd_set(&d);
-	assert_x_eq(*data_tsd_get(true), d,
+	tsd_test_data_set(tsd, d);
+	assert_x_eq(tsd_test_data_get(tsd), d,
 	    "After tsd set, tsd get should return value that was set");
 
 	d = 0;
-	assert_x_eq(*data_tsd_get(true), (data_t)(uintptr_t)arg,
+	assert_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
 	    "Resetting local data should have no effect on tsd");
 
+	tsd_test_callback_set(tsd, &data_cleanup);
+
 	free(p);
-	return (NULL);
+	return NULL;
 }
 
-TEST_BEGIN(test_tsd_main_thread)
-{
-
+TEST_BEGIN(test_tsd_main_thread) {
 	thd_start((void *)(uintptr_t)0xa5f3e329);
 }
 TEST_END
 
-TEST_BEGIN(test_tsd_sub_thread)
-{
+TEST_BEGIN(test_tsd_sub_thread) {
 	thd_t thd;
 
-	data_cleanup_executed = false;
-	thd_create(&thd, thd_start, (void *)THREAD_DATA);
+	data_cleanup_count = 0;
+	thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT);
 	thd_join(thd, NULL);
-	assert_true(data_cleanup_executed,
-	    "Cleanup function should have executed");
+	/*
+	 * We reincarnate twice in the data cleanup, so it should execute at
+	 * least 3 times.
+	 */
+	assert_x_ge(data_cleanup_count, 3,
+	    "Cleanup function should have executed multiple times.");
 }
 TEST_END
 
-int
-main(void)
-{
+static void *
+thd_start_reincarnated(void *arg) {
+	tsd_t *tsd = tsd_fetch();
+	assert(tsd);
+
+	void *p = malloc(1);
+	assert_ptr_not_null(p, "Unexpected malloc() failure");
+
+	/* Manually trigger reincarnation. */
+	assert_ptr_not_null(tsd_arena_get(tsd),
+	    "Should have tsd arena set.");
+	tsd_cleanup((void *)tsd);
+	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
+	    "TSD arena should have been cleared.");
+	assert_u_eq(tsd->state, tsd_state_purgatory,
+	    "TSD state should be purgatory\n");
 
-	/* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
+	free(p);
+	assert_u_eq(tsd->state, tsd_state_reincarnated,
+	    "TSD state should be reincarnated\n");
+	p = mallocx(1, MALLOCX_TCACHE_NONE);
+	assert_ptr_not_null(p, "Unexpected malloc() failure");
+	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
+	    "Should not have tsd arena set after reincarnation.");
+
+	free(p);
+	tsd_cleanup((void *)tsd);
+	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
+	    "TSD arena should have been cleared after 2nd cleanup.");
+
+	return NULL;
+}
+
+TEST_BEGIN(test_tsd_reincarnation) {
+	thd_t thd;
+
+	thd_create(&thd, thd_start_reincarnated, NULL);
+	thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void) {
+	/* Ensure tsd bootstrapped. */
 	if (nallocx(1, 0) == 0) {
 		malloc_printf("Initialization error");
-		return (test_status_fail);
+		return test_status_fail;
 	}
-	data_tsd_boot();
 
-	return (test(
+	return test_no_reentrancy(
 	    test_tsd_main_thread,
-	    test_tsd_sub_thread));
+	    test_tsd_sub_thread,
+	    test_tsd_reincarnation);
 }
diff --git a/test/unit/witness.c b/test/unit/witness.c
index 8b99413..5986da4 100644
--- a/test/unit/witness.c
+++ b/test/unit/witness.c
@@ -12,23 +12,17 @@ static bool saw_depth_error;
 
 static void
 witness_lock_error_intercept(const witness_list_t *witnesses,
-    const witness_t *witness)
-{
-
+    const witness_t *witness) {
 	saw_lock_error = true;
 }
 
 static void
-witness_owner_error_intercept(const witness_t *witness)
-{
-
+witness_owner_error_intercept(const witness_t *witness) {
 	saw_owner_error = true;
 }
 
 static void
-witness_not_owner_error_intercept(const witness_t *witness)
-{
-
+witness_not_owner_error_intercept(const witness_t *witness) {
 	saw_not_owner_error = true;
 }
 
@@ -39,125 +33,121 @@ witness_depth_error_intercept(const witness_list_t *witnesses,
 
 static int
-witness_comp(const witness_t *a, const witness_t *b)
-{
-
+witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) {
 	assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
 
-	return (strcmp(a->name, b->name));
+	assert(oa == (void *)a);
+	assert(ob == (void *)b);
+
+	return strcmp(a->name, b->name);
 }
 
 static int
-witness_comp_reverse(const witness_t *a, const witness_t *b)
-{
-
+witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b,
+    void *ob) {
 	assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
 
-	return (-strcmp(a->name, b->name));
+	assert(oa == (void *)a);
+	assert(ob == (void *)b);
+
+	return -strcmp(a->name, b->name);
 }
 
-TEST_BEGIN(test_witness)
-{
+TEST_BEGIN(test_witness) {
 	witness_t a, b;
-	tsdn_t *tsdn;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
 
 	test_skip_if(!config_debug);
 
-	tsdn = tsdn_fetch();
-
-	witness_assert_lockless(tsdn);
-	witness_assert_depth(tsdn, 0);
-	witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 0);
-
-	witness_init(&a, "a", 1, NULL);
-	witness_assert_not_owner(tsdn, &a);
-	witness_lock(tsdn, &a);
-	witness_assert_owner(tsdn, &a);
-	witness_assert_depth(tsdn, 1);
-	witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 1);
-	witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 0);
-
-	witness_init(&b, "b", 2, NULL);
-	witness_assert_not_owner(tsdn, &b);
-	witness_lock(tsdn, &b);
-	witness_assert_owner(tsdn, &b);
-	witness_assert_depth(tsdn, 2);
-	witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 2);
-	witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 1);
-	witness_assert_depth_to_rank(tsdn, (witness_rank_t)3U, 0);
-
-	witness_unlock(tsdn, &a);
-	witness_assert_depth(tsdn, 1);
-	witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 1);
-	witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 1);
-	witness_assert_depth_to_rank(tsdn, (witness_rank_t)3U, 0);
-	witness_unlock(tsdn, &b);
-
-	witness_assert_lockless(tsdn);
-	witness_assert_depth(tsdn, 0);
-	witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 0);
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0);
+
+	witness_init(&a, "a", 1, NULL, NULL);
+	witness_assert_not_owner(&witness_tsdn, &a);
+	witness_lock(&witness_tsdn, &a);
+	witness_assert_owner(&witness_tsdn, &a);
+	witness_assert_depth(&witness_tsdn, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 0);
+
+	witness_init(&b, "b", 2, NULL, NULL);
+	witness_assert_not_owner(&witness_tsdn, &b);
+	witness_lock(&witness_tsdn, &b);
+	witness_assert_owner(&witness_tsdn, &b);
+	witness_assert_depth(&witness_tsdn, 2);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 2);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0);
+
+	witness_unlock(&witness_tsdn, &a);
+	witness_assert_depth(&witness_tsdn, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0);
+	witness_unlock(&witness_tsdn, &b);
+
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
+	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0);
 }
 TEST_END
 
-TEST_BEGIN(test_witness_comp)
-{
+TEST_BEGIN(test_witness_comp) {
 	witness_t a, b, c, d;
-	tsdn_t *tsdn;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
 
 	test_skip_if(!config_debug);
 
-	tsdn = tsdn_fetch();
+	witness_assert_lockless(&witness_tsdn);
 
-	witness_assert_lockless(tsdn);
+	witness_init(&a, "a", 1, witness_comp, &a);
+	witness_assert_not_owner(&witness_tsdn, &a);
+	witness_lock(&witness_tsdn, &a);
+	witness_assert_owner(&witness_tsdn, &a);
+	witness_assert_depth(&witness_tsdn, 1);
 
-	witness_init(&a, "a", 1, witness_comp);
-	witness_assert_not_owner(tsdn, &a);
-	witness_lock(tsdn, &a);
-	witness_assert_owner(tsdn, &a);
-	witness_assert_depth(tsdn, 1);
-
-	witness_init(&b, "b", 1, witness_comp);
-	witness_assert_not_owner(tsdn, &b);
-	witness_lock(tsdn, &b);
-	witness_assert_owner(tsdn, &b);
-	witness_assert_depth(tsdn, 2);
-	witness_unlock(tsdn, &b);
-	witness_assert_depth(tsdn, 1);
+	witness_init(&b, "b", 1, witness_comp, &b);
+	witness_assert_not_owner(&witness_tsdn, &b);
+	witness_lock(&witness_tsdn, &b);
+	witness_assert_owner(&witness_tsdn, &b);
+	witness_assert_depth(&witness_tsdn, 2);
+	witness_unlock(&witness_tsdn, &b);
+	witness_assert_depth(&witness_tsdn, 1);
 
 	witness_lock_error_orig = witness_lock_error;
 	witness_lock_error = witness_lock_error_intercept;
 	saw_lock_error = false;
 
-	witness_init(&c, "c", 1, witness_comp_reverse);
-	witness_assert_not_owner(tsdn, &c);
+	witness_init(&c, "c", 1, witness_comp_reverse, &c);
+	witness_assert_not_owner(&witness_tsdn, &c);
 	assert_false(saw_lock_error, "Unexpected witness lock error");
-	witness_lock(tsdn, &c);
+	witness_lock(&witness_tsdn, &c);
 	assert_true(saw_lock_error, "Expected witness lock error");
-	witness_unlock(tsdn, &c);
-	witness_assert_depth(tsdn, 1);
+	witness_unlock(&witness_tsdn, &c);
+	witness_assert_depth(&witness_tsdn, 1);
 
 	saw_lock_error = false;
 
-	witness_init(&d, "d", 1, NULL);
-	witness_assert_not_owner(tsdn, &d);
+	witness_init(&d, "d", 1, NULL, NULL);
+	witness_assert_not_owner(&witness_tsdn, &d);
 	assert_false(saw_lock_error, "Unexpected witness lock error");
-	witness_lock(tsdn, &d);
+	witness_lock(&witness_tsdn, &d);
 	assert_true(saw_lock_error, "Expected witness lock error");
-	witness_unlock(tsdn, &d);
-	witness_assert_depth(tsdn, 1);
+	witness_unlock(&witness_tsdn, &d);
+	witness_assert_depth(&witness_tsdn, 1);
 
-	witness_unlock(tsdn, &a);
+	witness_unlock(&witness_tsdn, &a);
 
-	witness_assert_lockless(tsdn);
+	witness_assert_lockless(&witness_tsdn);
 
 	witness_lock_error = witness_lock_error_orig;
 }
 TEST_END
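[The witness hunks above make two API changes: tests no longer fetch a tsdn_t * but pass a stack-local witness_tsdn_t, and witness_init() gains a fifth, opaque argument that is handed back to rank comparators such as witness_comp(). To make the invariant these tests exercise concrete, here is a self-contained sketch of rank-ordered lock-acquisition checking. Every name in it (lock_witness_t, witness_acquire, witness_release, MAX_DEPTH) is invented for illustration; this is not jemalloc's internal witness API.

/*
 * Illustrative sketch only: rank-based lock-order ("witness") checking.
 * Not jemalloc code; all identifiers here are invented.
 */
#include <assert.h>
#include <stddef.h>

typedef struct {
	const char *name;
	unsigned rank;	/* Locks must be acquired in increasing rank. */
} lock_witness_t;

#define MAX_DEPTH 8
static const lock_witness_t *held[MAX_DEPTH];
static size_t depth;

static void
witness_acquire(const lock_witness_t *w) {
	/* Acquiring at a rank <= the deepest held rank is a reversal. */
	assert(depth == 0 || held[depth - 1]->rank < w->rank);
	assert(depth < MAX_DEPTH);
	held[depth++] = w;
}

static void
witness_release(const lock_witness_t *w) {
	size_t i = depth;

	while (i > 0 && held[i - 1] != w) {
		i--;
	}
	assert(i > 0);	/* Releasing a lock that is not held is an error. */
	for (; i < depth; i++) {
		held[i - 1] = held[i];	/* Compact the held list. */
	}
	depth--;
}

int
main(void) {
	lock_witness_t a = {"a", 1}, b = {"b", 2};

	witness_acquire(&a);	/* OK: first lock. */
	witness_acquire(&b);	/* OK: rank 2 > rank 1. */
	witness_release(&a);	/* Out-of-order release is permitted. */
	witness_release(&b);
	/* witness_acquire(&b); witness_acquire(&a); would assert-fail. */
	return 0;
}

jemalloc's real witness layer additionally distinguishes owner, not-owner, and depth violations -- the error hooks these tests intercept -- and, as witness_comp() shows, lets equal-rank witnesses impose an order of their own via the comparator.]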
-TEST_BEGIN(test_witness_reversal)
-{
+TEST_BEGIN(test_witness_reversal) {
 	witness_t a, b;
-	tsdn_t *tsdn;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
 
 	test_skip_if(!config_debug);
 
@@ -165,33 +155,30 @@ TEST_BEGIN(test_witness_reversal)
 	witness_lock_error = witness_lock_error_intercept;
 	saw_lock_error = false;
 
-	tsdn = tsdn_fetch();
-
-	witness_assert_lockless(tsdn);
+	witness_assert_lockless(&witness_tsdn);
 
-	witness_init(&a, "a", 1, NULL);
-	witness_init(&b, "b", 2, NULL);
+	witness_init(&a, "a", 1, NULL, NULL);
+	witness_init(&b, "b", 2, NULL, NULL);
 
-	witness_lock(tsdn, &b);
-	witness_assert_depth(tsdn, 1);
+	witness_lock(&witness_tsdn, &b);
+	witness_assert_depth(&witness_tsdn, 1);
 	assert_false(saw_lock_error, "Unexpected witness lock error");
-	witness_lock(tsdn, &a);
+	witness_lock(&witness_tsdn, &a);
 	assert_true(saw_lock_error, "Expected witness lock error");
 
-	witness_unlock(tsdn, &a);
-	witness_assert_depth(tsdn, 1);
-	witness_unlock(tsdn, &b);
+	witness_unlock(&witness_tsdn, &a);
+	witness_assert_depth(&witness_tsdn, 1);
+	witness_unlock(&witness_tsdn, &b);
 
-	witness_assert_lockless(tsdn);
+	witness_assert_lockless(&witness_tsdn);
 
 	witness_lock_error = witness_lock_error_orig;
 }
 TEST_END
 
-TEST_BEGIN(test_witness_recursive)
-{
+TEST_BEGIN(test_witness_recursive) {
 	witness_t a;
-	tsdn_t *tsdn;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
 
 	test_skip_if(!config_debug);
 
@@ -203,22 +190,20 @@ TEST_BEGIN(test_witness_recursive)
 	witness_lock_error = witness_lock_error_intercept;
 	saw_lock_error = false;
 
-	tsdn = tsdn_fetch();
-
-	witness_assert_lockless(tsdn);
+	witness_assert_lockless(&witness_tsdn);
 
-	witness_init(&a, "a", 1, NULL);
+	witness_init(&a, "a", 1, NULL, NULL);
 
-	witness_lock(tsdn, &a);
+	witness_lock(&witness_tsdn, &a);
 	assert_false(saw_lock_error, "Unexpected witness lock error");
 	assert_false(saw_not_owner_error, "Unexpected witness not owner error");
-	witness_lock(tsdn, &a);
+	witness_lock(&witness_tsdn, &a);
 	assert_true(saw_lock_error, "Expected witness lock error");
 	assert_true(saw_not_owner_error, "Expected witness not owner error");
 
-	witness_unlock(tsdn, &a);
+	witness_unlock(&witness_tsdn, &a);
 
-	witness_assert_lockless(tsdn);
+	witness_assert_lockless(&witness_tsdn);
 
 	witness_owner_error = witness_owner_error_orig;
 	witness_lock_error = witness_lock_error_orig;
@@ -226,10 +211,9 @@
 }
 TEST_END
 
-TEST_BEGIN(test_witness_unlock_not_owned)
-{
+TEST_BEGIN(test_witness_unlock_not_owned) {
 	witness_t a;
-	tsdn_t *tsdn;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
 
 	test_skip_if(!config_debug);
 
@@ -237,17 +221,15 @@ TEST_BEGIN(test_witness_unlock_not_owned)
 	witness_owner_error = witness_owner_error_intercept;
 	saw_owner_error = false;
 
-	tsdn = tsdn_fetch();
+	witness_assert_lockless(&witness_tsdn);
 
-	witness_assert_lockless(tsdn);
-
-	witness_init(&a, "a", 1, NULL);
+	witness_init(&a, "a", 1, NULL, NULL);
 
 	assert_false(saw_owner_error, "Unexpected owner error");
-	witness_unlock(tsdn, &a);
+	witness_unlock(&witness_tsdn, &a);
 	assert_true(saw_owner_error, "Expected owner error");
 
-	witness_assert_lockless(tsdn);
+	witness_assert_lockless(&witness_tsdn);
 
 	witness_owner_error = witness_owner_error_orig;
 }
@@ -255,7 +237,7 @@ TEST_END
 
 TEST_BEGIN(test_witness_depth) {
 	witness_t a;
-	tsdn_t *tsdn;
+	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
 
 	test_skip_if(!config_debug);
 
@@ -263,35 +245,31 @@ TEST_BEGIN(test_witness_depth) {
 	witness_depth_error = witness_depth_error_intercept;
 	saw_depth_error = false;
 
-	tsdn = tsdn_fetch();
-
-	witness_assert_lockless(tsdn);
-	witness_assert_depth(tsdn, 0);
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
 
-	witness_init(&a, "a", 1, NULL);
+	witness_init(&a, "a", 1, NULL, NULL);
 
 	assert_false(saw_depth_error, "Unexpected depth error");
-	witness_assert_lockless(tsdn);
-	witness_assert_depth(tsdn, 0);
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
 
-	witness_lock(tsdn, &a);
-	witness_assert_lockless(tsdn);
-	witness_assert_depth(tsdn, 0);
+	witness_lock(&witness_tsdn, &a);
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
 	assert_true(saw_depth_error, "Expected depth error");
 
-	witness_unlock(tsdn, &a);
+	witness_unlock(&witness_tsdn, &a);
 
-	witness_assert_lockless(tsdn);
-	witness_assert_depth(tsdn, 0);
+	witness_assert_lockless(&witness_tsdn);
+	witness_assert_depth(&witness_tsdn, 0);
 
 	witness_depth_error = witness_depth_error_orig;
 }
 TEST_END
 
 int
-main(void)
-{
-
+main(void) {
 	return test(
 	    test_witness,
 	    test_witness_comp,
diff --git a/test/unit/zero.c b/test/unit/zero.c
index 573993a..553692b 100644
--- a/test/unit/zero.c
+++ b/test/unit/zero.c
@@ -1,11 +1,10 @@
 #include "test/jemalloc_test.h"
 
 static void
-test_zero(size_t sz_min, size_t sz_max)
-{
+test_zero(size_t sz_min, size_t sz_max) {
 	uint8_t *s;
 	size_t sz_prev, sz, i;
-#define	MAGIC ((uint8_t)0x61)
+#define MAGIC ((uint8_t)0x61)
 
 	sz_prev = 0;
 	s = (uint8_t *)mallocx(sz_min, 0);
@@ -40,36 +39,21 @@ test_zero(size_t sz_min, size_t sz_max)
 #undef MAGIC
 }
 
-TEST_BEGIN(test_zero_small)
-{
-
+TEST_BEGIN(test_zero_small) {
 	test_skip_if(!config_fill);
 	test_zero(1, SMALL_MAXCLASS-1);
 }
 TEST_END
 
-TEST_BEGIN(test_zero_large)
-{
-
-	test_skip_if(!config_fill);
-	test_zero(SMALL_MAXCLASS+1, large_maxclass);
-}
-TEST_END
-
-TEST_BEGIN(test_zero_huge)
-{
-
+TEST_BEGIN(test_zero_large) {
 	test_skip_if(!config_fill);
-	test_zero(large_maxclass+1, chunksize*2);
+	test_zero(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
}
 TEST_END
 
 int
-main(void)
-{
-
-	return (test(
+main(void) {
+	return test(
 	    test_zero_small,
-	    test_zero_large,
-	    test_zero_huge));
+	    test_zero_large);
 }
diff --git a/test/unit/zero.sh b/test/unit/zero.sh
index 24488f0..b4540b2 100644
--- a/test/unit/zero.sh
+++ b/test/unit/zero.sh
@@ -1,5 +1,5 @@
 #!/bin/sh
 
 if [ "x${enable_fill}" = "x1" ] ; then
-  export MALLOC_CONF="abort:false,junk:false,zero:true,redzone:false,quarantine:0"
+  export MALLOC_CONF="abort:false,junk:false,zero:true"
 fi
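[The stats tests in this merge track jemalloc 5's vocabulary shift from runs and chunks to slabs and extents: stats.arenas.<i>.bins.<j> now reports nslabs/nreslabs/curslabs rather than nruns/nreruns/curruns, the lruns and hchunks subtrees are replaced by lextents, and arenas.extend is renamed arenas.create. The following sketch reads one renamed statistic through the public mallctl() interface, mirroring the tests' epoch-then-read pattern. It assumes a jemalloc 5.x build with statistics enabled; the error handling is illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Touch the allocator so arena 0 and its bins exist. */
	void *p = malloc(1);
	if (p == NULL) {
		return 1;
	}

	/* Refresh the stats snapshot, as the tests do via "epoch". */
	uint64_t epoch = 1;
	if (mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(epoch)) != 0) {
		free(p);
		return 1;
	}

	/* "curslabs" is the 5.x name for what 4.x called "curruns". */
	size_t curslabs;
	size_t sz = sizeof(curslabs);
	if (mallctl("stats.arenas.0.bins.0.curslabs", (void *)&curslabs,
	    &sz, NULL, 0) != 0) {
		fprintf(stderr, "stats not enabled in this build?\n");
		free(p);
		return 1;
	}
	printf("arena 0, bin 0: %zu current slabs\n", curslabs);

	free(p);
	return 0;
}

Writing "epoch" first matters because jemalloc snapshots its statistics; reads return the values captured at the last epoch advance.]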

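[The tsd.c rewrite above drops the malloc_tsd_* macro machinery in favor of direct tsd_t accessors and counts cleanup reincarnations instead of recording a boolean. The destructor-re-arming pattern it relies on is not jemalloc-specific; the sketch below reproduces it with plain POSIX thread-specific data. The three-round constant mirrors data_cleanup() above; everything else uses only standard pthreads calls.

#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_key_t key;
static int cleanup_count = 0;

static void
cleanup(void *arg) {
	int round = (int)(intptr_t)arg;

	cleanup_count++;
	if (round < 3) {
		/*
		 * Storing a new non-NULL value makes the pthreads
		 * implementation run the destructor again on its next pass.
		 */
		pthread_setspecific(key, (void *)(intptr_t)(round + 1));
	}
}

static void *
thd_start(void *arg) {
	(void)arg;
	pthread_setspecific(key, (void *)(intptr_t)1);
	return NULL;
}

int
main(void) {
	pthread_t thd;

	pthread_key_create(&key, cleanup);
	pthread_create(&thd, NULL, thd_start, NULL);
	pthread_join(thd, NULL);
	/* Round 1 re-armed round 2, which re-armed round 3. */
	printf("cleanup ran %d times\n", cleanup_count);
	assert(cleanup_count == 3);
	return 0;
}

POSIX permits a destructor to store a fresh non-NULL value, obliging the implementation to make another destructor pass (at least PTHREAD_DESTRUCTOR_ITERATIONS, i.e. >= 4, of them) -- the same loophole jemalloc's tsd cleanup uses to reinitialize itself during thread exit.]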