From c2fc8c8b3afbd15ec3e8ed4ca38667ec0a01ade8 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Fri, 1 Oct 2010 18:02:43 -0700
Subject: Use offsetof() when sizing dynamic structures.

Base dynamic structure size on offsetof(), rather than subtracting the
size of the dynamic structure member.  Results could differ on systems
with strict data structure alignment requirements.
---
 jemalloc/src/jemalloc.c | 4 ++--
 jemalloc/src/rtree.c    | 7 ++++---
 jemalloc/src/tcache.c   | 2 +-
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/jemalloc/src/jemalloc.c b/jemalloc/src/jemalloc.c
index f252f59..a79752e 100644
--- a/jemalloc/src/jemalloc.c
+++ b/jemalloc/src/jemalloc.c
@@ -101,8 +101,8 @@ arenas_extend(unsigned ind)
 	arena_t *ret;
 
 	/* Allocate enough space for trailing bins. */
-	ret = (arena_t *)base_alloc(sizeof(arena_t)
-	    + (sizeof(arena_bin_t) * (nbins - 1)));
+	ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
+	    + (sizeof(arena_bin_t) * nbins));
 	if (ret != NULL && arena_new(ret, ind) == false) {
 		arenas[ind] = ret;
 		return (ret);
diff --git a/jemalloc/src/rtree.c b/jemalloc/src/rtree.c
index a583751..7753743 100644
--- a/jemalloc/src/rtree.c
+++ b/jemalloc/src/rtree.c
@@ -13,11 +13,12 @@ rtree_new(unsigned bits)
 		height++;
 	assert(height * bits_per_level >= bits);
 
-	ret = (rtree_t*)base_alloc(sizeof(rtree_t) + (sizeof(unsigned) *
-	    (height - 1)));
+	ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) +
+	    (sizeof(unsigned) * height));
 	if (ret == NULL)
 		return (NULL);
-	memset(ret, 0, sizeof(rtree_t) + (sizeof(unsigned) * (height - 1)));
+	memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
+	    height));
 
 	malloc_mutex_init(&ret->mutex);
 	ret->height = height;
diff --git a/jemalloc/src/tcache.c b/jemalloc/src/tcache.c
index ef69b1a..3fb8f2b 100644
--- a/jemalloc/src/tcache.c
+++ b/jemalloc/src/tcache.c
@@ -204,7 +204,7 @@ tcache_create(arena_t *arena)
 	size_t size;
 	unsigned i;
 
-	size = sizeof(tcache_t) + (sizeof(tcache_bin_t) * (nhbins - 1));
+	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
 	/*
 	 * Round up to the nearest multiple of the cacheline size, in order to
 	 * avoid the possibility of false cacheline sharing.
--
cgit v0.12
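
Why the two sizing formulas can disagree: for a struct whose last member is a
one-element array standing in for a variable-length tail, sizeof() counts any
tail padding the compiler inserts to round the struct up to its alignment,
whereas offsetof() names the exact byte at which the tail begins.  The
following standalone sketch (the demo_t struct and its field names are
hypothetical, not jemalloc's actual types) makes the difference visible on a
typical 64-bit ABI:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: an 8-byte member followed by a 4-byte tail array. */
typedef struct {
	uint64_t	lock;		/* forces 8-byte struct alignment */
	unsigned	level2bits[1];	/* one-element "dynamic" trailing array */
} demo_t;

int
main(void)
{
	unsigned height = 3;

	/*
	 * Old formula: sizeof(demo_t) is typically 16 here (8 + 4, rounded
	 * up to the 8-byte alignment), so the tail padding is counted too.
	 */
	size_t old_size = sizeof(demo_t) + (sizeof(unsigned) * (height - 1));

	/*
	 * New formula: offsetof() gives the exact byte where the trailing
	 * array begins (typically 8 here), so no padding is counted.
	 */
	size_t new_size = offsetof(demo_t, level2bits) +
	    (sizeof(unsigned) * height);

	printf("sizeof-based:   %zu bytes\n", old_size);	/* typically 24 */
	printf("offsetof-based: %zu bytes\n", new_size);	/* typically 20 */
	return (0);
}

Since sizeof(S) >= offsetof(S, tail) + sizeof(one element), the old formula
can only over-allocate, never under-allocate; the offsetof() form is exact
and independent of the platform's padding rules, which matches the commit
message's point that the results differ under strict alignment requirements.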