author	Jason Evans <jasone@canonware.com>	2016-10-03 21:18:55 (GMT)
committer	Jason Evans <jasone@canonware.com>	2016-10-03 21:18:55 (GMT)
commit	871a9498e13572f99451ed88db36cda6c9fecf8f (patch)
tree	514e2c0b70626c36da003c2738927a07454de87d /src/extent.c
parent	d51139c33c180a59dcee0c3880b8261f075139b3 (diff)
Fix size class overflow bugs.
Avoid calling s2u() on raw extent sizes in extent_recycle(). Clamp psz2ind() (implemented as psz2ind_clamp()) when inserting/removing into/from size-segregated extent heaps.
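
The clamping half of the fix can be illustrated with a minimal, self-contained sketch. Everything below (the *_SKETCH names and the naive page-count index) is an assumption made for illustration, not jemalloc's actual psz2ind()/psz2ind_clamp() size-class lookup; the point is only that an extent larger than the largest page size class gets filed into the last size-segregated heap instead of producing an index past the end of the heap array.

/*
 * Sketch only: naive stand-ins for psz2ind()/psz2ind_clamp(), not the
 * real jemalloc size-class tables.
 */
#include <assert.h>
#include <stddef.h>

#define PAGE_SKETCH	((size_t)4096)
#define NPSIZES_SKETCH	((size_t)8)	/* pretend there are 8 page size classes */

/* Naive stand-in for psz2ind(): index extents by page count, zero-based. */
static size_t
psz2ind_sketch(size_t psz)
{
	return ((psz / PAGE_SKETCH) - 1);
}

/* Clamped variant: sizes beyond the largest class map to the last heap
 * rather than indexing past the end of the heap array. */
static size_t
psz2ind_clamp_sketch(size_t psz)
{
	size_t pind = psz2ind_sketch(psz);

	return ((pind < NPSIZES_SKETCH) ? pind : NPSIZES_SKETCH - 1);
}

int
main(void)
{
	size_t huge = 100 * PAGE_SKETCH;	/* larger than the largest class */

	assert(psz2ind_sketch(huge) >= NPSIZES_SKETCH);	/* would overflow the heaps */
	assert(psz2ind_clamp_sketch(huge) == NPSIZES_SKETCH - 1);
	return (0);
}
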
Diffstat (limited to 'src/extent.c')
-rw-r--r--	src/extent.c	10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/src/extent.c b/src/extent.c
index f88c424..63516c6 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -195,7 +195,7 @@ extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
extent_t *extent)
{
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
- pszind_t pind = psz2ind(psz);
+ pszind_t pind = psz2ind_clamp(psz);
malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
@@ -207,7 +207,7 @@ extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
extent_t *extent)
{
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
- pszind_t pind = psz2ind(psz);
+ pszind_t pind = psz2ind_clamp(psz);
malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
@@ -364,6 +364,7 @@ extent_first_best_fit(tsdn_t *tsdn, arena_t *arena,
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
pind = psz2ind(extent_size_quantize_ceil(size));
+ assert(pind < NPSIZES);
for (i = pind; i < NPSIZES; i++) {
extent_t *extent = extent_heap_first(&extent_heaps[i]);
if (extent != NULL)
@@ -419,13 +420,16 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
* course cannot be recycled).
*/
assert(PAGE_ADDR2BASE(new_addr) == new_addr);
+ assert(pad == 0);
+ assert(alignment <= PAGE);
prev = extent_lookup(tsdn, (void *)((uintptr_t)new_addr - PAGE),
false);
assert(prev == NULL || extent_past_get(prev) == new_addr);
}
size = usize + pad;
- alloc_size = s2u(size + PAGE_CEILING(alignment) - PAGE);
+ alloc_size = (new_addr != NULL) ? size : s2u(size +
+ PAGE_CEILING(alignment) - PAGE);
/* Beware size_t wrap-around. */
if (alloc_size < usize)
return (NULL);
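
The "/* Beware size_t wrap-around. */" guard above uses the standard C idiom for detecting unsigned overflow after an addition: if the modular sum comes out smaller than one of the operands, the addition wrapped. A self-contained sketch of that check follows; the names are illustrative, not jemalloc's.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* If a + b wraps around SIZE_MAX, the truncated result is necessarily
 * smaller than either operand, so comparing against one input detects it. */
static int
size_add_would_wrap(size_t a, size_t b, size_t *sum)
{
	*sum = a + b;
	return (*sum < a);
}

int
main(void)
{
	size_t sum;

	assert(!size_add_would_wrap(100, 200, &sum) && sum == 300);
	assert(size_add_would_wrap(SIZE_MAX, 1, &sum));	/* wraps to 0 */
	return (0);
}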