author | Jason Evans <jasone@canonware.com> | 2017-04-16 16:25:56 (GMT) |
---|---|---|
committer | Jason Evans <jasone@canonware.com> | 2017-04-19 02:01:04 (GMT) |
commit | 45f087eb033927338b9df847eb9be6886ef48cf7 (patch) | |
tree | 5d59a58674628b3d68e729df98945547a5917250 /src | |
parent | 38e847c1c594fb9ad4862233f3602ade85da4e7f (diff) | |
Revert "Remove BITMAP_USE_TREE."
Some systems use a native 64 KiB page size, which means that the bitmap
for the smallest size class can be 8192 bits, not just 512 bits as when
the page size is 4 KiB. Linear search in bitmap_{sfu,ffu}() is
unacceptably slow for such large bitmaps.
This reverts commit 7c00f04ff40a34627e31488d02ff1081c749c7ba.
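To get a sense of the scan cost the message describes, here is a minimal standalone sketch of a flat first-unset-bit search. It is illustrative only: `find_first_unset` and `GROUP_NBITS` are local stand-ins, it uses the external convention where a 0 bit means free (jemalloc's internal representation inverts bits, and the real routines are bitmap_sfu()/bitmap_ffu()), and `__builtin_ctzll` assumes GCC/Clang.

```c
#include <stddef.h>
#include <stdint.h>

#define GROUP_NBITS 64

/*
 * Flat scan: worst case visits nbits/GROUP_NBITS groups -- 8 groups for a
 * 512-bit bitmap (4 KiB pages), but 128 groups for an 8192-bit bitmap
 * (64 KiB pages).  The tree restored by this commit instead reads one
 * group per level, roughly ceil(log_64(nbits)) = 3 reads for 8192 bits.
 */
static size_t
find_first_unset(const uint64_t *groups, size_t nbits) {
	size_t ngroups = (nbits + GROUP_NBITS - 1) / GROUP_NBITS;
	for (size_t g = 0; g < ngroups; g++) {
		if (groups[g] != UINT64_MAX) {
			/* ~groups[g] is nonzero here, so ctzll is defined. */
			return g * GROUP_NBITS + __builtin_ctzll(~groups[g]);
		}
	}
	return nbits; /* No unset bit found. */
}
```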
Diffstat (limited to 'src')
-rw-r--r-- | src/bitmap.c | 78 |
1 file changed, 78 insertions, 0 deletions
diff --git a/src/bitmap.c b/src/bitmap.c
index 275636b..468b317 100644
--- a/src/bitmap.c
+++ b/src/bitmap.c
@@ -6,6 +6,82 @@
 /******************************************************************************/
 
+#ifdef BITMAP_USE_TREE
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
+	unsigned i;
+	size_t group_count;
+
+	assert(nbits > 0);
+	assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+	/*
+	 * Compute the number of groups necessary to store nbits bits, and
+	 * progressively work upward through the levels until reaching a level
+	 * that requires only one group.
+	 */
+	binfo->levels[0].group_offset = 0;
+	group_count = BITMAP_BITS2GROUPS(nbits);
+	for (i = 1; group_count > 1; i++) {
+		assert(i < BITMAP_MAX_LEVELS);
+		binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+		    + group_count;
+		group_count = BITMAP_BITS2GROUPS(group_count);
+	}
+	binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+	    + group_count;
+	assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
+	binfo->nlevels = i;
+	binfo->nbits = nbits;
+}
+
+static size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo) {
+	return binfo->levels[binfo->nlevels].group_offset;
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
+	size_t extra;
+	unsigned i;
+
+	/*
+	 * Bits are actually inverted with regard to the external bitmap
+	 * interface.
+	 */
+
+	if (fill) {
+		/* The "filled" bitmap starts out with all 0 bits. */
+		memset(bitmap, 0, bitmap_size(binfo));
+		return;
+	}
+
+	/*
+	 * The "empty" bitmap starts out with all 1 bits, except for trailing
+	 * unused bits (if any).  Note that each group uses bit 0 to correspond
+	 * to the first logical bit in the group, so extra bits are the most
+	 * significant bits of the last group.
+	 */
+	memset(bitmap, 0xffU, bitmap_size(binfo));
+	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+	    & BITMAP_GROUP_NBITS_MASK;
+	if (extra != 0) {
+		bitmap[binfo->levels[1].group_offset - 1] >>= extra;
+	}
+	for (i = 1; i < binfo->nlevels; i++) {
+		size_t group_count = binfo->levels[i].group_offset -
+		    binfo->levels[i-1].group_offset;
+		extra = (BITMAP_GROUP_NBITS - (group_count &
+		    BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
+		if (extra != 0) {
+			bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
+		}
+	}
+}
+
+#else /* BITMAP_USE_TREE */
+
 void
 bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
 	assert(nbits > 0);
@@ -37,6 +113,8 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
 	}
 }
 
+#endif /* BITMAP_USE_TREE */
+
 size_t
 bitmap_size(const bitmap_info_t *binfo) {
 	return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
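To see how the restored bitmap_info_init() collapses a large bitmap into a few levels, here is a standalone sketch of the same bottom-up computation. The `BITS2GROUPS` macro and `GROUP_NBITS` constant are local stand-ins for jemalloc's BITMAP_BITS2GROUPS and BITMAP_GROUP_NBITS, assuming 64-bit groups; this is a worked example, not the library's actual code.

```c
#include <stdio.h>

#define GROUP_NBITS 64
/* Ceiling division: number of 64-bit groups needed to hold nbits bits. */
#define BITS2GROUPS(nbits) (((nbits) + GROUP_NBITS - 1) / GROUP_NBITS)

int
main(void) {
	/* 8192 bits: the smallest-size-class bitmap on a 64 KiB page. */
	size_t group_count = BITS2GROUPS(8192);
	unsigned level = 0;

	/* Work upward until a single group summarizes the level below. */
	while (group_count > 1) {
		printf("level %u: %zu groups\n", level, group_count);
		group_count = BITS2GROUPS(group_count);
		level++;
	}
	printf("level %u: 1 group (root)\n", level);
	/*
	 * Output:
	 *   level 0: 128 groups
	 *   level 1: 2 groups
	 *   level 2: 1 group (root)
	 */
	return 0;
}
```

Since each level's group summarizes up to 64 groups below it, the level count grows only as ceil(log_64(nbits)), so a first-fit search reads about 3 groups for an 8192-bit bitmap instead of scanning up to 128.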