summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorJason Evans <jasone@canonware.com>2016-03-28 01:51:12 (GMT)
committerJason Evans <jasone@canonware.com>2016-06-03 19:27:41 (GMT)
commitb2a9fae88652f39b80dc1d25fa842dae8166263d (patch)
treef29531736c42c7f5777c1477c66420f9e67bac03 /src
parente75e9be130910a7344f553e5e6c664047a0d0464 (diff)
downloadjemalloc-b2a9fae88652f39b80dc1d25fa842dae8166263d.zip
jemalloc-b2a9fae88652f39b80dc1d25fa842dae8166263d.tar.gz
jemalloc-b2a9fae88652f39b80dc1d25fa842dae8166263d.tar.bz2
Set/unset rtree node for last chunk of extents.
Set/unset rtree node for last chunk of extents, so that the rtree can be used for chunk coalescing.
Diffstat (limited to 'src')
-rw-r--r--src/chunk.c45
1 file changed, 41 insertions, 4 deletions
diff --git a/src/chunk.c b/src/chunk.c
index 4443368..beef41f 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -143,14 +143,35 @@ chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
bool
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
{
+ size_t size;
+ rtree_elm_t *elm_a;
assert(extent_addr_get(extent) == chunk);
- if (rtree_write(tsdn, &chunks_rtree, (uintptr_t)chunk, extent))
+ size = extent_size_get(extent);
+
+ if ((elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)chunk,
+ false, true)) == NULL)
return (true);
+ rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a, extent);
+ if (size > chunksize) {
+ uintptr_t last = ((uintptr_t)chunk +
+ (uintptr_t)(CHUNK_CEILING(size - chunksize)));
+ rtree_elm_t *elm_b;
+
+ if ((elm_b = rtree_elm_acquire(tsdn, &chunks_rtree, last, false,
+ true)) == NULL) {
+ rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a,
+ NULL);
+ rtree_elm_release(tsdn, &chunks_rtree, elm_a);
+ return (true);
+ }
+ rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_b, extent);
+ rtree_elm_release(tsdn, &chunks_rtree, elm_b);
+ }
+ rtree_elm_release(tsdn, &chunks_rtree, elm_a);
if (config_prof && opt_prof) {
- size_t size = extent_size_get(extent);
size_t nadd = (size == 0) ? 1 : size / chunksize;
size_t cur = atomic_add_z(&curchunks, nadd);
size_t high = atomic_read_z(&highchunks);
@@ -171,10 +192,26 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
void
chunk_deregister(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
{
+ size_t size;
+ rtree_elm_t *elm_a;
+
+ size = extent_size_get(extent);
+
+ elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)chunk, true,
+ false);
+ rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a, NULL);
+ if (size > chunksize) {
+ uintptr_t last = ((uintptr_t)chunk +
+ (uintptr_t)(CHUNK_CEILING(size - chunksize)));
+ rtree_elm_t *elm_b = rtree_elm_acquire(tsdn, &chunks_rtree,
+ last, true, false);
+
+ rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_b, NULL);
+ rtree_elm_release(tsdn, &chunks_rtree, elm_b);
+ }
+ rtree_elm_release(tsdn, &chunks_rtree, elm_a);
- rtree_clear(tsdn, &chunks_rtree, (uintptr_t)chunk);
if (config_prof && opt_prof) {
- size_t size = extent_size_get(extent);
size_t nsub = (size == 0) ? 1 : size / chunksize;
assert(atomic_read_z(&curchunks) >= nsub);
atomic_sub_z(&curchunks, nsub);