author    Daniel Micay <danielmicay@gmail.com>    2014-10-06 07:42:10 (GMT)
committer Jason Evans <jasone@canonware.com>      2014-10-08 06:57:09 (GMT)
commit    f22214a29ddd3bed005cbcc8f2aff7c61ef4940b (patch)
tree      13bcb71ddaeafbb5889fa4b1bd7f612fc4b04c9d /include/jemalloc
parent    8bb3198f72fc7587dc93527f9f19fb5be52fa553 (diff)
Use regular arena allocation for huge tree nodes.
This avoids grabbing the base mutex, as a step towards fine-grained locking for huge allocations. The thread cache also provides a tiny (~3%) improvement for serial huge allocations.
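In effect, the patch swaps the allocation source for the extent_node_t that tracks each huge allocation. A minimal sketch of the idea (illustrative only, not the patch itself; base_node_alloc(), imalloc(), and CACHELINE_CEILING() are real jemalloc internals of this era, but the exact call the patch uses may differ):

/* Before: node metadata came from the base allocator, which serializes
 * every caller on the global base mutex. */
extent_node_t *node = base_node_alloc();

/* After (sketch): allocate the node through the regular arena path, so a
 * thread with a tcache can usually satisfy it without any global lock. */
extent_node_t *node = imalloc(tsd, CACHELINE_CEILING(sizeof(extent_node_t)));

The tcache fast path replacing a mutex acquire/release pair is presumably where the ~3% serial improvement quoted above comes from.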
Diffstat (limited to 'include/jemalloc')
-rw-r--r--  include/jemalloc/internal/huge.h                   2
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in   4
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index 939993f..5d4d3a1 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -21,7 +21,7 @@ void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
-void huge_dalloc(void *ptr);
+void huge_dalloc(tsd_t *tsd, void *ptr);
size_t huge_salloc(const void *ptr);
prof_tctx_t *huge_prof_tctx_get(const void *ptr);
void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
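The signature change above exists because the node is now ordinary arena-backed memory: freeing it goes through idalloct(), which needs the caller's tsd to reach the thread cache. A hedged sketch of the resulting shape of huge_dalloc() (huge_mtx, extent_tree_ad_search(), and extent_tree_ad_remove() are real jemalloc-internal names; the condensed body and the huge_unmap() helper are hypothetical):

void
huge_dalloc(tsd_t *tsd, void *ptr)
{
	extent_node_t *node, key;

	/* Remove the tracking node from the global huge tree. */
	malloc_mutex_lock(&huge_mtx);
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	extent_tree_ad_remove(&huge, node);
	malloc_mutex_unlock(&huge_mtx);

	huge_unmap(node->addr, node->size);	/* hypothetical helper */

	/* The node is arena-backed now, so return it through the regular
	 * deallocation path; tsd lets idalloct() use the thread cache. */
	idalloct(tsd, node, true);
}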
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index c7a5fd8..f4d5de6 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -938,7 +938,7 @@ idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
if (chunk != ptr)
arena_dalloc(tsd, chunk, ptr, try_tcache);
else
- huge_dalloc(ptr);
+ huge_dalloc(tsd, ptr);
}
JEMALLOC_ALWAYS_INLINE void
@@ -952,7 +952,7 @@ isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
if (chunk != ptr)
arena_sdalloc(tsd, chunk, ptr, size, try_tcache);
else
- huge_dalloc(ptr);
+ huge_dalloc(tsd, ptr);
}
JEMALLOC_ALWAYS_INLINE void
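Call sites need no new plumbing: idalloct() and isdalloct() already receive tsd, as the hunks above show, so it is simply forwarded. A condensed view of the deallocation path after this patch (simplified sketch; the real je_free() of this era also handles profiling and stats bookkeeping):

void
je_free(void *ptr)
{
	if (ptr != NULL) {
		tsd_t *tsd = tsd_fetch();
		/* Small/large pointers take the arena branch; huge pointers
		 * now reach huge_dalloc() with the same tsd in hand. */
		idalloct(tsd, ptr, true);
	}
}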