summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorJason Evans <jasone@canonware.com>2014-10-11 01:19:20 (GMT)
committerJason Evans <jasone@canonware.com>2014-10-11 01:19:20 (GMT)
commit9b75677e538836b284a0d26a593963187c24a153 (patch)
tree524f6ae583f5367cf74bb472aa392ed3da589a5c
parentfc0b3b7383373d66cfed2cd4e2faa272a6868d32 (diff)
downloadjemalloc-9b75677e538836b284a0d26a593963187c24a153.zip
jemalloc-9b75677e538836b284a0d26a593963187c24a153.tar.gz
jemalloc-9b75677e538836b284a0d26a593963187c24a153.tar.bz2
Don't fetch tsd in a0{d,}alloc().
Don't fetch tsd in a0{d,}alloc(), because doing so can cause infinite recursion on systems that require an allocated tsd wrapper.
-rw-r--r--src/jemalloc.c18
-rw-r--r--test/unit/mq.c1
2 files changed, 8 insertions, 11 deletions
diff --git a/src/jemalloc.c b/src/jemalloc.c
index a862104..fc490eb 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -203,7 +203,6 @@ static void *
a0alloc(size_t size, bool zero)
{
void *ret;
- tsd_t *tsd;
if (unlikely(malloc_init()))
return (NULL);
@@ -211,11 +210,10 @@ a0alloc(size_t size, bool zero)
if (size == 0)
size = 1;
- tsd = tsd_fetch();
if (size <= arena_maxclass)
- ret = arena_malloc(tsd, a0get(), size, zero, false);
+ ret = arena_malloc(NULL, a0get(), size, zero, false);
else
- ret = huge_malloc(tsd, a0get(), size, zero, false);
+ ret = huge_malloc(NULL, a0get(), size, zero, false);
return (ret);
}
@@ -237,18 +235,16 @@ a0calloc(size_t num, size_t size)
void
a0free(void *ptr)
{
- tsd_t *tsd;
arena_chunk_t *chunk;
if (ptr == NULL)
return;
- tsd = tsd_fetch();
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
- arena_dalloc(tsd, chunk, ptr, false);
+ arena_dalloc(NULL, chunk, ptr, false);
else
- huge_dalloc(tsd, ptr, false);
+ huge_dalloc(NULL, ptr, false);
}
/* Create a new arena and insert it into the arenas array at index ind. */
@@ -2301,9 +2297,9 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
* fork/malloc races via the following functions it registers during
* initialization using pthread_atfork(), but of course that does no good if
* the allocator isn't fully initialized at fork time. The following library
- * constructor is a partial solution to this problem. It may still possible to
- * trigger the deadlock described above, but doing so would involve forking via
- * a library constructor that runs before jemalloc's runs.
+ * constructor is a partial solution to this problem. It may still be possible
+ * to trigger the deadlock described above, but doing so would involve forking
+ * via a library constructor that runs before jemalloc's runs.
*/
JEMALLOC_ATTR(constructor)
static void
diff --git a/test/unit/mq.c b/test/unit/mq.c
index bd289c5..bde2a48 100644
--- a/test/unit/mq.c
+++ b/test/unit/mq.c
@@ -85,6 +85,7 @@ TEST_END
int
main(void)
{
+
return (test(
test_mq_basic,
test_mq_threaded));