author	Jason Evans <jasone@canonware.com>	2015-01-26 01:31:24 (GMT)
committer	Jason Evans <jasone@canonware.com>	2015-01-26 01:31:24 (GMT)
commit	0fd663e9c5336089a98e8a2a0cf5419b534f045f (patch)
tree	f9f4b6e3f44c2b46840b01970aaf27d3bec0096b /src
parent	77d597ebb23aa47a4a0112c294ad6a68857f450c (diff)
Avoid pointless chunk_recycle() call.
Avoid calling chunk_recycle() for mmap()ed chunks when config_munmap is enabled, in which case there are never any recyclable chunks: with munmap enabled, freed mmap()ed chunks are unmapped rather than recorded for reuse, so the mmap recycle trees are always empty. This resolves #164.
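For context on why the new guard is safe, the sketch below paraphrases the deallocation path from the jemalloc sources of this period (chunk_unmap() in src/chunk.c and chunk_dalloc_mmap() in src/chunk_mmap.c). Signatures are simplified, and this code is not part of the commit:

    /*
     * Simplified paraphrase, not part of this commit.  Returns true when
     * the chunk could not be unmapped and must be kept for recycling.
     */
    bool
    chunk_dalloc_mmap(void *chunk, size_t size)
    {
    	if (config_munmap)
    		pages_unmap(chunk, size);
    	return (!config_munmap);
    }

    void
    chunk_unmap(void *chunk, size_t size)
    {
    	if (have_dss && chunk_in_dss(chunk))
    		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
    	else if (chunk_dalloc_mmap(chunk, size))
    		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
    }

With munmap enabled, chunk_dalloc_mmap() unmaps the chunk and returns false, so chunk_record() is never reached for mmap()ed chunks; chunks_szad_mmap and chunks_ad_mmap stay empty, and searching them via chunk_recycle() can never succeed.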
Diffstat (limited to 'src')
-rw-r--r--	src/chunk.c	50
1 file changed, 29 insertions, 21 deletions
diff --git a/src/chunk.c b/src/chunk.c
index b9a2441..6d5f84f 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -132,6 +132,19 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad,
 	return (ret);
 }
 
+static void *
+chunk_alloc_core_dss(void *new_addr, size_t size, size_t alignment, bool base,
+    bool *zero)
+{
+	void *ret;
+
+	if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
+	    new_addr, size, alignment, base, zero)) != NULL)
+		return (ret);
+	ret = chunk_alloc_dss(new_addr, size, alignment, zero);
+	return (ret);
+}
+
 /*
  * If the caller specifies (!*zero), it is still possible to receive zeroed
  * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
@@ -150,31 +163,26 @@ chunk_alloc_core(void *new_addr, size_t size, size_t alignment, bool base,
 	assert((alignment & chunksize_mask) == 0);
 
 	/* "primary" dss. */
-	if (have_dss && dss_prec == dss_prec_primary) {
-		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
-		    new_addr, size, alignment, base, zero)) != NULL)
-			return (ret);
-		if ((ret = chunk_alloc_dss(new_addr, size, alignment, zero))
-		    != NULL)
-			return (ret);
-	}
+	if (have_dss && dss_prec == dss_prec_primary && (ret =
+	    chunk_alloc_core_dss(new_addr, size, alignment, base, zero)) !=
+	    NULL)
+		return (ret);
 	/* mmap. */
-	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, new_addr,
-	    size, alignment, base, zero)) != NULL)
+	if (!config_munmap && (ret = chunk_recycle(&chunks_szad_mmap,
+	    &chunks_ad_mmap, new_addr, size, alignment, base, zero)) != NULL)
 		return (ret);
-	/* Requesting an address not implemented for chunk_alloc_mmap(). */
-	if (new_addr == NULL &&
-	    (ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
+	/*
+	 * Requesting an address is not implemented for chunk_alloc_mmap(), so
+	 * only call it if (new_addr == NULL).
+	 */
+	if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero))
+	    != NULL)
 		return (ret);
 	/* "secondary" dss. */
-	if (have_dss && dss_prec == dss_prec_secondary) {
-		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss,
-		    new_addr, size, alignment, base, zero)) != NULL)
-			return (ret);
-		if ((ret = chunk_alloc_dss(new_addr, size, alignment, zero))
-		    != NULL)
-			return (ret);
-	}
+	if (have_dss && dss_prec == dss_prec_secondary && (ret =
+	    chunk_alloc_core_dss(new_addr, size, alignment, base, zero)) !=
+	    NULL)
+		return (ret);
 
 	/* All strategies for allocation failed. */
 	return (NULL);
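Two things happen in this change. First, the duplicated "recycle, then allocate fresh" sequence for the primary and secondary dss cases is factored into the new chunk_alloc_core_dss() helper, so each caller collapses to a single condition. Second, those conditions rely on C's left-to-right short-circuit evaluation of &&: the helper call (and the assignment to ret) executes only when the cheap boolean guards before it pass. A minimal self-contained sketch of the idiom, with hypothetical stand-ins for the jemalloc routines:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for chunk_recycle()/chunk_alloc_dss(). */
    static void *try_recycle(void) { return (NULL); }  /* nothing recyclable */
    static void *try_fresh(void) { static char c; return (&c); }

    /*
     * Same shape as chunk_alloc_core_dss(): try to recycle first, then
     * fall back to a fresh allocation.
     */
    static void *
    alloc_dss_like(void)
    {
    	void *ret;

    	if ((ret = try_recycle()) != NULL)
    		return (ret);
    	return (try_fresh());
    }

    int
    main(void)
    {
    	bool have_dss = true;
    	bool primary = true;
    	void *ret;

    	/*
    	 * The collapsed condition from the diff: alloc_dss_like() runs
    	 * only if both guards are true, because && short-circuits.
    	 */
    	if (have_dss && primary && (ret = alloc_dss_like()) != NULL)
    		printf("allocated %p via the dss path\n", ret);
    	return (0);
    }

The same short-circuiting is what makes the mmap-path guard effective: when config_munmap is true, !config_munmap is false and chunk_recycle() is never called, avoiding a pointless search of recycle trees that are guaranteed to be empty.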