author     Jason Evans <je@fb.com>  2012-02-13 18:56:17 (GMT)
committer  Jason Evans <je@fb.com>  2012-02-13 18:56:17 (GMT)
commit     4162627757889ea999264c2ddbc3c354768774e2 (patch)
tree       c0a4528bb29b5eb1b305bad15c13b248429041e3 /src
parent     fd56043c53f1cd1335ae6d1c0ee86cc0fbb9f12e (diff)
Remove the swap feature.
Remove the swap feature, which enabled per-application swap files. In practice this feature has not proven itself useful to users.
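For context, the removed feature was driven entirely through the swap.* mallctl namespace deleted from ctl.c below. The following is a minimal usage sketch of that now-removed interface, assuming the unprefixed public mallctl() entry point and a hypothetical, pre-sized swap file; it is an illustration, not code from this repository:

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	bool prezeroed = false;
	int fds[1];

	/* Hypothetical, already-created and pre-sized swap file. */
	fds[0] = open("/tmp/jemalloc.swap", O_RDWR);
	if (fds[0] == -1)
		return (1);

	/* Optionally declare whether the file is already zero-filled. */
	if (mallctl("swap.prezeroed", NULL, NULL, &prezeroed,
	    sizeof(prezeroed)) != 0)
		return (1);

	/* Hand the descriptor(s) to jemalloc; chunks are then carved out of
	 * a MAP_SHARED mapping of the file instead of anonymous memory. */
	if (mallctl("swap.fds", NULL, NULL, fds, sizeof(fds)) != 0)
		return (1);

	printf("per-application swap file enabled\n");
	return (0);
}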
Diffstat (limited to 'src')
-rw-r--r--  src/arena.c         5
-rw-r--r--  src/chunk.c        26
-rw-r--r--  src/chunk_swap.c  403
-rw-r--r--  src/ctl.c         111
-rw-r--r--  src/huge.c         10
-rw-r--r--  src/jemalloc.c      9
-rw-r--r--  src/stats.c        26
7 files changed, 19 insertions, 571 deletions
diff --git a/src/arena.c b/src/arena.c
index 4ada6a3..c2632d9 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -671,10 +671,11 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
* madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
* mappings, but not for file-backed mappings.
*/
- (config_swap && swap_enabled) ? CHUNK_MAP_UNZEROED : 0;
+ 0
#else
- CHUNK_MAP_UNZEROED;
+ CHUNK_MAP_UNZEROED
#endif
+ ;
/*
* If chunk is the spare, temporarily re-allocate it, 1) so that its
diff --git a/src/chunk.c b/src/chunk.c
index 57ab20d..b908650 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -5,7 +5,6 @@
/* Data. */
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
-bool opt_overcommit = true;
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;
@@ -35,23 +34,15 @@ chunk_alloc(size_t size, bool base, bool *zero)
assert(size != 0);
assert((size & chunksize_mask) == 0);
- if (config_swap && swap_enabled) {
- ret = chunk_alloc_swap(size, zero);
+ if (config_dss) {
+ ret = chunk_alloc_dss(size, zero);
if (ret != NULL)
goto RETURN;
}
-
- if (swap_enabled == false || opt_overcommit) {
- if (config_dss) {
- ret = chunk_alloc_dss(size, zero);
- if (ret != NULL)
- goto RETURN;
- }
- ret = chunk_alloc_mmap(size);
- if (ret != NULL) {
- *zero = true;
- goto RETURN;
- }
+ ret = chunk_alloc_mmap(size);
+ if (ret != NULL) {
+ *zero = true;
+ goto RETURN;
}
/* All strategies for allocation failed. */
@@ -102,9 +93,6 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
}
if (unmap) {
- if (config_swap && swap_enabled && chunk_dealloc_swap(chunk,
- size) == false)
- return;
if (config_dss && chunk_dealloc_dss(chunk, size) == false)
return;
chunk_dealloc_mmap(chunk, size);
@@ -126,8 +114,6 @@ chunk_boot(void)
return (true);
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
}
- if (config_swap && chunk_swap_boot())
- return (true);
if (chunk_mmap_boot())
return (true);
if (config_dss && chunk_dss_boot())
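With the swap path gone, the fallback logic in chunk_alloc() collapses to two steps. A condensed sketch of the post-patch control flow, using the helper names from the hunk above (stats bookkeeping and the shared RETURN label of the real function are omitted):

static void *
chunk_alloc_sketch(size_t size, bool *zero)
{
	void *ret;

	if (config_dss) {
		/* Try the DSS (sbrk()-backed memory) first when compiled in. */
		ret = chunk_alloc_dss(size, zero);
		if (ret != NULL)
			return (ret);
	}
	/* Otherwise fall back to anonymous mmap(); fresh mappings are
	 * known to be zero-filled. */
	ret = chunk_alloc_mmap(size);
	if (ret != NULL) {
		*zero = true;
		return (ret);
	}
	/* All strategies for allocation failed. */
	return (NULL);
}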
diff --git a/src/chunk_swap.c b/src/chunk_swap.c
deleted file mode 100644
index fe9ca30..0000000
--- a/src/chunk_swap.c
+++ /dev/null
@@ -1,403 +0,0 @@
-#define JEMALLOC_CHUNK_SWAP_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-malloc_mutex_t swap_mtx;
-bool swap_enabled;
-bool swap_prezeroed;
-size_t swap_nfds;
-int *swap_fds;
-size_t swap_avail;
-
-/* Base address of the mmap()ed file(s). */
-static void *swap_base;
-/* Current end of the space in use (<= swap_max). */
-static void *swap_end;
-/* Absolute upper limit on file-backed addresses. */
-static void *swap_max;
-
-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering). These are used when allocating chunks, in an attempt to re-use
- * address space. Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_t swap_chunks_szad;
-static extent_tree_t swap_chunks_ad;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void *chunk_recycle_swap(size_t size, bool *zero);
-static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size);
-
-/******************************************************************************/
-
-static void *
-chunk_recycle_swap(size_t size, bool *zero)
-{
- extent_node_t *node, key;
-
- cassert(config_swap);
-
- key.addr = NULL;
- key.size = size;
- malloc_mutex_lock(&swap_mtx);
- node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
- if (node != NULL) {
- void *ret = node->addr;
-
- /* Remove node from the tree. */
- extent_tree_szad_remove(&swap_chunks_szad, node);
- if (node->size == size) {
- extent_tree_ad_remove(&swap_chunks_ad, node);
- base_node_dealloc(node);
- } else {
- /*
- * Insert the remainder of node's address range as a
- * smaller chunk. Its position within swap_chunks_ad
- * does not change.
- */
- assert(node->size > size);
- node->addr = (void *)((uintptr_t)node->addr + size);
- node->size -= size;
- extent_tree_szad_insert(&swap_chunks_szad, node);
- }
- if (config_stats)
- swap_avail -= size;
- malloc_mutex_unlock(&swap_mtx);
-
- if (*zero)
- memset(ret, 0, size);
- return (ret);
- }
- malloc_mutex_unlock(&swap_mtx);
-
- return (NULL);
-}
-
-void *
-chunk_alloc_swap(size_t size, bool *zero)
-{
- void *ret;
-
- cassert(config_swap);
- assert(swap_enabled);
-
- ret = chunk_recycle_swap(size, zero);
- if (ret != NULL)
- return (ret);
-
- malloc_mutex_lock(&swap_mtx);
- if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
- ret = swap_end;
- swap_end = (void *)((uintptr_t)swap_end + size);
- if (config_stats)
- swap_avail -= size;
- malloc_mutex_unlock(&swap_mtx);
-
- if (swap_prezeroed)
- *zero = true;
- else if (*zero)
- memset(ret, 0, size);
- } else {
- malloc_mutex_unlock(&swap_mtx);
- return (NULL);
- }
-
- return (ret);
-}
-
-static extent_node_t *
-chunk_dealloc_swap_record(void *chunk, size_t size)
-{
- extent_node_t *xnode, *node, *prev, key;
-
- cassert(config_swap);
-
- xnode = NULL;
- while (true) {
- key.addr = (void *)((uintptr_t)chunk + size);
- node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
- /* Try to coalesce forward. */
- if (node != NULL && node->addr == key.addr) {
- /*
- * Coalesce chunk with the following address range.
- * This does not change the position within
- * swap_chunks_ad, so only remove/insert from/into
- * swap_chunks_szad.
- */
- extent_tree_szad_remove(&swap_chunks_szad, node);
- node->addr = chunk;
- node->size += size;
- extent_tree_szad_insert(&swap_chunks_szad, node);
- break;
- } else if (xnode == NULL) {
- /*
- * It is possible that base_node_alloc() will cause a
- * new base chunk to be allocated, so take care not to
- * deadlock on swap_mtx, and recover if another thread
- * deallocates an adjacent chunk while this one is busy
- * allocating xnode.
- */
- malloc_mutex_unlock(&swap_mtx);
- xnode = base_node_alloc();
- malloc_mutex_lock(&swap_mtx);
- if (xnode == NULL)
- return (NULL);
- } else {
- /* Coalescing forward failed, so insert a new node. */
- node = xnode;
- xnode = NULL;
- node->addr = chunk;
- node->size = size;
- extent_tree_ad_insert(&swap_chunks_ad, node);
- extent_tree_szad_insert(&swap_chunks_szad, node);
- break;
- }
- }
- /* Discard xnode if it ended up unused do to a race. */
- if (xnode != NULL)
- base_node_dealloc(xnode);
-
- /* Try to coalesce backward. */
- prev = extent_tree_ad_prev(&swap_chunks_ad, node);
- if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
- chunk) {
- /*
- * Coalesce chunk with the previous address range. This does
- * not change the position within swap_chunks_ad, so only
- * remove/insert node from/into swap_chunks_szad.
- */
- extent_tree_szad_remove(&swap_chunks_szad, prev);
- extent_tree_ad_remove(&swap_chunks_ad, prev);
-
- extent_tree_szad_remove(&swap_chunks_szad, node);
- node->addr = prev->addr;
- node->size += prev->size;
- extent_tree_szad_insert(&swap_chunks_szad, node);
-
- base_node_dealloc(prev);
- }
-
- return (node);
-}
-
-bool
-chunk_in_swap(void *chunk)
-{
- bool ret;
-
- cassert(config_swap);
- assert(swap_enabled);
-
- malloc_mutex_lock(&swap_mtx);
- if ((uintptr_t)chunk >= (uintptr_t)swap_base
- && (uintptr_t)chunk < (uintptr_t)swap_max)
- ret = true;
- else
- ret = false;
- malloc_mutex_unlock(&swap_mtx);
-
- return (ret);
-}
-
-bool
-chunk_dealloc_swap(void *chunk, size_t size)
-{
- bool ret;
-
- cassert(config_swap);
- assert(swap_enabled);
-
- malloc_mutex_lock(&swap_mtx);
- if ((uintptr_t)chunk >= (uintptr_t)swap_base
- && (uintptr_t)chunk < (uintptr_t)swap_max) {
- extent_node_t *node;
-
- /* Try to coalesce with other unused chunks. */
- node = chunk_dealloc_swap_record(chunk, size);
- if (node != NULL) {
- chunk = node->addr;
- size = node->size;
- }
-
- /*
- * Try to shrink the in-use memory if this chunk is at the end
- * of the in-use memory.
- */
- if ((void *)((uintptr_t)chunk + size) == swap_end) {
- swap_end = (void *)((uintptr_t)swap_end - size);
-
- if (node != NULL) {
- extent_tree_szad_remove(&swap_chunks_szad,
- node);
- extent_tree_ad_remove(&swap_chunks_ad, node);
- base_node_dealloc(node);
- }
- } else
- madvise(chunk, size, MADV_DONTNEED);
-
- if (config_stats)
- swap_avail += size;
- ret = false;
- goto RETURN;
- }
-
- ret = true;
-RETURN:
- malloc_mutex_unlock(&swap_mtx);
- return (ret);
-}
-
-bool
-chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
-{
- bool ret;
- unsigned i;
- off_t off;
- void *vaddr;
- size_t cumsize, voff;
- size_t sizes[nfds];
-
- cassert(config_swap);
-
- malloc_mutex_lock(&swap_mtx);
-
- /* Get file sizes. */
- for (i = 0, cumsize = 0; i < nfds; i++) {
- off = lseek(fds[i], 0, SEEK_END);
- if (off == ((off_t)-1)) {
- ret = true;
- goto RETURN;
- }
- if (PAGE_CEILING(off) != off) {
- /* Truncate to a multiple of the page size. */
- off &= ~PAGE_MASK;
- if (ftruncate(fds[i], off) != 0) {
- ret = true;
- goto RETURN;
- }
- }
- sizes[i] = off;
- if (cumsize + off < cumsize) {
- /*
- * Cumulative file size is greater than the total
- * address space. Bail out while it's still obvious
- * what the problem is.
- */
- ret = true;
- goto RETURN;
- }
- cumsize += off;
- }
-
- /* Round down to a multiple of the chunk size. */
- cumsize &= ~chunksize_mask;
- if (cumsize == 0) {
- ret = true;
- goto RETURN;
- }
-
- /*
- * Allocate a chunk-aligned region of anonymous memory, which will
- * be the final location for the memory-mapped files.
- */
- vaddr = chunk_alloc_mmap_noreserve(cumsize);
- if (vaddr == NULL) {
- ret = true;
- goto RETURN;
- }
-
- /* Overlay the files onto the anonymous mapping. */
- for (i = 0, voff = 0; i < nfds; i++) {
- void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
- if (addr == MAP_FAILED) {
- char buf[BUFERROR_BUF];
-
-
- buferror(errno, buf, sizeof(buf));
- malloc_write(
- "<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
- malloc_write(buf);
- malloc_write("\n");
- if (opt_abort)
- abort();
- if (munmap(vaddr, voff) == -1) {
- buferror(errno, buf, sizeof(buf));
- malloc_write("<jemalloc>: Error in munmap(): ");
- malloc_write(buf);
- malloc_write("\n");
- }
- ret = true;
- goto RETURN;
- }
- assert(addr == (void *)((uintptr_t)vaddr + voff));
-
- /*
- * Tell the kernel that the mapping will be accessed randomly,
- * and that it should not gratuitously sync pages to the
- * filesystem.
- */
-#ifdef MADV_RANDOM
- madvise(addr, sizes[i], MADV_RANDOM);
-#endif
-#ifdef MADV_NOSYNC
- madvise(addr, sizes[i], MADV_NOSYNC);
-#endif
-
- voff += sizes[i];
- }
-
- swap_prezeroed = prezeroed;
- swap_base = vaddr;
- swap_end = swap_base;
- swap_max = (void *)((uintptr_t)vaddr + cumsize);
-
- /* Copy the fds array for mallctl purposes. */
- swap_fds = (int *)base_alloc(nfds * sizeof(int));
- if (swap_fds == NULL) {
- ret = true;
- goto RETURN;
- }
- memcpy(swap_fds, fds, nfds * sizeof(int));
- swap_nfds = nfds;
-
- if (config_stats)
- swap_avail = cumsize;
-
- swap_enabled = true;
-
- ret = false;
-RETURN:
- malloc_mutex_unlock(&swap_mtx);
- return (ret);
-}
-
-bool
-chunk_swap_boot(void)
-{
-
- cassert(config_swap);
-
- if (malloc_mutex_init(&swap_mtx))
- return (true);
-
- swap_enabled = false;
- swap_prezeroed = false; /* swap.* mallctl's depend on this. */
- swap_nfds = 0;
- swap_fds = NULL;
- if (config_stats)
- swap_avail = 0;
- swap_base = NULL;
- swap_end = NULL;
- swap_max = NULL;
-
- extent_tree_szad_new(&swap_chunks_szad);
- extent_tree_ad_new(&swap_chunks_ad);
-
- return (false);
-}
diff --git a/src/ctl.c b/src/ctl.c
index 05be431..2ac2f66 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -8,8 +8,6 @@
* ctl_mtx protects the following:
* - ctl_stats.*
* - opt_prof_active
- * - swap_enabled
- * - swap_prezeroed
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
@@ -56,7 +54,6 @@ CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
-CTL_PROTO(config_swap)
CTL_PROTO(config_sysv)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tiny)
@@ -85,7 +82,6 @@ CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(opt_lg_prof_tcmax)
-CTL_PROTO(opt_overcommit)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
@@ -162,10 +158,6 @@ CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)
-CTL_PROTO(swap_avail)
-CTL_PROTO(swap_prezeroed)
-CTL_PROTO(swap_nfds)
-CTL_PROTO(swap_fds)
/******************************************************************************/
/* mallctl tree. */
@@ -205,7 +197,6 @@ static const ctl_node_t config_node[] = {
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
{NAME("stats"), CTL(config_stats)},
- {NAME("swap"), CTL(config_swap)},
{NAME("sysv"), CTL(config_sysv)},
{NAME("tcache"), CTL(config_tcache)},
{NAME("tiny"), CTL(config_tiny)},
@@ -236,8 +227,7 @@ static const ctl_node_t opt_node[] = {
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
{NAME("prof_accum"), CTL(opt_prof_accum)},
- {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)},
- {NAME("overcommit"), CTL(opt_overcommit)}
+ {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)}
};
static const ctl_node_t arenas_bin_i_node[] = {
@@ -391,13 +381,6 @@ static const ctl_node_t stats_node[] = {
{NAME("arenas"), CHILD(stats_arenas)}
};
-static const ctl_node_t swap_node[] = {
- {NAME("avail"), CTL(swap_avail)},
- {NAME("prezeroed"), CTL(swap_prezeroed)},
- {NAME("nfds"), CTL(swap_nfds)},
- {NAME("fds"), CTL(swap_fds)}
-};
-
static const ctl_node_t root_node[] = {
{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
@@ -408,8 +391,6 @@ static const ctl_node_t root_node[] = {
{NAME("arenas"), CHILD(arenas)},
{NAME("prof"), CHILD(prof)},
{NAME("stats"), CHILD(stats)}
- ,
- {NAME("swap"), CHILD(swap)}
};
static const ctl_node_t super_root_node[] = {
{NAME(""), CHILD(root)}
@@ -597,12 +578,6 @@ ctl_refresh(void)
ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
PAGE_SHIFT) + ctl_stats.huge.allocated;
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
-
- if (config_swap) {
- malloc_mutex_lock(&swap_mtx);
- ctl_stats.swap_avail = swap_avail;
- malloc_mutex_unlock(&swap_mtx);
- }
}
ctl_epoch++;
@@ -1138,7 +1113,6 @@ CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_BOOL_CONFIG_GEN(config_stats)
-CTL_RO_BOOL_CONFIG_GEN(config_swap)
CTL_RO_BOOL_CONFIG_GEN(config_sysv)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tiny)
@@ -1171,7 +1145,6 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
-CTL_RO_NL_CGEN(config_swap, opt_overcommit, opt_overcommit, bool)
/******************************************************************************/
@@ -1450,85 +1423,3 @@ CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
-
-/******************************************************************************/
-
-CTL_RO_CGEN(config_swap && config_stats, swap_avail, ctl_stats.swap_avail,
- size_t)
-
-static int
-swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
-
- if (config_swap == false)
- return (ENOENT);
-
- malloc_mutex_lock(&ctl_mtx);
- if (swap_enabled) {
- READONLY();
- } else {
- /*
- * swap_prezeroed isn't actually used by the swap code until it
- * is set during a successful chunk_swap_enabled() call. We
- * use it here to store the value that we'll pass to
- * chunk_swap_enable() in a swap.fds mallctl(). This is not
- * very clean, but the obvious alternatives are even worse.
- */
- WRITE(swap_prezeroed, bool);
- }
-
- READ(swap_prezeroed, bool);
-
- ret = 0;
-RETURN:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-CTL_RO_CGEN(config_swap, swap_nfds, swap_nfds, size_t)
-
-static int
-swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret;
-
- if (config_swap == false)
- return (ENOENT);
-
- malloc_mutex_lock(&ctl_mtx);
- if (swap_enabled) {
- READONLY();
- } else if (newp != NULL) {
- size_t nfds = newlen / sizeof(int);
-
- {
- int fds[nfds];
-
- memcpy(fds, newp, nfds * sizeof(int));
- if (chunk_swap_enable(fds, nfds, swap_prezeroed)) {
- ret = EFAULT;
- goto RETURN;
- }
- }
- }
-
- if (oldp != NULL && oldlenp != NULL) {
- if (*oldlenp != swap_nfds * sizeof(int)) {
- size_t copylen = (swap_nfds * sizeof(int) <= *oldlenp)
- ? swap_nfds * sizeof(int) : *oldlenp;
-
- memcpy(oldp, swap_fds, copylen);
- ret = EINVAL;
- goto RETURN;
- } else
- memcpy(oldp, swap_fds, *oldlenp);
- }
-
- ret = 0;
-RETURN:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
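The swap.* mallctl handlers deleted above are also what stats.c polled (see the stats.c hunk further down). A small sketch of how a caller used to probe the removed swap.avail statistic, again assuming the unprefixed mallctl() name; builds without the swap feature simply failed the lookup and took the else branch:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_swap_avail(void)
{
	size_t swap_avail;
	size_t ssz = sizeof(size_t);

	/* Mirrors the logic being deleted from stats_print(): if the node
	 * exists (swap and stats compiled in), report it; otherwise note
	 * that the statistic is unavailable. */
	if (mallctl("swap.avail", &swap_avail, &ssz, NULL, 0) == 0)
		printf("swap.avail: %zu\n", swap_avail);
	else
		printf("swap statistics not available\n");
}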
diff --git a/src/huge.c b/src/huge.c
index 1eee436..f2fba86 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -212,13 +212,11 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
/*
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
- * source nor the destination are in swap or dss.
+ * source nor the destination are in dss.
*/
#ifdef JEMALLOC_MREMAP_FIXED
- if (oldsize >= chunksize && (config_swap == false || swap_enabled ==
- false || (chunk_in_swap(ptr) == false && chunk_in_swap(ret) ==
- false)) && (config_dss == false || (chunk_in_dss(ptr) == false &&
- chunk_in_dss(ret) == false))) {
+ if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
+ == false && chunk_in_dss(ret) == false))) {
size_t newsize = huge_salloc(ret);
/*
@@ -280,7 +278,7 @@ huge_dalloc(void *ptr, bool unmap)
malloc_mutex_unlock(&huge_mtx);
- if (unmap && config_fill && (config_swap || config_dss) && opt_junk)
+ if (unmap && config_fill && config_dss && opt_junk)
memset(node->addr, 0x5a, node->size);
chunk_dealloc(node->addr, node->size, unmap);
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 9e1814d..a32ce1a 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -610,9 +610,6 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(prof_gdump)
CONF_HANDLE_BOOL(prof_leak)
}
- if (config_swap) {
- CONF_HANDLE_BOOL(overcommit)
- }
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_HANDLE_BOOL
@@ -1629,9 +1626,6 @@ jemalloc_prefork(void)
if (config_dss)
malloc_mutex_lock(&dss_mtx);
-
- if (config_swap)
- malloc_mutex_lock(&swap_mtx);
}
void
@@ -1641,9 +1635,6 @@ jemalloc_postfork(void)
/* Release all mutexes, now that fork() has completed. */
- if (config_swap)
- malloc_mutex_unlock(&swap_mtx);
-
if (config_dss)
malloc_mutex_unlock(&dss_mtx);
diff --git a/src/stats.c b/src/stats.c
index e644653..ad8cd13 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -525,7 +525,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL(prof_gdump)
OPT_WRITE_BOOL(prof_leak)
- OPT_WRITE_BOOL(overcommit)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
@@ -668,11 +667,10 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
if (config_stats) {
- int err;
size_t sszp, ssz;
size_t *cactive;
size_t allocated, active, mapped;
- size_t chunks_current, chunks_high, swap_avail;
+ size_t chunks_current, chunks_high;
uint64_t chunks_total;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc;
@@ -694,24 +692,10 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
CTL_GET("stats.chunks.high", &chunks_high, size_t);
CTL_GET("stats.chunks.current", &chunks_current, size_t);
- if ((err = JEMALLOC_P(mallctl)("swap.avail", &swap_avail, &ssz,
- NULL, 0)) == 0) {
- size_t lg_chunk;
-
- malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
- "highchunks curchunks swap_avail\n");
- CTL_GET("opt.lg_chunk", &lg_chunk, size_t);
- malloc_cprintf(write_cb, cbopaque,
- " %13"PRIu64"%13zu%13zu%13zu\n",
- chunks_total, chunks_high, chunks_current,
- swap_avail << lg_chunk);
- } else {
- malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
- "highchunks curchunks\n");
- malloc_cprintf(write_cb, cbopaque,
- " %13"PRIu64"%13zu%13zu\n",
- chunks_total, chunks_high, chunks_current);
- }
+ malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
+ "highchunks curchunks\n");
+ malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n",
+ chunks_total, chunks_high, chunks_current);
/* Print huge stats. */
CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);