path: root/jemalloc/src/chunk.c
#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

size_t	opt_lg_chunk = LG_CHUNK_DEFAULT;
#ifdef JEMALLOC_SWAP
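/*
 * If true, allow falling back to the DSS/mmap() when swap-backed allocation
 * fails (see the fallback logic in chunk_alloc()).
 */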
bool	opt_overcommit = true;
#endif

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;
#endif

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;
size_t		arena_chunk_header_npages;
size_t		arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, while still
 * taking advantage of them when they happen to be returned.
 */
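/*
 * Usage sketch (illustrative only, not called within this file): a caller
 * that does not require zeroed memory, but can skip zeroing when the chunk
 * happens to be zeroed already:
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc(chunksize, &zero);
 *	if (chunk != NULL && zero == false)
 *		memset(chunk, 0, chunksize);
 */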
void *
chunk_alloc(size_t size, bool *zero)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);

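	/*
	 * Try each compiled-in backing in order: the swap file (if enabled at
	 * run time), then the sbrk()-based DSS, then anonymous mmap().  The
	 * DSS and mmap() fallbacks are only tried if swap is disabled or
	 * opt_overcommit is set.
	 */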
#ifdef JEMALLOC_SWAP
	if (swap_enabled) {
		ret = chunk_alloc_swap(size, zero);
		if (ret != NULL)
			goto RETURN;
	}

	if (swap_enabled == false || opt_overcommit) {
#endif
#ifdef JEMALLOC_DSS
		ret = chunk_alloc_dss(size, zero);
		if (ret != NULL)
			goto RETURN;
#endif
		ret = chunk_alloc_mmap(size);
		if (ret != NULL) {
			*zero = true;
			goto RETURN;
		}
#ifdef JEMALLOC_SWAP
	}
#endif

	/* All strategies for allocation failed. */
	ret = NULL;
RETURN:
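	/*
	 * Update chunk statistics, and if a new high-water mark of in-use
	 * chunks was reached, optionally trigger a memory profile dump.
	 */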
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	if (ret != NULL) {
#  ifdef JEMALLOC_PROF
		bool udump;
#  endif
		malloc_mutex_lock(&chunks_mtx);
#  ifdef JEMALLOC_STATS
		stats_chunks.nchunks += (size / chunksize);
#  endif
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks = stats_chunks.curchunks;
#  ifdef JEMALLOC_PROF
			udump = true;
#  endif
		}
#  ifdef JEMALLOC_PROF
		else
			udump = false;
#  endif
		malloc_mutex_unlock(&chunks_mtx);
#  ifdef JEMALLOC_PROF
		if (opt_prof && opt_prof_udump && udump)
			prof_udump();
#  endif
	}
#endif

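	/* Chunks must be chunksize-aligned, i.e. ret is its own chunk base. */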
	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}

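/*
 * Return the chunk to whichever backing accepts it: the swap file (if
 * enabled), then the DSS, with munmap()-based deallocation as the final
 * fallback.  chunk_dealloc_swap() and chunk_dealloc_dss() return false on
 * success.
 */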
void
chunk_dealloc(void *chunk, size_t size)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	malloc_mutex_lock(&chunks_mtx);
	stats_chunks.curchunks -= (size / chunksize);
	malloc_mutex_unlock(&chunks_mtx);
#endif

#ifdef JEMALLOC_SWAP
	if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
		return;
#endif
#ifdef JEMALLOC_DSS
	if (chunk_dealloc_dss(chunk, size) == false)
		return;
#endif
	chunk_dealloc_mmap(chunk, size);
}

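/*
 * Initialize chunk-related runtime state derived from opt_lg_chunk, and boot
 * the optional swap and DSS subsystems.  Returns true if initialization
 * fails.
 */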
bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (1LU << opt_lg_chunk);
	assert(chunksize >= PAGE_SIZE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> PAGE_SHIFT);
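	/*
	 * For example, assuming opt_lg_chunk == 22 and PAGE_SHIFT == 12
	 * (4 KiB pages): chunksize == 4 MiB, chunksize_mask == 0x3fffff, and
	 * chunk_npages == 1024.
	 */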

#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
	if (malloc_mutex_init(&chunks_mtx))
		return (true);
	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif

#ifdef JEMALLOC_SWAP
	if (chunk_swap_boot())
		return (true);
#endif
#ifdef JEMALLOC_DSS
	if (chunk_dss_boot())
		return (true);
#endif

	return (false);
}