#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
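
/* Return the arena's index, as recorded in its base allocator. */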
static inline unsigned
arena_ind_get(const arena_t *arena) {
	return base_ind_get(arena->base);
}
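
/* Atomically increase the arena's count of internally allocated bytes. */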
static inline void
arena_internal_add(arena_t *arena, size_t size) {
	atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}
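
/* Atomically decrease the arena's count of internally allocated bytes. */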
static inline void
arena_internal_sub(arena_t *arena, size_t size) {
	atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
}
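
/* Atomically read the arena's count of internally allocated bytes. */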
static inline size_t
arena_internal_get(arena_t *arena) {
	return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}
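
/*
 * Accumulate accumbytes toward the profiling interval.  Returns true if the
 * caller should trigger an interval-based profile dump; always false when
 * interval-based dumping is disabled (prof_interval == 0).
 */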
static inline bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
	cassert(config_prof);

	if (likely(prof_interval == 0)) {
		return false;
	}

	return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
}
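
/*
 * Rebind the calling thread to the arena that corresponds to the CPU it is
 * currently running on, updating the tcache association to match.  No-op if
 * the thread is already bound to that arena.
 */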
static inline void
percpu_arena_update(tsd_t *tsd, unsigned cpu) {
	assert(have_percpu_arena);
	arena_t *oldarena = tsd_arena_get(tsd);
	assert(oldarena != NULL);
	unsigned oldind = arena_ind_get(oldarena);

	if (oldind != cpu) {
		unsigned newind = cpu;
		arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
		assert(newarena != NULL);

		/* Set new arena/tcache associations. */
		arena_migrate(tsd, oldind, newind);
		tcache_t *tcache = tcache_get(tsd);
		if (tcache != NULL) {
			tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
			    newarena);
		}
	}
}

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */