author     Jason Evans <jasone@canonware.com>    2015-08-19 22:21:32 (GMT)
committer  Jason Evans <jasone@canonware.com>    2015-08-19 22:21:32 (GMT)
commit     d01fd19755bc0c2f5be3143349016dd0d7de7b36 (patch)
tree       39b7471c99728c296f50b81e20c7aec9bacf2cbe /include/jemalloc/internal
parent     5ef33a9f2b9f4fb56553529f7b31f4f5f57ce014 (diff)
Rename index_t to szind_t to avoid a collision with an existing type on Solaris.
This resolves #256.
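
For context, the clash this rename sidesteps looks roughly like the sketch below. It assumes the preexisting index_t is pulled in through a Solaris system header such as <sys/types.h>; the exact header and the system type's definition are assumptions, not something this commit states.

    /*
     * Minimal sketch of the Solaris name collision (hypothetical; which
     * header declares index_t and with what type are assumptions).
     */
    #include <sys/types.h>    /* on Solaris, already declares index_t */

    typedef unsigned index_t; /* fails to compile on Solaris:
                                 redefinition of index_t */

    int
    main(void)
    {
        index_t binind = 0;   /* jemalloc's size-class index usage */
        return ((int)binind);
    }

Renaming jemalloc's typedef to szind_t keeps the size-class index type out of the system namespace, so these headers compile unchanged on Solaris.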
Diffstat (limited to 'include/jemalloc/internal')
-rw-r--r--  include/jemalloc/internal/arena.h                 38
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in  28
-rw-r--r--  include/jemalloc/internal/tcache.h                18
3 files changed, 42 insertions(+), 42 deletions(-)
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 2347213..62a9a85 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -39,7 +39,7 @@ typedef struct arena_s arena_t;
#ifdef JEMALLOC_ARENA_STRUCTS_A
struct arena_run_s {
/* Index of bin this run is associated with. */
- index_t binind;
+ szind_t binind;
/* Number of free regions in run. */
unsigned nfree;
@@ -448,7 +448,7 @@ bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
void arena_maybe_purge(arena_t *arena);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
- index_t binind, uint64_t prof_accumbytes);
+ szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@@ -524,7 +524,7 @@ size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-index_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
@@ -541,17 +541,17 @@ void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
- index_t binind);
+ szind_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
- size_t runind, index_t binind, size_t flags);
+ size_t runind, szind_t binind, size_t flags);
void arena_metadata_allocated_add(arena_t *arena, size_t size);
void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-index_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
-index_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
+szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
+szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_tctx_t *arena_prof_tctx_get(const void *ptr);
@@ -701,11 +701,11 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
}
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
- index_t binind;
+ szind_t binind;
mapbits = arena_mapbits_get(chunk, pageind);
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -840,7 +840,7 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
- index_t binind)
+ szind_t binind)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
@@ -854,7 +854,7 @@ arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
- index_t binind, size_t flags)
+ szind_t binind, size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
@@ -931,10 +931,10 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
}
}
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
- index_t binind;
+ szind_t binind;
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -946,7 +946,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
- index_t run_binind, actual_binind;
+ szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
arena_chunk_map_misc_t *miscelm;
void *rpages;
@@ -980,10 +980,10 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
# endif /* JEMALLOC_ARENA_INLINE_A */
# ifdef JEMALLOC_ARENA_INLINE_B
-JEMALLOC_INLINE index_t
+JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
- index_t binind = bin - arena->bins;
+ szind_t binind = bin - arena->bins;
assert(binind < NBINS);
return (binind);
}
@@ -1161,7 +1161,7 @@ arena_salloc(const void *ptr, bool demote)
size_t ret;
arena_chunk_t *chunk;
size_t pageind;
- index_t binind;
+ szind_t binind;
assert(ptr != NULL);
@@ -1220,7 +1220,7 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
- index_t binind = arena_ptr_small_binind_get(ptr,
+ szind_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
@@ -1272,7 +1272,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
- index_t binind = size2index(size);
+ szind_t binind = size2index(size);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
size_t pageind = ((uintptr_t)ptr -
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 7a137b6..f6e464e 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -184,7 +184,7 @@ static const bool config_cache_oblivious =
#include "jemalloc/internal/jemalloc_internal_macros.h"
/* Size class index type. */
-typedef unsigned index_t;
+typedef unsigned szind_t;
/*
* Flags bits:
@@ -511,12 +511,12 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
-index_t size2index_compute(size_t size);
-index_t size2index_lookup(size_t size);
-index_t size2index(size_t size);
-size_t index2size_compute(index_t index);
-size_t index2size_lookup(index_t index);
-size_t index2size(index_t index);
+szind_t size2index_compute(size_t size);
+szind_t size2index_lookup(size_t size);
+szind_t size2index(size_t size);
+size_t index2size_compute(szind_t index);
+size_t index2size_lookup(szind_t index);
+size_t index2size(szind_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
@@ -527,7 +527,7 @@ arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE index_t
+JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
@@ -558,7 +558,7 @@ size2index_compute(size_t size)
}
}
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{
@@ -571,7 +571,7 @@ size2index_lookup(size_t size)
}
}
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{
@@ -582,7 +582,7 @@ size2index(size_t size)
}
JEMALLOC_INLINE size_t
-index2size_compute(index_t index)
+index2size_compute(szind_t index)
{
#if (NTBINS > 0)
@@ -609,7 +609,7 @@ index2size_compute(index_t index)
}
JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(index_t index)
+index2size_lookup(szind_t index)
{
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
@@ -617,7 +617,7 @@ index2size_lookup(index_t index)
}
JEMALLOC_ALWAYS_INLINE size_t
-index2size(index_t index)
+index2size(szind_t index)
{
assert(index < NSIZES);
@@ -976,7 +976,7 @@ u2rz(size_t usize)
size_t ret;
if (usize <= SMALL_MAXCLASS) {
- index_t binind = size2index(usize);
+ szind_t binind = size2index(usize);
ret = arena_bin_info[binind].redzone_size;
} else
ret = 0;
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 493f457..5079cd2 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -77,7 +77,7 @@ struct tcache_s {
ql_elm(tcache_t) link; /* Used for aggregating stats. */
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
unsigned ev_cnt; /* Event count since incremental GC. */
- index_t next_gc_bin; /* Next bin to GC. */
+ szind_t next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
@@ -126,10 +126,10 @@ extern tcaches_t *tcaches;
size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
- tcache_bin_t *tbin, index_t binind);
+ tcache_bin_t *tbin, szind_t binind);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
- index_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
+ szind_t binind, unsigned rem);
+void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
@@ -161,7 +161,7 @@ void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, bool zero);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
- index_t binind);
+ szind_t binind);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
size_t size);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
@@ -267,7 +267,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
bool zero)
{
void *ret;
- index_t binind;
+ szind_t binind;
size_t usize;
tcache_bin_t *tbin;
@@ -312,7 +312,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
bool zero)
{
void *ret;
- index_t binind;
+ szind_t binind;
size_t usize;
tcache_bin_t *tbin;
@@ -360,7 +360,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
{
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
@@ -386,7 +386,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
{
- index_t binind;
+ szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;