Diffstat (limited to 'src')
-rw-r--r--  src/chunk_mmap.c   108
-rw-r--r--  src/jemalloc.c      32
-rw-r--r--  src/mutex.c         16
-rw-r--r--  src/tsd.c           27
-rw-r--r--  src/util.c           7
5 files changed, 150 insertions, 40 deletions
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 9ff7480..0ad65a1 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -7,7 +7,7 @@
static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
- bool unaligned, bool *zero);
+ bool *zero);
/******************************************************************************/
@@ -16,6 +16,14 @@ pages_map(void *addr, size_t size)
{
void *ret;
+#ifdef _WIN32
+ /*
+ * If VirtualAlloc can't allocate at the given address when one is
+ * given, it fails and returns NULL.
+ */
+ ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+ PAGE_READWRITE);
+#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
@@ -41,7 +49,7 @@ pages_map(void *addr, size_t size)
}
ret = NULL;
}
-
+#endif
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
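
Note: the Windows branch above relies on a key difference from mmap():
given a non-NULL base address, VirtualAlloc either reserves exactly that
address or fails and returns NULL; it never replaces an existing mapping,
which is why no MAP_FIXED-style workaround is needed. A minimal
standalone demo of that behavior (ours, not part of the patch):

    #include <windows.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* Let the kernel pick the base for a 64KiB region. */
            void *p = VirtualAlloc(NULL, 65536, MEM_COMMIT | MEM_RESERVE,
                PAGE_READWRITE);

            if (p == NULL)
                    return (1);
            /*
             * Re-requesting the same base fails outright; VirtualAlloc
             * refuses to touch an existing mapping, so q is NULL.
             */
            void *q = VirtualAlloc(p, 65536, MEM_COMMIT | MEM_RESERVE,
                PAGE_READWRITE);
            printf("first: %p, retry at same base: %p\n", p, q);
            VirtualFree(p, 0, MEM_RELEASE);
            return (0);
    }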
@@ -51,55 +59,94 @@ static void
pages_unmap(void *addr, size_t size)
{
- if (munmap(addr, size) == -1) {
+#ifdef _WIN32
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
+#else
+ if (munmap(addr, size) == -1)
+#endif
+ {
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
- malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf);
+ malloc_printf("<jemalloc>: Error in "
+#ifdef _WIN32
+ "VirtualFree"
+#else
+ "munmap"
+#endif
+ "(): %s\n", buf);
if (opt_abort)
abort();
}
}
+static void *
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
+{
+ void *ret = (void *)((uintptr_t)addr + leadsize);
+
+ assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+ {
+ void *new_addr;
+
+ pages_unmap(addr, alloc_size);
+ new_addr = pages_map(ret, size);
+ if (new_addr == ret)
+ return (ret);
+ if (new_addr)
+ pages_unmap(new_addr, size);
+ return (NULL);
+ }
+#else
+ {
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0)
+ pages_unmap(addr, leadsize);
+ if (trailsize != 0)
+ pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ return (ret);
+ }
+#endif
+}
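
Note: pages_trim() needs two strategies because VirtualFree(MEM_RELEASE)
can only free an entire allocation at its original base (with dwSize 0),
so Windows cannot cut the head or tail off a mapping. The patch instead
releases the whole over-sized region and re-maps just the aligned part,
which can fail if another thread claims the range in between (hence the
NULL return). POSIX munmap() can release any page-aligned subrange, so
that branch trims in place and never fails. The POSIX side boils down to
this standalone sketch (hypothetical helper name):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Keep [addr+leadsize, addr+leadsize+size); unmap the rest. */
    static void *
    trim_posix(void *addr, size_t alloc_size, size_t leadsize, size_t size)
    {
            void *ret = (void *)((uintptr_t)addr + leadsize);
            size_t trailsize = alloc_size - leadsize - size;

            if (leadsize != 0)
                    munmap(addr, leadsize);
            if (trailsize != 0)
                    munmap((void *)((uintptr_t)ret + size), trailsize);
            return (ret);
    }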
+
void
pages_purge(void *addr, size_t length)
{
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-# define JEMALLOC_MADV_PURGE MADV_DONTNEED
-#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
-# define JEMALLOC_MADV_PURGE MADV_FREE
+#ifdef _WIN32
+ VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
#else
-# error "No method defined for purging unused dirty pages."
-#endif
+# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+# define JEMALLOC_MADV_PURGE MADV_DONTNEED
+# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+# define JEMALLOC_MADV_PURGE MADV_FREE
+# else
+# error "No method defined for purging unused dirty pages."
+# endif
madvise(addr, length, JEMALLOC_MADV_PURGE);
+#endif
}
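
Note: the purge primitives differ in strength. MEM_RESET tells Windows
the contents are disposable but keeps the pages committed, so they can
simply be written to again when the run is reused; MADV_DONTNEED (Linux)
drops the contents immediately, while MADV_FREE (BSD) lets the kernel
reclaim lazily. A small demo of the MEM_RESET lifecycle (ours; assumes
the allocator treats purged pages as undefined, not zeroed):

    #include <windows.h>
    #include <string.h>

    int
    main(void)
    {
            char *p = VirtualAlloc(NULL, 4096, MEM_COMMIT | MEM_RESERVE,
                PAGE_READWRITE);

            if (p == NULL)
                    return (1);
            memset(p, 0xa5, 4096);          /* dirty the page */
            VirtualAlloc(p, 4096, MEM_RESET, PAGE_READWRITE);
            p[0] = 1;       /* still mapped; writing re-dirties it */
            VirtualFree(p, 0, MEM_RELEASE);
            return (0);
    }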
static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
void *ret, *pages;
- size_t alloc_size, leadsize, trailsize;
+ size_t alloc_size, leadsize;
alloc_size = size + alignment - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
- pages = pages_map(NULL, alloc_size);
- if (pages == NULL)
- return (NULL);
- leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
- (uintptr_t)pages;
- assert(alloc_size >= leadsize + size);
- trailsize = alloc_size - leadsize - size;
- ret = (void *)((uintptr_t)pages + leadsize);
- if (leadsize != 0) {
- /* Note that mmap() returned an unaligned mapping. */
- unaligned = true;
- pages_unmap(pages, leadsize);
- }
- if (trailsize != 0)
- pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ do {
+ pages = pages_map(NULL, alloc_size);
+ if (pages == NULL)
+ return (NULL);
+ leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
+ (uintptr_t)pages;
+ ret = pages_trim(pages, alloc_size, leadsize, size);
+ } while (ret == NULL);
assert(ret != NULL);
*zero = true;
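
Note: the do/while loop replaces the old single-pass trim because
pages_trim() can now fail on Windows (the unmap/re-map race described
above), so the slow path keeps retrying with a fresh over-sized mapping
until the aligned re-map sticks. Over-allocating size + alignment - PAGE
guarantees an aligned block fits somewhere in the region; the leadsize
computation rounds the returned base up to the next alignment boundary.
A paraphrase of ALIGNMENT_CEILING (assumption: alignment is a power of
two):

    #include <stdint.h>

    /*
     * E.g. align_ceiling(0x7f3210, 0x400000) == 0x800000, so for a 4MiB
     * chunk boundary, leadsize == 0x800000 - 0x7f3210.
     */
    static uintptr_t
    align_ceiling(uintptr_t s, uintptr_t alignment)
    {
            return ((s + (alignment - 1)) & -alignment);
    }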
@@ -144,6 +191,9 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
+#ifdef _WIN32
+ return (chunk_alloc_mmap_slow(size, alignment, zero));
+#else
/* Try to extend chunk boundary. */
if (pages_map((void *)((uintptr_t)ret + size), chunksize -
offset) == NULL) {
@@ -152,13 +202,13 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
* reliable-but-expensive method.
*/
pages_unmap(ret, size);
- return (chunk_alloc_mmap_slow(size, alignment, true,
- zero));
+ return (chunk_alloc_mmap_slow(size, alignment, zero));
} else {
/* Clean up unneeded leading space. */
pages_unmap(ret, chunksize - offset);
ret = (void *)((uintptr_t)ret + (chunksize - offset));
}
+#endif
}
assert(ret != NULL);
diff --git a/src/jemalloc.c b/src/jemalloc.c
index f9c8916..67ac90b 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -52,7 +52,19 @@ static bool malloc_initializer = NO_INITIALIZER;
#endif
/* Used to avoid initialization races. */
+#ifdef _WIN32
+static malloc_mutex_t init_lock;
+
+JEMALLOC_ATTR(constructor)
+static void
+init_init_lock()
+{
+
+ malloc_mutex_init(&init_lock);
+}
+#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
+#endif
typedef struct {
void *p; /* Input pointer (as in realloc(p, s)). */
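
Note: the init_lock hunk above exists because CRITICAL_SECTION has no
static initializer comparable to PTHREAD_MUTEX_INITIALIZER; the lock has
to be initialized by running code, and the constructor attribute makes
that happen at load time, before any thread can race on malloc
initialization. A standalone equivalent (ours; GCC/Mingw attribute
syntax in place of JEMALLOC_ATTR):

    #include <windows.h>

    static CRITICAL_SECTION init_lock;

    /* Runs before main(), mirroring JEMALLOC_ATTR(constructor). */
    __attribute__((constructor))
    static void
    init_init_lock(void)
    {
            InitializeCriticalSection(&init_lock);
    }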
@@ -229,11 +241,17 @@ malloc_ncpus(void)
unsigned ret;
long result;
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ result = si.dwNumberOfProcessors;
+#else
result = sysconf(_SC_NPROCESSORS_ONLN);
if (result == -1) {
/* Error. */
ret = 1;
}
+#endif
ret = (unsigned)result;
return (ret);
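
Note: GetSystemInfo() has no failure mode, unlike
sysconf(_SC_NPROCESSORS_ONLN). Also visible in this hunk is a
pre-existing quirk of the non-Windows branch: the ret = 1 fallback is
immediately overwritten by the unconditional ret = (unsigned)result, so
a sysconf() failure would report (unsigned)-1 cpus. A hedged corrected
form of that branch:

    #include <unistd.h>

    static unsigned
    ncpus_posix(void)
    {
            long result = sysconf(_SC_NPROCESSORS_ONLN);

            if (result == -1)
                    return (1);     /* Error; assume one cpu. */
            return ((unsigned)result);
    }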
@@ -369,13 +387,14 @@ malloc_conf_init(void)
}
break;
case 1: {
+#ifndef _WIN32
int linklen;
const char *linkname =
-#ifdef JEMALLOC_PREFIX
+# ifdef JEMALLOC_PREFIX
"/etc/"JEMALLOC_PREFIX"malloc.conf"
-#else
+# else
"/etc/malloc.conf"
-#endif
+# endif
;
if ((linklen = readlink(linkname, buf,
@@ -386,7 +405,9 @@ malloc_conf_init(void)
*/
buf[linklen] = '\0';
opts = buf;
- } else {
+ } else
+#endif
+ {
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
@@ -610,7 +631,8 @@ malloc_init_hard(void)
malloc_conf_init();
-#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
+#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
+ && !defined(_WIN32))
/* Register fork handlers. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
diff --git a/src/mutex.c b/src/mutex.c
index 4b8ce57..159d82a 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -1,10 +1,14 @@
#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_LAZY_LOCK
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
#include <dlfcn.h>
#endif
+#ifndef _CRT_SPINCOUNT
+#define _CRT_SPINCOUNT 4000
+#endif
+
/******************************************************************************/
/* Data. */
@@ -16,7 +20,7 @@ static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif
-#ifdef JEMALLOC_LAZY_LOCK
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static void pthread_create_once(void);
#endif
@@ -26,7 +30,7 @@ static void pthread_create_once(void);
* process goes multi-threaded.
*/
-#ifdef JEMALLOC_LAZY_LOCK
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
@@ -68,7 +72,11 @@ int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
-#ifdef JEMALLOC_OSSPIN
+#ifdef _WIN32
+ if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
+ _CRT_SPINCOUNT))
+ return (true);
+#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
if (postpone_init) {
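
Note: _CRT_SPINCOUNT (defaulted to 4000 at the top of this file when the
CRT headers don't define it) makes the critical section spin briefly in
user space before falling back to a kernel wait, which suits an
allocator's short lock hold times. InitializeCriticalSectionAndSpinCount
returns nonzero on success, hence the inverted test returning true,
jemalloc's error convention. A standalone sketch of the Windows branch:

    #include <windows.h>
    #include <stdbool.h>

    #define SPINCOUNT 4000  /* mirrors the patch's _CRT_SPINCOUNT */

    /* Returns true on failure, like malloc_mutex_init(). */
    static bool
    mutex_init_win(CRITICAL_SECTION *lock)
    {
            if (!InitializeCriticalSectionAndSpinCount(lock, SPINCOUNT))
                    return (true);
            return (false);
    }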
diff --git a/src/tsd.c b/src/tsd.c
index 281a2e9..09f06e8 100644
--- a/src/tsd.c
+++ b/src/tsd.c
@@ -31,7 +31,7 @@ malloc_tsd_no_cleanup(void *arg)
not_reached();
}
-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
JEMALLOC_ATTR(visibility("default"))
void
_malloc_thread_cleanup(void)
@@ -70,3 +70,28 @@ malloc_tsd_boot(void)
ncleanups = 0;
}
+
+#ifdef _WIN32
+static BOOL WINAPI
+_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
+{
+
+ switch (fdwReason) {
+#ifdef JEMALLOC_LAZY_LOCK
+ case DLL_THREAD_ATTACH:
+ isthreaded = true;
+ break;
+#endif
+ case DLL_THREAD_DETACH:
+ _malloc_thread_cleanup();
+ break;
+ default:
+ break;
+ }
+ return (true);
+}
+
+JEMALLOC_ATTR(section(".CRT$XLY")) JEMALLOC_ATTR(used)
+static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
+ DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
+#endif
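
Note: without pthread key destructors, the patch hooks thread exit via a
TLS callback: the CRT gathers function pointers placed in the .CRT$XL*
sections (between .CRT$XLA and .CRT$XLZ) into the PE image's TLS
directory, and the loader invokes them on thread attach/detach, even in
statically linked executables. .CRT$XLY just orders this callback late
in that table. A standalone illustration using the documented
PIMAGE_TLS_CALLBACK signature (ours; GCC/Mingw attribute syntax):

    #include <windows.h>
    #include <stdio.h>

    static void NTAPI
    on_thread_event(PVOID handle, DWORD reason, PVOID reserved)
    {
            if (reason == DLL_THREAD_DETACH)
                    fputs("thread exiting; cleanup runs here\n", stderr);
    }

    /*
     * Planting the pointer in .CRT$XLY adds it to the TLS callback
     * table; "used" keeps the otherwise-unreferenced symbol alive.
     */
    __attribute__((section(".CRT$XLY"), used))
    static const PIMAGE_TLS_CALLBACK tls_cb = on_thread_event;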
diff --git a/src/util.c b/src/util.c
index 99ae26d..ee9efdf 100644
--- a/src/util.c
+++ b/src/util.c
@@ -67,7 +67,12 @@ void (*je_malloc_message)(void *, const char *s)
int
buferror(int errnum, char *buf, size_t buflen)
{
-#ifdef _GNU_SOURCE
+
+#ifdef _WIN32
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
+ (LPSTR)buf, buflen, NULL);
+ return (0);
+#elif defined(_GNU_SOURCE)
char *b = strerror_r(errno, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);