Diffstat (limited to 'src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.cpp')
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.cpp | 417
1 file changed, 366 insertions(+), 51 deletions(-)
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.cpp
index 4387d61..f114160 100644
--- a/src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.cpp
+++ b/src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.cpp
@@ -78,6 +78,7 @@
#include "FastMalloc.h"
#include "Assertions.h"
+#include <limits>
#if ENABLE(JSC_MULTIPLE_THREADS)
#include <pthread.h>
#endif
@@ -94,7 +95,7 @@
#define FORCE_SYSTEM_MALLOC 1
#endif
-#define TCMALLOC_TRACK_DECOMMITED_SPANS (HAVE(VIRTUALALLOC))
+#define TCMALLOC_TRACK_DECOMMITED_SPANS (HAVE(VIRTUALALLOC) || HAVE(MADV_FREE_REUSE))
#ifndef NDEBUG
namespace WTF {
@@ -151,6 +152,19 @@ void fastMallocAllow()
namespace WTF {
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+
+namespace Internal {
+
+void fastMallocMatchFailed(void*)
+{
+ CRASH();
+}
+
+} // namespace Internal
+
+#endif
+
void* fastZeroedMalloc(size_t n)
{
void* result = fastMalloc(n);
@@ -183,13 +197,34 @@ namespace WTF {
void* tryFastMalloc(size_t n)
{
ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
+ return 0;
+
+ void* result = malloc(n + sizeof(AllocAlignmentInteger));
+ if (!result)
+ return 0;
+
+ *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
+ result = static_cast<AllocAlignmentInteger*>(result) + 1;
+
+ return result;
+#else
return malloc(n);
+#endif
}
void* fastMalloc(size_t n)
{
ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ void* result = tryFastMalloc(n);
+#else
void* result = malloc(n);
+#endif
+
if (!result)
CRASH();
return result;
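[Note] The hunk above introduces the FAST_MALLOC_MATCH_VALIDATION scheme: every allocation is prefixed with an AllocAlignmentInteger tag recording which allocator family produced it, and the caller receives a pointer one tag-width past the real block. AllocAlignmentInteger, Internal::AllocTypeMalloc and the validation helpers are defined in FastMalloc.h, which this diff does not show. A minimal standalone sketch of the idea, with simplified names and a hypothetical magic value:

    #include <cstdlib>
    #include <stdint.h>
    #include <limits>

    typedef uintptr_t AllocAlignmentInteger;          // tag word, pointer-aligned
    const AllocAlignmentInteger kTagMalloc = 0xf00d;  // hypothetical magic value

    void* taggedMalloc(size_t n)
    {
        if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n)
            return 0;                                 // adding the tag would overflow
        void* p = malloc(n + sizeof(AllocAlignmentInteger));
        if (!p)
            return 0;
        *static_cast<AllocAlignmentInteger*>(p) = kTagMalloc;  // tag the block
        return static_cast<AllocAlignmentInteger*>(p) + 1;     // hand out past the tag
    }

The matching deallocator walks one tag-width back, checks the magic value, and frees the original pointer; see the fastFree() hunk below.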
@@ -198,13 +233,36 @@ void* fastMalloc(size_t n)
void* tryFastCalloc(size_t n_elements, size_t element_size)
{
ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ size_t totalBytes = n_elements * element_size;
+ if ((n_elements > 1 && element_size && (totalBytes / element_size) != n_elements) || (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes))
+ return 0;
+
+ totalBytes += sizeof(AllocAlignmentInteger);
+ void* result = malloc(totalBytes);
+ if (!result)
+ return 0;
+
+ memset(result, 0, totalBytes);
+ *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
+ result = static_cast<AllocAlignmentInteger*>(result) + 1;
+ return result;
+#else
return calloc(n_elements, element_size);
+#endif
}
void* fastCalloc(size_t n_elements, size_t element_size)
{
ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ void* result = tryFastCalloc(n_elements, element_size);
+#else
void* result = calloc(n_elements, element_size);
+#endif
+
if (!result)
CRASH();
return result;
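[Note] tryFastCalloc guards the multiplication n_elements * element_size before trusting it; the division check catches wraparound because a wrapped product divided by element_size can no longer give back n_elements, and the n_elements > 1 test skips the division when overflow is impossible. A small self-contained illustration of the guard:

    #include <cstddef>
    #include <cstdio>

    // True if n * size would wrap around size_t.
    static bool multiplyWouldOverflow(size_t n, size_t size)
    {
        size_t total = n * size;                  // may wrap; that is what we detect
        return n > 1 && size && total / size != n;
    }

    int main()
    {
        // With n just over SIZE_MAX / 2, n * 2 wraps to a tiny value.
        printf("%d\n", multiplyWouldOverflow((size_t)-1 / 2 + 2, 2)); // prints 1
        printf("%d\n", multiplyWouldOverflow(16, 16));                // prints 0
    }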
@@ -213,19 +271,57 @@ void* fastCalloc(size_t n_elements, size_t element_size)
void fastFree(void* p)
{
ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (!p)
+ return;
+
+ AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
+ if (*header != Internal::AllocTypeMalloc)
+ Internal::fastMallocMatchFailed(p);
+ free(header);
+#else
free(p);
+#endif
}
void* tryFastRealloc(void* p, size_t n)
{
ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (p) {
+ if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
+ return 0;
+ AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
+ if (*header != Internal::AllocTypeMalloc)
+ Internal::fastMallocMatchFailed(p);
+ void* result = realloc(header, n + sizeof(AllocAlignmentInteger));
+ if (!result)
+ return 0;
+
+ // This should not be needed because the value is already there:
+ // *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
+ result = static_cast<AllocAlignmentInteger*>(result) + 1;
+ return result;
+ } else {
+ return fastMalloc(n);
+ }
+#else
return realloc(p, n);
+#endif
}
void* fastRealloc(void* p, size_t n)
{
ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ void* result = tryFastRealloc(p, n);
+#else
void* result = realloc(p, n);
+#endif
+
if (!result)
CRASH();
return result;
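[Note] fastFree() relies on Internal::fastMallocMatchValidationValue(p) to locate the tag. The real helper lives in FastMalloc.h; its effect is simply the inverse of the pointer bump performed at allocation time, roughly (a sketch, not the header's exact code):

    #include <stdint.h>
    typedef uintptr_t AllocAlignmentInteger;

    static AllocAlignmentInteger* fastMallocMatchValidationValue(void* p)
    {
        // The tag sits immediately before the pointer the caller was given.
        return reinterpret_cast<AllocAlignmentInteger*>(
            static_cast<char*>(p) - sizeof(AllocAlignmentInteger));
    }

With this in place, a block released through fastFree but created by an allocator that wrote a different tag value lands in fastMallocMatchFailed(), which CRASH()es immediately rather than letting the mismatch corrupt the heap.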
@@ -265,6 +361,7 @@ extern "C" const int jscore_fastmalloc_introspection = 0;
#include "TCSystemAlloc.h"
#include <algorithm>
#include <errno.h>
+#include <limits>
#include <new>
#include <pthread.h>
#include <stdarg.h>
@@ -321,9 +418,11 @@ namespace WTF {
#define CHECK_CONDITION ASSERT
#if PLATFORM(DARWIN)
+class Span;
+class TCMalloc_Central_FreeListPadded;
class TCMalloc_PageHeap;
class TCMalloc_ThreadCache;
-class TCMalloc_Central_FreeListPadded;
+template <typename T> class PageHeapAllocator;
class FastMallocZone {
public:
@@ -339,7 +438,7 @@ public:
static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }
private:
- FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*);
+ FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
static size_t size(malloc_zone_t*, const void*);
static void* zoneMalloc(malloc_zone_t*, size_t);
static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
@@ -352,6 +451,8 @@ private:
TCMalloc_PageHeap* m_pageHeap;
TCMalloc_ThreadCache** m_threadHeaps;
TCMalloc_Central_FreeListPadded* m_centralCaches;
+ PageHeapAllocator<Span>* m_spanAllocator;
+ PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
};
#endif
@@ -820,6 +921,9 @@ class PageHeapAllocator {
char* free_area_;
size_t free_avail_;
+ // Linked list of all regions allocated by this allocator
+ void* allocated_regions_;
+
// Free list of already carved objects
void* free_list_;
@@ -830,6 +934,7 @@ class PageHeapAllocator {
void Init() {
ASSERT(kAlignedSize <= kAllocIncrement);
inuse_ = 0;
+ allocated_regions_ = 0;
free_area_ = NULL;
free_avail_ = 0;
free_list_ = NULL;
@@ -844,9 +949,14 @@ class PageHeapAllocator {
} else {
if (free_avail_ < kAlignedSize) {
// Need more room
- free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
- if (free_area_ == NULL) CRASH();
- free_avail_ = kAllocIncrement;
+ char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
+ if (!new_allocation)
+ CRASH();
+
+ *(void**)new_allocation = allocated_regions_;
+ allocated_regions_ = new_allocation;
+ free_area_ = new_allocation + kAlignedSize;
+ free_avail_ = kAllocIncrement - kAlignedSize;
}
result = free_area_;
free_area_ += kAlignedSize;
@@ -863,6 +973,18 @@ class PageHeapAllocator {
}
int inuse() const { return inuse_; }
+
+#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
+ template <class Recorder>
+ void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
+ {
+ vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_);
+ while (adminAllocation) {
+ recorder.recordRegion(adminAllocation, kAllocIncrement);
+ adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation));
+ }
+ }
+#endif
};
// -------------------------------------------------------------------------
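[Note] The Next() change threads every metadata region through an intrusive singly linked list: the first word of each freshly grabbed region stores the previous head (allocated_regions_), and object carving starts one aligned slot in, so the malloc-zone introspection code added below can later enumerate every region. A reduced sketch of the pattern, with stand-ins for MetaDataAlloc and kAllocIncrement:

    #include <cstdlib>
    #include <cassert>

    static const size_t kAllocIncrement = 1 << 15;  // stand-in for the real constant
    static const size_t kAlignedSize = sizeof(void*);

    static void* allocatedRegions = 0;              // list head

    static char* grabRegion()
    {
        char* r = static_cast<char*>(malloc(kAllocIncrement)); // stands in for MetaDataAlloc
        assert(r);
        *reinterpret_cast<void**>(r) = allocatedRegions;  // first word links to old head
        allocatedRegions = r;
        return r + kAlignedSize;                          // usable space starts past the link
    }

    // An introspection pass can then walk the chain:
    //   for (void* r = allocatedRegions; r; r = *static_cast<void**>(r))
    //       record(r, kAllocIncrement);

recordAdministrativeRegions() above performs exactly this walk, except through the RemoteMemoryReader because it inspects another process's address space.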
@@ -1037,11 +1159,30 @@ template <int BITS> class MapSelector {
typedef PackedCache<BITS, uint64_t> CacheType;
};
+#if defined(WTF_CHANGES)
+#if PLATFORM(X86_64)
+// On all known X86-64 platforms, the upper 16 bits are always unused and therefore
+// can be excluded from the PageMap key.
+// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
+
+static const size_t kBitsUnusedOn64Bit = 16;
+#else
+static const size_t kBitsUnusedOn64Bit = 0;
+#endif
+
+// A three-level map for 64-bit machines
+template <> class MapSelector<64> {
+ public:
+ typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
+ typedef PackedCache<64, uint64_t> CacheType;
+};
+#endif
+
// A two-level map for 32-bit machines
template <> class MapSelector<32> {
public:
- typedef TCMalloc_PageMap2<32-kPageShift> Type;
- typedef PackedCache<32-kPageShift, uint16_t> CacheType;
+ typedef TCMalloc_PageMap2<32 - kPageShift> Type;
+ typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
};
// -------------------------------------------------------------------------
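[Note] The new MapSelector<64> specialization exists because a flat map over all 64 - kPageShift page-number bits is infeasible; x86-64's canonical-address rule means the top 16 bits never vary, so they can be dropped from the key. With the 4 KiB pages this file uses (kPageShift == 12), the arithmetic gives a comfortably sized three-level radix tree:

    #include <cstdio>

    int main()
    {
        const int kPageShift = 12;          // 4 KiB pages, as defined earlier in the file
        const int kBitsUnusedOn64Bit = 16;  // x86-64 canonical-address bits
        // 64 - 12 - 16 = 36 key bits, split across the three PageMap3 levels.
        printf("PageMap3 key bits: %d\n", 64 - kPageShift - kBitsUnusedOn64Bit);
    }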
@@ -1359,8 +1500,14 @@ static ALWAYS_INLINE void mergeDecommittedStates(Span*, Span*) { }
#else
static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
{
- if (other->decommitted)
+ if (destination->decommitted && !other->decommitted) {
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
+ static_cast<size_t>(other->length << kPageShift));
+ } else if (other->decommitted && !destination->decommitted) {
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
+ static_cast<size_t>(destination->length << kPageShift));
destination->decommitted = true;
+ }
}
#endif
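[Note] The old mergeDecommittedStates only propagated the flag, so in either mixed case a span marked decommitted could still contain committed pages from its other half. The rewrite restores the invariant that a merged span is uniformly committed or uniformly decommitted, resolving any mix toward decommitted by releasing the still-committed half:

    // destination->decommitted | other->decommitted | action taken
    // --------------------------+--------------------+--------------------------------------
    // false                     | false              | nothing; merged span stays committed
    // true                      | false              | release other's pages
    // false                     | true               | release destination's pages, set flag
    // true                      | true               | nothing; already uniform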
@@ -3244,7 +3391,20 @@ template <bool crashOnFailure>
ALWAYS_INLINE
#endif
void* malloc(size_t size) {
- void* result = do_malloc(size);
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= size) // If overflow would occur...
+ return 0;
+ size += sizeof(AllocAlignmentInteger);
+ void* result = do_malloc(size);
+ if (!result)
+ return 0;
+
+ *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
+ result = static_cast<AllocAlignmentInteger*>(result) + 1;
+#else
+ void* result = do_malloc(size);
+#endif
+
#ifndef WTF_CHANGES
MallocHook::InvokeNewHook(result, size);
#endif
@@ -3258,7 +3418,18 @@ void free(void* ptr) {
#ifndef WTF_CHANGES
MallocHook::InvokeDeleteHook(ptr);
#endif
- do_free(ptr);
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (!ptr)
+ return;
+
+ AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(ptr);
+ if (*header != Internal::AllocTypeMalloc)
+ Internal::fastMallocMatchFailed(ptr);
+ do_free(header);
+#else
+ do_free(ptr);
+#endif
}
#ifndef WTF_CHANGES
@@ -3281,22 +3452,39 @@ template <bool crashOnFailure>
ALWAYS_INLINE
#endif
void* calloc(size_t n, size_t elem_size) {
- const size_t totalBytes = n * elem_size;
+ size_t totalBytes = n * elem_size;
// Protect against overflow
if (n > 1 && elem_size && (totalBytes / elem_size) != n)
return 0;
-
- void* result = do_malloc(totalBytes);
- if (result != NULL) {
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes) // If overflow would occur...
+ return 0;
+
+ totalBytes += sizeof(AllocAlignmentInteger);
+ void* result = do_malloc(totalBytes);
+ if (!result)
+ return 0;
+
memset(result, 0, totalBytes);
- }
+ *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
+ result = static_cast<AllocAlignmentInteger*>(result) + 1;
+#else
+ void* result = do_malloc(totalBytes);
+ if (result != NULL) {
+ memset(result, 0, totalBytes);
+ }
+#endif
+
#ifndef WTF_CHANGES
MallocHook::InvokeNewHook(result, totalBytes);
#endif
return result;
}
+// Since cfree isn't used anywhere, we don't compile it in.
+#ifndef WTF_CHANGES
#ifndef WTF_CHANGES
extern "C"
#endif
@@ -3306,6 +3494,7 @@ void cfree(void* ptr) {
#endif
do_free(ptr);
}
+#endif
#ifndef WTF_CHANGES
extern "C"
@@ -3328,10 +3517,14 @@ ALWAYS_INLINE
#endif
void* realloc(void* old_ptr, size_t new_size) {
if (old_ptr == NULL) {
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ void* result = malloc(new_size);
+#else
void* result = do_malloc(new_size);
#ifndef WTF_CHANGES
MallocHook::InvokeNewHook(result, new_size);
#endif
+#endif
return result;
}
if (new_size == 0) {
@@ -3342,6 +3535,16 @@ void* realloc(void* old_ptr, size_t new_size) {
return NULL;
}
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= new_size) // If overflow would occur...
+ return 0;
+ new_size += sizeof(AllocAlignmentInteger);
+ AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(old_ptr);
+ if (*header != Internal::AllocTypeMalloc)
+ Internal::fastMallocMatchFailed(old_ptr);
+ old_ptr = header;
+#endif
+
// Get the size of the old entry
const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
size_t cl = pageheap->GetSizeClassIfCached(p);
@@ -3378,8 +3581,14 @@ void* realloc(void* old_ptr, size_t new_size) {
// that we already know the sizeclass of old_ptr. The benefit
// would be small, so don't bother.
do_free(old_ptr);
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ new_ptr = static_cast<AllocAlignmentInteger*>(new_ptr) + 1;
+#endif
return new_ptr;
} else {
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ old_ptr = pByte + sizeof(AllocAlignmentInteger); // Set old_ptr back to the user pointer.
+#endif
return old_ptr;
}
}
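[Note] The realloc path above switches between the raw and user views of the block: the tag is stripped before the size lookup, and whichever pointer survives (new_ptr from do_malloc, or old_ptr when the allocation is reused in place) must be bumped back past the tag before returning. Condensed to its essence, with plain realloc standing in for the page-heap machinery and the overflow check from the hunk above omitted:

    #include <cstdlib>
    #include <stdint.h>

    typedef uintptr_t AllocAlignmentInteger;

    // Assumes userPtr came from a tagged allocator like the sketch after the
    // tryFastMalloc hunk; not the actual TCMalloc code path.
    void* taggedRealloc(void* userPtr, size_t newSize)
    {
        AllocAlignmentInteger* header =
            static_cast<AllocAlignmentInteger*>(userPtr) - 1;        // raw view
        void* raw = realloc(header, newSize + sizeof(AllocAlignmentInteger));
        if (!raw)
            return 0;
        // The tag word moved with the block's contents; re-offset for the caller.
        return static_cast<AllocAlignmentInteger*>(raw) + 1;         // user view
    }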
@@ -3608,6 +3817,7 @@ public:
void visit(void* ptr) { m_freeObjects.add(ptr); }
bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
+ bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
size_t freeObjectCount() const { return m_freeObjects.size(); }
void findFreeObjects(TCMalloc_ThreadCache* threadCache)
@@ -3658,7 +3868,9 @@ class PageMapMemoryUsageRecorder {
vm_range_recorder_t* m_recorder;
const RemoteMemoryReader& m_reader;
const FreeObjectFinder& m_freeObjectFinder;
- mutable HashSet<void*> m_seenPointers;
+
+ HashSet<void*> m_seenPointers;
+ Vector<Span*> m_coalescedSpans;
public:
PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
@@ -3670,51 +3882,133 @@ public:
, m_freeObjectFinder(freeObjectFinder)
{ }
- int visit(void* ptr) const
+ ~PageMapMemoryUsageRecorder()
+ {
+ ASSERT(!m_coalescedSpans.size());
+ }
+
+ void recordPendingRegions()
+ {
+ Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
+ vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
+ ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);
+
+ // Mark the memory region the spans represent as a candidate for containing pointers
+ if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
+ (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);
+
+ if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
+ m_coalescedSpans.clear();
+ return;
+ }
+
+ Vector<vm_range_t, 1024> allocatedPointers;
+ for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
+ Span* theSpan = m_coalescedSpans[i];
+ if (theSpan->free)
+ continue;
+
+ vm_address_t spanStartAddress = theSpan->start << kPageShift;
+ vm_size_t spanSizeInBytes = theSpan->length * kPageSize;
+
+ if (!theSpan->sizeclass) {
+ // If it's an allocated large object span, mark it as in use
+ if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
+ allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
+ } else {
+ const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);
+
+ // Mark each allocated small object within the span as in use
+ const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
+ for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
+ if (!m_freeObjectFinder.isFreeObject(object))
+ allocatedPointers.append((vm_range_t){object, objectSize});
+ }
+ }
+ }
+
+ (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());
+
+ m_coalescedSpans.clear();
+ }
+
+ int visit(void* ptr)
{
if (!ptr)
return 1;
Span* span = m_reader(reinterpret_cast<Span*>(ptr));
+ if (!span->start)
+ return 1;
+
if (m_seenPointers.contains(ptr))
return span->length;
m_seenPointers.add(ptr);
- // Mark the memory used for the Span itself as an administrative region
- vm_range_t ptrRange = { reinterpret_cast<vm_address_t>(ptr), sizeof(Span) };
- if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
- (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, &ptrRange, 1);
+ if (!m_coalescedSpans.size()) {
+ m_coalescedSpans.append(span);
+ return span->length;
+ }
- ptrRange.address = span->start << kPageShift;
- ptrRange.size = span->length * kPageSize;
+ Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
+ vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
+ vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;
- // Mark the memory region the span represents as candidates for containing pointers
- if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
- (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);
+ // If the new span is adjacent to the previous span, do nothing for now.
+ vm_address_t spanStartAddress = span->start << kPageShift;
+ if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
+ m_coalescedSpans.append(span);
+ return span->length;
+ }
- if (!span->free && (m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
- // If it's an allocated large object span, mark it as in use
- if (span->sizeclass == 0 && !m_freeObjectFinder.isFreeObject(reinterpret_cast<void*>(ptrRange.address)))
- (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &ptrRange, 1);
- else if (span->sizeclass) {
- const size_t byteSize = ByteSizeForClass(span->sizeclass);
- unsigned totalObjects = (span->length << kPageShift) / byteSize;
- ASSERT(span->refcount <= totalObjects);
- char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
+ // New span is not adjacent to previous span, so record the spans coalesced so far.
+ recordPendingRegions();
+ m_coalescedSpans.append(span);
- // Mark each allocated small object within the span as in use
- for (unsigned i = 0; i < totalObjects; i++) {
- char* thisObject = ptr + (i * byteSize);
- if (m_freeObjectFinder.isFreeObject(thisObject))
- continue;
+ return span->length;
+ }
+};
- vm_range_t objectRange = { reinterpret_cast<vm_address_t>(thisObject), byteSize };
- (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &objectRange, 1);
- }
- }
+class AdminRegionRecorder {
+ task_t m_task;
+ void* m_context;
+ unsigned m_typeMask;
+ vm_range_recorder_t* m_recorder;
+ const RemoteMemoryReader& m_reader;
+
+ Vector<vm_range_t, 1024> m_pendingRegions;
+
+public:
+ AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader)
+ : m_task(task)
+ , m_context(context)
+ , m_typeMask(typeMask)
+ , m_recorder(recorder)
+ , m_reader(reader)
+ { }
+
+ void recordRegion(vm_address_t ptr, size_t size)
+ {
+ if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
+ m_pendingRegions.append((vm_range_t){ ptr, size });
+ }
+
+ void visit(void* ptr, size_t size)
+ {
+ recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
+ }
+
+ void recordPendingRegions()
+ {
+ if (m_pendingRegions.size()) {
+ (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
+ m_pendingRegions.clear();
}
+ }
- return span->length;
+ ~AdminRegionRecorder()
+ {
+ ASSERT(!m_pendingRegions.size());
}
};
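[Note] The rewritten recorder no longer reports each span the moment it is visited: visit() accumulates runs of address-adjacent spans in m_coalescedSpans, and recordPendingRegions() flushes a whole run with a single recorder callback, which matters because each callback crosses into the inspecting process. The coalescing logic, reduced to ranges of pages:

    #include <vector>
    #include <cstdio>

    struct Range { unsigned long start, length; };   // in pages, like Span

    // Emit one report per maximal run of adjacent ranges, mirroring how
    // visit() batches spans and recordPendingRegions() flushes them.
    static void coalesceAndReport(const std::vector<Range>& spans)
    {
        size_t i = 0;
        while (i < spans.size()) {
            unsigned long start = spans[i].start;
            unsigned long end = start + spans[i].length;
            while (++i < spans.size() && spans[i].start == end)
                end += spans[i].length;
            printf("region [%lu, %lu)\n", start, end);  // one callback per run
        }
    }

    int main()
    {
        Range spans[] = { { 0, 2 }, { 2, 1 }, { 3, 4 }, { 10, 1 } };
        coalesceAndReport(std::vector<Range>(spans, spans + 4));
        // prints: region [0, 7)  then  region [10, 11)
    }

AdminRegionRecorder applies the same batching idea to administrative regions, buffering them in m_pendingRegions until recordPendingRegions() is called.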
@@ -3737,10 +4031,22 @@ kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typ
TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
- pageMap->visit(pageMapFinder, memoryReader);
+ pageMap->visitValues(pageMapFinder, memoryReader);
PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
- pageMap->visit(usageRecorder, memoryReader);
+ pageMap->visitValues(usageRecorder, memoryReader);
+ usageRecorder.recordPendingRegions();
+
+ AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader);
+ pageMap->visitAllocations(adminRegionRecorder, memoryReader);
+
+ PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
+ PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);
+
+ spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
+ pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
+
+ adminRegionRecorder.recordPendingRegions();
return 0;
}
@@ -3781,15 +4087,24 @@ void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
extern "C" {
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
- &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics };
+ &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
+
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)
+ , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
+#endif
+
+ };
}
-FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches)
+FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
: m_pageHeap(pageHeap)
, m_threadHeaps(threadHeaps)
, m_centralCaches(centralCaches)
+ , m_spanAllocator(spanAllocator)
+ , m_pageHeapAllocator(pageHeapAllocator)
{
memset(&m_zone, 0, sizeof(m_zone));
+ m_zone.version = 4;
m_zone.zone_name = "JavaScriptCore FastMalloc";
m_zone.size = &FastMallocZone::size;
m_zone.malloc = &FastMallocZone::zoneMalloc;
@@ -3805,7 +4120,7 @@ FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache
void FastMallocZone::init()
{
- static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache));
+ static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
}
#endif