author    | Murray Read <ext-murray.2.read@nokia.com> | 2012-04-17 15:35:49 (GMT)
committer | Qt by Nokia <qt-info@nokia.com> | 2012-04-20 11:28:34 (GMT)
commit    | 2e897ab99413bb1e4fd3f057f431785e3ac7abb0 (patch)
tree      | 69b91cdc53de749c872edaba12e65540de99e689
parent    | 51a2b0fde9bf635482fd2463b901139c82ed0dc4 (diff)
Reduce virtual address space use by JavaScriptCore on Symbian
JavaScriptCore reserves 128MB of virtual address space for each instance
of JSGlobalData that is created in an app, e.g. one per QDeclarativeView
(via QDeclarativeEngine and QScriptEngine). This can contribute to the app
running out of address space and then crashing.
The AlignedBlockAllocator is modified to be a process-wide singleton so that
all JSGlobalData objects share the same instance.
Now there is only one 128MB address space reservation per process.
Task-number: ou1cimx1#993804
Change-Id: I9e8702810541905d5e9be197a5f6a9fe8e9bd0c5
Reviewed-by: Shane Kearns <shane.kearns@accenture.com>
Reviewed-by: Pasi Pentikäinen <ext-pasi.a.pentikainen@nokia.com>
4 files changed, 88 insertions, 40 deletions
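The core of the change (see the BlockAllocatorSymbian.cpp hunks below) is that AlignedBlockAllocator is now created lazily, once per process, through a new instance() function: the first caller constructs the allocator and publishes it with an atomic compare-and-swap, and any thread that loses the race deletes its speculative copy and uses the winner's. Here is a minimal sketch of that creation pattern in portable C++; std::atomic stands in for Symbian's __e32_atomic_cas_ord_ptr, and SharedAllocator is a hypothetical stand-in for AlignedBlockAllocator, not code from the patch.

```cpp
// Sketch of the lazy, CAS-published process singleton used by the patch.
#include <atomic>
#include <cstddef>
#include <cstdio>

class SharedAllocator {
public:
    // Every caller passes the same sizes; only the first successful caller's
    // object survives, so all users share one address space reservation.
    static SharedAllocator& instance(std::size_t reservation, std::size_t blockSize)
    {
        SharedAllocator* current = s_instance.load(std::memory_order_acquire);
        if (!current) {
            // Speculatively create; several threads may race to get here.
            SharedAllocator* fresh = new SharedAllocator(reservation, blockSize);
            SharedAllocator* expected = nullptr;
            // Publish our object only if nobody beat us to it; otherwise
            // discard it and use the winner's object.
            if (s_instance.compare_exchange_strong(expected, fresh)) {
                current = fresh;
            } else {
                delete fresh;
                current = expected;
            }
        }
        return *current;
    }

private:
    SharedAllocator(std::size_t reservation, std::size_t blockSize)
        : m_reservation(reservation), m_blockSize(blockSize) {}

    std::size_t m_reservation;
    std::size_t m_blockSize;

    static std::atomic<SharedAllocator*> s_instance;
};

std::atomic<SharedAllocator*> SharedAllocator::s_instance{nullptr};

int main()
{
    // Both references resolve to the same object, i.e. one reservation.
    SharedAllocator& a = SharedAllocator::instance(128 * 1024 * 1024, 64 * 1024);
    SharedAllocator& b = SharedAllocator::instance(128 * 1024 * 1024, 64 * 1024);
    std::printf("same instance: %d\n", &a == &b);
    return 0;
}
```

Because the single instance is now reachable from every JSGlobalData, and hence from several threads, the patch also serialises alloc() and free() with an RFastLock, as the diff shows.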
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.cpp
index a7744dd..6af1784 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.cpp
@@ -135,10 +135,10 @@ Heap::Heap(JSGlobalData* globalData)
     , m_registeredThreads(0)
     , m_currentThreadRegistrar(0)
 #endif
-    , m_globalData(globalData)
 #if OS(SYMBIAN)
-    , m_blockallocator(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE)
+    , m_blockallocator(WTF::AlignedBlockAllocator::instance(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE))
 #endif
+    , m_globalData(globalData)
 {
     ASSERT(globalData);
     memset(&m_heap, 0, sizeof(CollectorHeap));
@@ -183,9 +183,6 @@ void Heap::destroy()
         t = next;
     }
 #endif
-#if OS(SYMBIAN)
-    m_blockallocator.destroy();
-#endif
     m_globalData = 0;
 }
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.h b/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.h
index d3616dc..9c6ffa7 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.h
@@ -173,7 +173,7 @@ namespace JSC {
 
 #if OS(SYMBIAN)
         // Allocates collector blocks with correct alignment
-        WTF::AlignedBlockAllocator m_blockallocator;
+        WTF::AlignedBlockAllocator& m_blockallocator;
 #endif
 
         JSGlobalData* m_globalData;
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.cpp
index 6a28e9e..f09bf8e 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.cpp
@@ -31,7 +31,7 @@
 #if OS(SYMBIAN)
 
 #include "BlockAllocatorSymbian.h"
-
+#include <e32atomics.h>
 
 namespace WTF {
 
@@ -43,40 +43,56 @@ AlignedBlockAllocator::AlignedBlockAllocator(TUint32 reservationSize, TUint32 bl
     : m_reservation(reservationSize), m_blockSize(blockSize)
 {
+    TInt err = m_lock.CreateLocal();
+    __ASSERT_ALWAYS(err == KErrNone, User::Panic(_L("AlignedBlkAlloc0"), err));
+
+    // Get system's page size value.
+    SYMBIAN_PAGESIZE(m_pageSize);
+
+    // We only accept multiples of system page size for both initial reservation and the alignment/block size
+    m_reservation = SYMBIAN_ROUNDUPTOMULTIPLE(m_reservation, m_pageSize);
+    __ASSERT_ALWAYS(SYMBIAN_ROUNDUPTOMULTIPLE(m_blockSize, m_pageSize), User::Panic(_L("AlignedBlkAlloc1"), KErrArgument));
-    // Get system's page size value.
-    SYMBIAN_PAGESIZE(m_pageSize);
-
-    // We only accept multiples of system page size for both initial reservation and the alignment/block size
-    m_reservation = SYMBIAN_ROUNDUPTOMULTIPLE(m_reservation, m_pageSize);
-    __ASSERT_ALWAYS(SYMBIAN_ROUNDUPTOMULTIPLE(m_blockSize, m_pageSize), User::Panic(_L("AlignedBlockAllocator1"), KErrArgument));
-
-    // Calculate max. bit flags we need to carve a reservationSize range into blockSize-sized blocks
-    m_map.numBits = m_reservation / m_blockSize;
-    const TUint32 bitsPerWord = 8*sizeof(TUint32);
-    const TUint32 numWords = (m_map.numBits + bitsPerWord -1) / bitsPerWord;
-
-    m_map.bits = new TUint32[numWords];
-    __ASSERT_ALWAYS(m_map.bits, User::Panic(_L("AlignedBlockAllocator2"), KErrNoMemory));
-    m_map.clearAll();
-
-    // Open a Symbian RChunk, and reserve requested virtual address range
-    // Any thread in this process can operate this rchunk due to EOwnerProcess access rights.
-    TInt ret = m_chunk.CreateDisconnectedLocal(0 , 0, (TInt)m_reservation , EOwnerProcess);
-    if (ret != KErrNone)
-        User::Panic(_L("AlignedBlockAllocator3"), ret);
-
-    // This is the offset to m_chunk.Base() required to make it m_blockSize-aligned
-    m_offset = SYMBIAN_ROUNDUPTOMULTIPLE(TUint32(m_chunk.Base()), m_blockSize) - TUint(m_chunk.Base());
+    // Calculate max. bit flags we need to carve a reservationSize range into blockSize-sized blocks
+    m_map.numBits = m_reservation / m_blockSize;
+    const TUint32 bitsPerWord = 8*sizeof(TUint32);
+    const TUint32 numWords = (m_map.numBits + bitsPerWord -1) / bitsPerWord;
+
+    m_map.bits = new TUint32[numWords];
+    __ASSERT_ALWAYS(m_map.bits, User::Panic(_L("AlignedBlkAlloc2"), KErrNoMemory));
+    m_map.clearAll();
+
+    // Open a Symbian RChunk, and reserve requested virtual address range
+    // Any thread in this process can operate this rchunk due to EOwnerProcess access rights.
+    TInt ret = m_chunk.CreateDisconnectedLocal(0 , 0, (TInt)m_reservation , EOwnerProcess);
+    if (ret != KErrNone)
+        User::Panic(_L("AlignedBlkAlloc3"), ret);
+
+    // This is the offset to m_chunk.Base() required to make it m_blockSize-aligned
+    m_offset = SYMBIAN_ROUNDUPTOMULTIPLE(TUint32(m_chunk.Base()), m_blockSize) - TUint(m_chunk.Base());
 }
 
-void* AlignedBlockAllocator::alloc()
+struct AlignedBlockAllocatorScopeLock {
+    RFastLock& lock;
+    AlignedBlockAllocatorScopeLock(RFastLock& aLock) : lock(aLock)
+    {
+        lock.Wait();
+    }
+    ~AlignedBlockAllocatorScopeLock()
+    {
+        lock.Signal();
+    }
+};
+
+void* AlignedBlockAllocator::alloc()
+{
     TInt freeRam = 0;
     void* address = 0;
-
+
+    // lock until this function returns
+    AlignedBlockAllocatorScopeLock lock(m_lock);
+
     // Look up first free slot in bit map
     const TInt freeIdx = m_map.findFree();
@@ -100,11 +116,14 @@ void* AlignedBlockAllocator::alloc()
 
 void AlignedBlockAllocator::free(void* block)
 {
+    // lock until this function returns
+    AlignedBlockAllocatorScopeLock lock(m_lock);
+
     // Calculate index of block to be freed
     TInt idx = TUint(static_cast<TUint8*>(block) - m_chunk.Base() - m_offset) / m_blockSize;
-    __ASSERT_DEBUG(idx >= 0 && idx < m_map.numBits, User::Panic(_L("AlignedBlockAllocator4"), KErrCorrupt)); // valid index check
-    __ASSERT_DEBUG(m_map.get(idx), User::Panic(_L("AlignedBlockAllocator5"), KErrCorrupt)); // in-use flag check
+    __ASSERT_DEBUG(idx >= 0 && idx < m_map.numBits, User::Panic(_L("AlignedBlkAlloc4"), KErrCorrupt)); // valid index check
+    __ASSERT_DEBUG(m_map.get(idx), User::Panic(_L("AlignedBlkAlloc5"), KErrCorrupt)); // in-use flag check
 
     // Return committed region to system RAM pool (the physical RAM becomes usable by others)
     TInt ret = m_chunk.Decommit(m_offset + m_blockSize * idx, m_blockSize);
@@ -127,6 +146,33 @@ AlignedBlockAllocator::~AlignedBlockAllocator()
     delete [] m_map.bits;
 }
 
+struct AlignedBlockAllocatorPtr
+{
+    AlignedBlockAllocator* ptr;
+    ~AlignedBlockAllocatorPtr()
+    {
+        delete ptr;
+        ptr = 0;
+    }
+};
+
+AlignedBlockAllocator& AlignedBlockAllocator::instance(TUint32 reservationSize, TUint32 blockSize)
+{
+    // static cleanup for plugin unload case where leaking this much address space would not be good.
+    static AlignedBlockAllocatorPtr pAllocator;
+    // static data is zero initialized, so using zero to indicate not-yet-created.
+    if (!pAllocator.ptr)
+    {
+        AlignedBlockAllocator* newAllocator = new AlignedBlockAllocator(reservationSize, blockSize);
+        AlignedBlockAllocator* expected = 0;
+        // atomic if (pAllocator == 0) pAllocator = newAllocator; else delete newAllocator;
+        if (!__e32_atomic_cas_ord_ptr(&pAllocator.ptr, &expected, newAllocator))
+            delete newAllocator;
+        __ASSERT_ALWAYS(pAllocator.ptr, User::Panic(_L("AlignedBlkAlloc6"), KErrNoMemory));
+    }
+    return *pAllocator.ptr;
+}
+
 } // end of namespace
 
 #endif // SYMBIAN
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.h
index 21422f6..5f1a8ca 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.h
@@ -60,18 +60,22 @@ namespace WTF {
  */
 class AlignedBlockAllocator {
     public:
+        static AlignedBlockAllocator& instance(TUint32 reservationSize, TUint32 blockSize);
+        void* alloc();
+        void free(void* data);
+
+    private:
         AlignedBlockAllocator(TUint32 reservationSize, TUint32 blockSize);
         ~AlignedBlockAllocator();
         void destroy();
-        void* alloc();
-        void free(void* data);
-
-    private:
+
+    private:
         RChunk m_chunk; // Symbian chunk that lets us reserve/commit/decommit
         TUint m_offset; // offset of first committed region from base
        TInt m_pageSize; // cached value of system page size, typically 4K on Symbian
         TUint32 m_reservation;
-        TUint32 m_blockSize;
+        TUint32 m_blockSize;
+        RFastLock m_lock;
 
         // Tracks comitted/decommitted state of a blockSize region
         struct {
@@ -111,6 +115,7 @@ class AlignedBlockAllocator {
 
         } m_map;
 
+    friend class AlignedBlockAllocatorPtr;
 };
 
 }
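A side note on the locking added above: RFastLock exposes explicit Wait()/Signal() calls with no RAII behaviour, so the patch wraps it in a small scope-guard struct (AlignedBlockAllocatorScopeLock) that releases the lock on every return path of alloc() and free(). A generic sketch of that idiom follows; FakeLock and the function names are illustrative stand-ins, not code from the patch.

```cpp
// Generic scope-guard sketch for a Wait()/Signal()-style lock such as RFastLock.
#include <cstdio>

struct FakeLock {
    void Wait()   { std::puts("lock acquired"); }
    void Signal() { std::puts("lock released"); }
};

template<typename Lock>
struct ScopeLock {
    explicit ScopeLock(Lock& l) : m_lock(l) { m_lock.Wait(); }
    ~ScopeLock() { m_lock.Signal(); }  // runs on every return path
    ScopeLock(const ScopeLock&) = delete;
    ScopeLock& operator=(const ScopeLock&) = delete;
private:
    Lock& m_lock;
};

static FakeLock g_lock;

void* allocateBlock()
{
    ScopeLock<FakeLock> guard(g_lock);  // held until this function returns
    // ... find a free slot, commit memory, etc. ...
    return nullptr;
}

int main()
{
    allocateBlock();
    return 0;
}
```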