author    Jerome Pasion <jerome.pasion@nokia.com>    2010-10-04 07:11:59 (GMT)
committer Jerome Pasion <jerome.pasion@nokia.com>    2010-10-04 07:11:59 (GMT)
commit    b065a6838a9c54cb4bba6519695c657db5270ce4 (patch)
tree      b10925754df0608beb5de941b5c1ea4da1f3bd55 /src
parent    813e28ea364e2c1cd568bc3ac38aa5142f025e7d (diff)
parent    9d0317c91dbbe660af2ed2aa8ea47446049467d2 (diff)
Merge branch '4.7' of scm.dev.nokia.troll.no:qt/qt-doc-team into 4.7
Diffstat (limited to 'src')
-rw-r--r--  src/activeqt/shared/qaxtypes.cpp | 6
-rw-r--r--  src/corelib/arch/symbian/arch.pri | 15
-rw-r--r--  src/corelib/arch/symbian/common_p.h | 106
-rw-r--r--  src/corelib/arch/symbian/debugfunction.cpp | 1143
-rw-r--r--  src/corelib/arch/symbian/dla_p.h | 969
-rw-r--r--  src/corelib/arch/symbian/heap_hybrid.cpp | 3346
-rw-r--r--  src/corelib/arch/symbian/heap_hybrid_p.h | 402
-rw-r--r--  src/corelib/arch/symbian/page_alloc_p.h | 68
-rw-r--r--  src/corelib/arch/symbian/qt_heapsetup_symbian.cpp | 105
-rw-r--r--  src/corelib/arch/symbian/qt_hybridheap_symbian_p.h | 174
-rw-r--r--  src/corelib/arch/symbian/slab_p.h | 125
-rw-r--r--  src/corelib/global/qglobal.h | 1
-rw-r--r--  src/declarative/debugger/qdeclarativedebug.cpp | 56
-rw-r--r--  src/declarative/debugger/qdeclarativedebug_p.h | 7
-rw-r--r--  src/declarative/debugger/qdeclarativedebugclient.cpp | 201
-rw-r--r--  src/declarative/debugger/qdeclarativedebugclient_p.h | 10
-rw-r--r--  src/declarative/debugger/qdeclarativedebugservice.cpp | 133
-rw-r--r--  src/declarative/debugger/qdeclarativedebugservice_p.h | 9
-rw-r--r--  src/declarative/debugger/qdeclarativedebugtrace.cpp | 8
-rw-r--r--  src/declarative/graphicsitems/qdeclarativeflickable.cpp | 32
-rw-r--r--  src/declarative/graphicsitems/qdeclarativeitem.cpp | 27
-rw-r--r--  src/declarative/graphicsitems/qdeclarativeitemsmodule.cpp | 98
-rw-r--r--  src/declarative/graphicsitems/qdeclarativepath.cpp | 120
-rw-r--r--  src/declarative/graphicsitems/qdeclarativetextedit.cpp | 4
-rw-r--r--  src/declarative/graphicsitems/qdeclarativetextinput.cpp | 6
-rw-r--r--  src/declarative/graphicsitems/qdeclarativevisualitemmodel.cpp | 2
-rw-r--r--  src/declarative/qml/parser/qdeclarativejsast_p.h | 149
-rw-r--r--  src/declarative/qml/qdeclarativecompiledbindings.cpp | 10
-rw-r--r--  src/declarative/qml/qdeclarativecompiler.cpp | 4
-rw-r--r--  src/declarative/qml/qdeclarativecomponent.cpp | 2
-rw-r--r--  src/declarative/qml/qdeclarativecontext.cpp | 4
-rw-r--r--  src/declarative/qml/qdeclarativedom.cpp | 2
-rw-r--r--  src/declarative/qml/qdeclarativeengine.cpp | 12
-rw-r--r--  src/declarative/qml/qdeclarativeexpression.cpp | 2
-rw-r--r--  src/declarative/qml/qdeclarativemetatype.cpp | 25
-rw-r--r--  src/declarative/qml/qdeclarativemetatype_p.h | 1
-rw-r--r--  src/declarative/qml/qdeclarativeproperty.cpp | 4
-rw-r--r--  src/declarative/qml/qdeclarativetypenamescriptclass.cpp | 2
-rw-r--r--  src/declarative/qml/qdeclarativevaluetype.cpp | 12
-rw-r--r--  src/declarative/util/qdeclarativeanimation.cpp | 2
-rw-r--r--  src/declarative/util/qdeclarativefontloader.cpp | 2
-rw-r--r--  src/declarative/util/qdeclarativetimer.cpp | 2
-rw-r--r--  src/declarative/util/qdeclarativeutilmodule.cpp | 54
-rw-r--r--  src/declarative/util/qdeclarativexmllistmodel.cpp | 2
-rw-r--r--  src/gui/graphicsview/qgraphicsitem.cpp | 16
-rw-r--r--  src/gui/graphicsview/qgraphicsitem_p.h | 4
-rw-r--r--  src/gui/kernel/qwidget_x11.cpp | 4
-rw-r--r--  src/imports/particles/qdeclarativeparticles.cpp | 2
-rw-r--r--  src/opengl/gl2paintengineex/qglengineshadermanager.cpp | 17
-rw-r--r--  src/opengl/gl2paintengineex/qpaintengineex_opengl2.cpp | 71
-rw-r--r--  src/opengl/gl2paintengineex/qtextureglyphcache_gl.cpp | 5
-rw-r--r--  src/opengl/gl2paintengineex/qtextureglyphcache_gl_p.h | 8
-rw-r--r--  src/s60installs/bwins/QtCoreu.def | 1
-rw-r--r--  src/s60installs/bwins/QtDeclarativeu.def | 218
-rw-r--r--  src/s60installs/eabi/QtCoreu.def | 1
-rw-r--r--  src/s60installs/eabi/QtDeclarativeu.def | 232
-rw-r--r--  src/s60main/newallocator_hook.cpp | 58
-rw-r--r--  src/s60main/s60main.pro | 3
-rw-r--r--  src/script/api/qscriptengine.cpp | 7
59 files changed, 7537 insertions, 574 deletions
diff --git a/src/activeqt/shared/qaxtypes.cpp b/src/activeqt/shared/qaxtypes.cpp
index 957733e..ff21a9f 100644
--- a/src/activeqt/shared/qaxtypes.cpp
+++ b/src/activeqt/shared/qaxtypes.cpp
@@ -1376,8 +1376,10 @@ QVariant VARIANTToQVariant(const VARIANT &arg, const QByteArray &typeName, uint
}
QVariant::Type proptype = (QVariant::Type)type;
- if (proptype == QVariant::Invalid && !typeName.isEmpty())
- proptype = QVariant::nameToType(typeName);
+ if (proptype == QVariant::Invalid && !typeName.isEmpty()) {
+ if (typeName != "QVariant")
+ proptype = QVariant::nameToType(typeName);
+ }
if (proptype != QVariant::LastType && proptype != QVariant::Invalid && var.type() != proptype) {
if (var.canConvert(proptype)) {
QVariant oldvar = var;
diff --git a/src/corelib/arch/symbian/arch.pri b/src/corelib/arch/symbian/arch.pri
index 3ef1c9e..70daee3 100644
--- a/src/corelib/arch/symbian/arch.pri
+++ b/src/corelib/arch/symbian/arch.pri
@@ -2,4 +2,17 @@
# Symbian architecture
#
SOURCES += $$QT_ARCH_CPP/qatomic_symbian.cpp \
- $$QT_ARCH_CPP/../armv6/qatomic_generic_armv6.cpp
+ $$QT_ARCH_CPP/../armv6/qatomic_generic_armv6.cpp \
+ $$QT_ARCH_CPP/heap_hybrid.cpp \
+ $$QT_ARCH_CPP/debugfunction.cpp \
+ $$QT_ARCH_CPP/qt_heapsetup_symbian.cpp
+
+HEADERS += $$QT_ARCH_CPP/dla_p.h \
+ $$QT_ARCH_CPP/heap_hybrid_p.h \
+ $$QT_ARCH_CPP/common_p.h \
+ $$QT_ARCH_CPP/page_alloc_p.h \
+ $$QT_ARCH_CPP/slab_p.h \
+           $$QT_ARCH_CPP/qt_hybridheap_symbian_p.h
+
+exists($${EPOCROOT}epoc32/include/platform/u32std.h):DEFINES += QT_SYMBIAN_HAVE_U32STD_H
+exists($${EPOCROOT}epoc32/include/platform/e32btrace.h):DEFINES += QT_SYMBIAN_HAVE_E32BTRACE_H
diff --git a/src/corelib/arch/symbian/common_p.h b/src/corelib/arch/symbian/common_p.h
new file mode 100644
index 0000000..1076621
--- /dev/null
+++ b/src/corelib/arch/symbian/common_p.h
@@ -0,0 +1,106 @@
+/****************************************************************************
+**
+** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef __E32_COMMON_H__
+#define __E32_COMMON_H__
+
+#ifdef __KERNEL_MODE__
+#include <e32cmn.h>
+#include <e32panic.h>
+#include "u32std.h"
+#else
+#include <e32std.h>
+#include <e32base.h>
+#include <e32math.h>
+#include <e32svr.h>
+#include <e32ver.h>
+#include <e32hal.h>
+#include <e32panic.h>
+// this is a backport of the Symbian^4 allocator to the Symbian^3 SDK, which does not contain u32exec.h
+//#include <u32exec.h>
+#endif
+
+GLREF_C void Panic(TCdtPanic aPanic);
+GLDEF_C void PanicBadArrayIndex();
+GLREF_C TInt __DoConvertNum(TUint, TRadix, TUint, TUint8*&);
+GLREF_C TInt __DoConvertNum(Uint64, TRadix, TUint, TUint8*&);
+
+#ifdef __KERNEL_MODE__
+GLREF_C void KernHeapFault(TCdtPanic aPanic);
+GLREF_C void KHeapCheckThreadState();
+TInt StringLength(const TUint16* aPtr);
+TInt StringLength(const TUint8* aPtr);
+
+#define STD_CLASS Kern
+#define STRING_LENGTH(s) StringLength(s)
+#define STRING_LENGTH_16(s) StringLength(s)
+#define PANIC_CURRENT_THREAD(c,r) Kern::PanicCurrentThread(c, r)
+#define __KERNEL_CHECK_RADIX(r) __ASSERT_ALWAYS(((r)==EDecimal)||((r)==EHex),Panic(EInvalidRadix))
+#define APPEND_BUF_SIZE 10
+#define APPEND_BUF_SIZE_64 20
+#define HEAP_PANIC(r) Kern::Printf("HEAP CORRUPTED %s %d", __FILE__, __LINE__), RHeapK::Fault(r)
+#define GET_PAGE_SIZE(x) x = M::PageSizeInBytes()
+#define DIVISION_BY_ZERO() FAULT()
+
+#ifdef _DEBUG
+#define __CHECK_THREAD_STATE RHeapK::CheckThreadState()
+#else
+#define __CHECK_THREAD_STATE
+#endif
+
+#else
+
+#define STD_CLASS User
+#define STRING_LENGTH(s) User::StringLength(s)
+#define STRING_LENGTH_16(s) User::StringLength(s)
+#define PANIC_CURRENT_THREAD(c,r) User::Panic(c, r)
+#define MEM_COMPARE_16 Mem::Compare
+#define __KERNEL_CHECK_RADIX(r)
+#define APPEND_BUF_SIZE 32
+#define APPEND_BUF_SIZE_64 64
+#define HEAP_PANIC(r) RDebug::Printf("HEAP CORRUPTED %s %d", __FILE__, __LINE__), Panic(r)
+#define GET_PAGE_SIZE(x) UserHal::PageSizeInBytes(x)
+#define DIVISION_BY_ZERO() User::RaiseException(EExcIntegerDivideByZero)
+#define __CHECK_THREAD_STATE
+
+#endif // __KERNEL_MODE__
+
+#endif
diff --git a/src/corelib/arch/symbian/debugfunction.cpp b/src/corelib/arch/symbian/debugfunction.cpp
new file mode 100644
index 0000000..f3b5d2d
--- /dev/null
+++ b/src/corelib/arch/symbian/debugfunction.cpp
@@ -0,0 +1,1143 @@
+/****************************************************************************
+**
+** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qt_hybridheap_symbian_p.h"
+
+#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
+
+#define GM (&iGlobalMallocState)
+#define __HEAP_CORRUPTED_TRACE(t,p,l) BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)t, (TUint32)p, (TUint32)l);
+#define __HEAP_CORRUPTED_TEST(c,x, p,l) if (!c) { if (iFlags & (EMonitorMemory+ETraceAllocs) ) __HEAP_CORRUPTED_TRACE(this,p,l) HEAP_PANIC(x); }
+#define __HEAP_CORRUPTED_TEST_STATIC(c,t,x,p,l) if (!c) { if (t && (t->iFlags & (EMonitorMemory+ETraceAllocs) )) __HEAP_CORRUPTED_TRACE(t,p,l) HEAP_PANIC(x); }
+
+TInt RHybridHeap::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
+{
+ TInt r = KErrNone;
+ switch(aFunc)
+ {
+
+ case RAllocator::ECount:
+ struct HeapInfo info;
+ Lock();
+ GetInfo(&info, NULL);
+ *(unsigned*)a1 = info.iFreeN;
+ r = info.iAllocN;
+ Unlock();
+ break;
+
+ case RAllocator::EMarkStart:
+ __DEBUG_ONLY(DoMarkStart());
+ break;
+
+ case RAllocator::EMarkEnd:
+ __DEBUG_ONLY( r = DoMarkEnd((TInt)a1) );
+ break;
+
+ case RAllocator::ECheck:
+ r = DoCheckHeap((SCheckInfo*)a1);
+ break;
+
+ case RAllocator::ESetFail:
+ __DEBUG_ONLY(DoSetAllocFail((TAllocFail)(TInt)a1, (TInt)a2));
+ break;
+
+ case RHybridHeap::EGetFail:
+ __DEBUG_ONLY(r = iFailType);
+ break;
+
+ case RHybridHeap::ESetBurstFail:
+#if _DEBUG
+ {
+ SRAllocatorBurstFail* fail = (SRAllocatorBurstFail*) a2;
+ DoSetAllocFail((TAllocFail)(TInt)a1, fail->iRate, fail->iBurst);
+ }
+#endif
+ break;
+
+ case RHybridHeap::ECheckFailure:
+ // iRand will be incremented for each EFailNext, EBurstFailNext,
+ // EDeterministic and EBurstDeterministic failure.
+ r = iRand;
+ break;
+
+ case RAllocator::ECopyDebugInfo:
+ {
+ TInt nestingLevel = ((SDebugCell*)a1)[-1].nestingLevel;
+ ((SDebugCell*)a2)[-1].nestingLevel = nestingLevel;
+ break;
+ }
+
+ case RHybridHeap::EGetSize:
+ {
+ r = iChunkSize - sizeof(RHybridHeap);
+ break;
+ }
+
+ case RHybridHeap::EGetMaxLength:
+ {
+ r = iMaxLength;
+ break;
+ }
+
+ case RHybridHeap::EGetBase:
+ {
+ *(TAny**)a1 = iBase;
+ break;
+ }
+
+ case RHybridHeap::EAlignInteger:
+ {
+ r = _ALIGN_UP((TInt)a1, iAlign);
+ break;
+ }
+
+ case RHybridHeap::EAlignAddr:
+ {
+ *(TAny**)a2 = (TAny*)_ALIGN_UP((TLinAddr)a1, iAlign);
+ break;
+ }
+
+ case RHybridHeap::EWalk:
+ struct HeapInfo hinfo;
+ SWalkInfo winfo;
+ Lock();
+ winfo.iFunction = (TWalkFunc)a1;
+ winfo.iParam = a2;
+ winfo.iHeap = (RHybridHeap*)this;
+ GetInfo(&hinfo, &winfo);
+ Unlock();
+ break;
+
+#ifndef __KERNEL_MODE__
+
+ case RHybridHeap::EHybridHeap:
+ {
+ if ( !a1 )
+ return KErrGeneral;
+ STestCommand* cmd = (STestCommand*)a1;
+ switch ( cmd->iCommand )
+ {
+ case EGetConfig:
+ cmd->iConfig.iSlabBits = iSlabConfigBits;
+ cmd->iConfig.iDelayedSlabThreshold = iPageThreshold;
+ cmd->iConfig.iPagePower = iPageThreshold;
+ break;
+
+ case ESetConfig:
+ //
+ // New configuration data for slab and page allocator.
+ // Reset heap to get data into use
+ //
+#if USE_HYBRID_HEAP
+ iSlabConfigBits = cmd->iConfig.iSlabBits & 0x3fff;
+ iSlabInitThreshold = cmd->iConfig.iDelayedSlabThreshold;
+ iPageThreshold = (cmd->iConfig.iPagePower & 0x1f);
+ Reset();
+#endif
+ break;
+
+ case EHeapMetaData:
+ cmd->iData = this;
+ break;
+
+ case ETestData:
+ iTestData = cmd->iData;
+ break;
+
+ default:
+ return KErrNotSupported;
+
+ }
+
+ break;
+ }
+#endif // __KERNEL_MODE
+
+ default:
+ return KErrNotSupported;
+
+ }
+ return r;
+}
+
+void RHybridHeap::Walk(SWalkInfo* aInfo, TAny* aBfr, TInt aLth, TCellType aBfrType, TAllocatorType aAllocatorType)
+{
+ //
+ // This function is always called from RHybridHeap::GetInfo.
+ // Actual walk function is called if SWalkInfo pointer is defined
+ //
+ //
+ if ( aInfo )
+ {
+#ifdef __KERNEL_MODE__
+ (void)aAllocatorType;
+#if defined(_DEBUG)
+ if ( aBfrType == EGoodAllocatedCell )
+ aInfo->iFunction(aInfo->iParam, aBfrType, ((TUint8*)aBfr+EDebugHdrSize), (aLth-EDebugHdrSize) );
+ else
+ aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
+#else
+ aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
+#endif
+
+#else // __KERNEL_MODE__
+
+ if ( aAllocatorType & (EFullSlab + EPartialFullSlab + EEmptySlab + ESlabSpare) )
+ {
+ if ( aInfo->iHeap )
+ {
+ TUint32 dummy;
+ TInt npages;
+ aInfo->iHeap->DoCheckSlab((slab*)aBfr, aAllocatorType);
+ __HEAP_CORRUPTED_TEST_STATIC(aInfo->iHeap->CheckBitmap(Floor(aBfr, PAGESIZE), PAGESIZE, dummy, npages),
+ aInfo->iHeap, ETHeapBadCellAddress, aBfr, aLth);
+ }
+ if ( aAllocatorType & EPartialFullSlab )
+ WalkPartialFullSlab(aInfo, (slab*)aBfr, aBfrType, aLth);
+ else if ( aAllocatorType & EFullSlab )
+ WalkFullSlab(aInfo, (slab*)aBfr, aBfrType, aLth);
+ }
+#if defined(_DEBUG)
+ else if ( aBfrType == EGoodAllocatedCell )
+ aInfo->iFunction(aInfo->iParam, aBfrType, ((TUint8*)aBfr+EDebugHdrSize), (aLth-EDebugHdrSize) );
+ else
+ aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
+#else
+ else
+ aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
+#endif
+
+#endif // __KERNEL_MODE
+ }
+}
+
+#ifndef __KERNEL_MODE__
+void RHybridHeap::WalkPartialFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType /*aBfrType*/, TInt /*aLth*/)
+{
+ if ( aInfo )
+ {
+ //
+ // Build bitmap of free buffers in the partial full slab
+ //
+ TUint32 bitmap[4];
+ __HEAP_CORRUPTED_TEST_STATIC( (aInfo->iHeap != NULL), aInfo->iHeap, ETHeapBadCellAddress, 0, aSlab);
+ aInfo->iHeap->BuildPartialSlabBitmap(bitmap, aSlab);
+ //
+ // Find used (allocated) buffers from iPartial full slab
+ //
+ TUint32 h = aSlab->iHeader;
+ TUint32 size = SlabHeaderSize(h);
+ TUint32 count = KMaxSlabPayload / size; // Total buffer count in slab
+ TUint32 i = 0;
+ TUint32 ix = 0;
+ TUint32 bit = 1;
+
+ while ( i < count )
+ {
+
+ if ( bitmap[ix] & bit )
+ {
+ aInfo->iFunction(aInfo->iParam, EGoodFreeCell, &aSlab->iPayload[i*size], size );
+ }
+ else
+ {
+#if defined(_DEBUG)
+ aInfo->iFunction(aInfo->iParam, EGoodAllocatedCell, (&aSlab->iPayload[i*size]+EDebugHdrSize), (size-EDebugHdrSize) );
+#else
+ aInfo->iFunction(aInfo->iParam, EGoodAllocatedCell, &aSlab->iPayload[i*size], size );
+#endif
+ }
+ bit <<= 1;
+ if ( bit == 0 )
+ {
+ bit = 1;
+ ix ++;
+ }
+
+ i ++;
+ }
+ }
+
+}
+
+void RHybridHeap::WalkFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType aBfrType, TInt /*aLth*/)
+{
+ if ( aInfo )
+ {
+ TUint32 h = aSlab->iHeader;
+ TUint32 size = SlabHeaderSize(h);
+ TUint32 count = (SlabHeaderUsedm4(h) + 4) / size;
+ TUint32 i = 0;
+ while ( i < count )
+ {
+#if defined(_DEBUG)
+ if ( aBfrType == EGoodAllocatedCell )
+ aInfo->iFunction(aInfo->iParam, aBfrType, (&aSlab->iPayload[i*size]+EDebugHdrSize), (size-EDebugHdrSize) );
+ else
+ aInfo->iFunction(aInfo->iParam, aBfrType, &aSlab->iPayload[i*size], size );
+#else
+ aInfo->iFunction(aInfo->iParam, aBfrType, &aSlab->iPayload[i*size], size );
+#endif
+ i ++;
+ }
+ }
+}
+
+void RHybridHeap::BuildPartialSlabBitmap(TUint32* aBitmap, slab* aSlab, TAny* aBfr)
+{
+ //
+ // Build a bitmap of free buffers in a partial full slab
+ //
+ TInt i;
+ TUint32 bit = 0;
+ TUint32 index;
+ TUint32 h = aSlab->iHeader;
+ TUint32 used = SlabHeaderUsedm4(h)+4;
+ TUint32 size = SlabHeaderSize(h);
+ TInt count = (KMaxSlabPayload / size);
+ TInt free_count = count - (used / size); // Total free buffer count in slab
+ aBitmap[0] = 0, aBitmap[1] = 0, aBitmap[2] = 0, aBitmap[3] = 0;
+ TUint32 offs = (h & 0xff) << 2;
+
+ //
+ // Process first buffer in partial slab free buffer chain
+ //
+ while ( offs )
+ {
+ unsigned char* p = (unsigned char*)Offset(aSlab, offs);
+ __HEAP_CORRUPTED_TEST( (sizeof(slabhdr) <= offs), ETHeapBadCellAddress, p, aSlab);
+ offs -= sizeof(slabhdr);
+ __HEAP_CORRUPTED_TEST( (offs % size == 0), ETHeapBadCellAddress, p, aSlab);
+ index = (offs / size); // Bit index in bitmap
+ i = 0;
+ while ( i < 4 )
+ {
+ if ( index < 32 )
+ {
+ bit = (1 << index);
+ break;
+ }
+ index -= 32;
+ i ++;
+ }
+
+ __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, p, aSlab); // Buffer already in chain
+
+ aBitmap[i] |= bit;
+ free_count --;
+ offs = ((unsigned)*p) << 2; // Next in free chain
+ }
+
+ __HEAP_CORRUPTED_TEST( (free_count >= 0), ETHeapBadCellAddress, aBfr, aSlab); // free buffer count/size mismatch
+ //
+   // Process the rest of the free buffers, which are in the
+   // wilderness (at the end of the slab)
+ //
+ index = count - 1;
+ i = index / 32;
+ index = index % 32;
+ while ( free_count && (i >= 0))
+ {
+ bit = (1 << index);
+ __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, aBfr, aSlab); // Buffer already in chain
+ aBitmap[i] |= bit;
+ if ( index )
+ index --;
+ else
+ {
+ index = 31;
+ i --;
+ }
+ free_count --;
+ }
+
+ if ( aBfr ) // Assure that specified buffer does NOT exist in partial slab free buffer chain
+ {
+ offs = LowBits(aBfr, SLABSIZE);
+ __HEAP_CORRUPTED_TEST( (sizeof(slabhdr) <= offs), ETHeapBadCellAddress, aBfr, aSlab);
+ offs -= sizeof(slabhdr);
+ __HEAP_CORRUPTED_TEST( ((offs % size) == 0), ETHeapBadCellAddress, aBfr, aSlab);
+ index = (offs / size); // Bit index in bitmap
+ i = 0;
+ while ( i < 4 )
+ {
+ if ( index < 32 )
+ {
+ bit = (1 << index);
+ break;
+ }
+ index -= 32;
+ i ++;
+ }
+ __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, aBfr, aSlab); // Buffer already in chain
+ }
+}
+
+#endif // __KERNEL_MODE__
+
+void RHybridHeap::WalkCheckCell(TAny* aPtr, TCellType aType, TAny* aCell, TInt aLen)
+{
+ (void)aCell;
+ SHeapCellInfo& info = *(SHeapCellInfo*)aPtr;
+ switch(aType)
+ {
+ case EGoodAllocatedCell:
+ {
+ ++info.iTotalAlloc;
+ info.iTotalAllocSize += aLen;
+#if defined(_DEBUG)
+ RHybridHeap& h = *info.iHeap;
+ SDebugCell* DbgCell = (SDebugCell*)((TUint8*)aCell-EDebugHdrSize);
+ if ( DbgCell->nestingLevel == h.iNestingLevel )
+ {
+ if (++info.iLevelAlloc==1)
+ info.iStranded = DbgCell;
+#ifdef __KERNEL_MODE__
+ if (KDebugNum(KSERVER) || KDebugNum(KTESTFAST))
+ {
+ Kern::Printf("LEAKED KERNEL HEAP CELL @ %08x : len=%d", aCell, aLen);
+ TLinAddr base = ((TLinAddr)aCell)&~0x0f;
+ TLinAddr end = ((TLinAddr)aCell)+(TLinAddr)aLen;
+ while(base<end)
+ {
+ const TUint32* p = (const TUint32*)base;
+ Kern::Printf("%08x: %08x %08x %08x %08x", p, p[0], p[1], p[2], p[3]);
+ base += 16;
+ }
+ }
+#endif
+ }
+#endif
+ break;
+ }
+ case EGoodFreeCell:
+ ++info.iTotalFree;
+ break;
+ case EBadAllocatedCellSize:
+ HEAP_PANIC(ETHeapBadAllocatedCellSize);
+ case EBadAllocatedCellAddress:
+ HEAP_PANIC(ETHeapBadAllocatedCellAddress);
+ case EBadFreeCellAddress:
+ HEAP_PANIC(ETHeapBadFreeCellAddress);
+ case EBadFreeCellSize:
+ HEAP_PANIC(ETHeapBadFreeCellSize);
+ default:
+ HEAP_PANIC(ETHeapWalkBadCellType);
+ }
+}
+
+
+TInt RHybridHeap::DoCheckHeap(SCheckInfo* aInfo)
+{
+ (void)aInfo;
+ SHeapCellInfo info;
+ memclr(&info, sizeof(info));
+ info.iHeap = this;
+ struct HeapInfo hinfo;
+ SWalkInfo winfo;
+ Lock();
+ DoCheckMallocState(GM); // Check DL heap internal structure
+#ifndef __KERNEL_MODE__
+ TUint32 dummy;
+ TInt npages;
+ __HEAP_CORRUPTED_TEST(CheckBitmap(NULL, 0, dummy, npages), ETHeapBadCellAddress, this, 0); // Check page allocator buffers
+ DoCheckSlabTrees();
+ DoCheckCommittedSize(npages, GM);
+#endif
+ winfo.iFunction = WalkCheckCell;
+ winfo.iParam = &info;
+ winfo.iHeap = (RHybridHeap*)this;
+ GetInfo(&hinfo, &winfo);
+ Unlock();
+
+#if defined(_DEBUG)
+ if (!aInfo)
+ return KErrNone;
+ TInt expected = aInfo->iCount;
+ TInt actual = aInfo->iAll ? info.iTotalAlloc : info.iLevelAlloc;
+ if (actual!=expected && !iTestData)
+ {
+#ifdef __KERNEL_MODE__
+ Kern::Fault("KERN-ALLOC COUNT", (expected<<16)|actual );
+#else
+ User::Panic(_L("ALLOC COUNT"), (expected<<16)|actual );
+#endif
+ }
+#endif
+ return KErrNone;
+}
+
+#ifdef _DEBUG
+void RHybridHeap::DoMarkStart()
+{
+ if (iNestingLevel==0)
+ iAllocCount=0;
+ iNestingLevel++;
+}
+
+TUint32 RHybridHeap::DoMarkEnd(TInt aExpected)
+{
+ if (iNestingLevel==0)
+ return 0;
+ SHeapCellInfo info;
+ SHeapCellInfo* p = iTestData ? (SHeapCellInfo*)iTestData : &info;
+ memclr(p, sizeof(info));
+ p->iHeap = this;
+ struct HeapInfo hinfo;
+ SWalkInfo winfo;
+ Lock();
+ winfo.iFunction = WalkCheckCell;
+ winfo.iParam = p;
+ winfo.iHeap = (RHybridHeap*)this;
+ GetInfo(&hinfo, &winfo);
+ Unlock();
+
+ if (p->iLevelAlloc != aExpected && !iTestData)
+ return (TUint32)(p->iStranded + 1);
+ if (--iNestingLevel == 0)
+ iAllocCount = 0;
+ return 0;
+}
+
+void RHybridHeap::DoSetAllocFail(TAllocFail aType, TInt aRate)
+{// Default to a burst mode of 1, as aType may be a burst type.
+ DoSetAllocFail(aType, aRate, 1);
+}
+
+void ResetAllocCellLevels(TAny* aPtr, RHybridHeap::TCellType aType, TAny* aCell, TInt aLen)
+{
+ (void)aPtr;
+ (void)aLen;
+
+ if (aType == RHybridHeap::EGoodAllocatedCell)
+ {
+ RHybridHeap::SDebugCell* DbgCell = (RHybridHeap::SDebugCell*)((TUint8*)aCell-RHeap::EDebugHdrSize);
+ DbgCell->nestingLevel = 0;
+ }
+}
+
+// Don't change as the ETHeapBadDebugFailParameter check below and the API
+// documentation rely on this being 16 for RHybridHeap.
+LOCAL_D const TInt KBurstFailRateShift = 16;
+LOCAL_D const TInt KBurstFailRateMask = (1 << KBurstFailRateShift) - 1;
+
+void RHybridHeap::DoSetAllocFail(TAllocFail aType, TInt aRate, TUint aBurst)
+{
+ if (aType==EReset)
+ {
+ // reset levels of all allocated cells to 0
+ // this should prevent subsequent tests failing unnecessarily
+ iFailed = EFalse; // Reset for ECheckFailure relies on this.
+ struct HeapInfo hinfo;
+ SWalkInfo winfo;
+ Lock();
+ winfo.iFunction = (TWalkFunc)&ResetAllocCellLevels;
+ winfo.iParam = NULL;
+ winfo.iHeap = (RHybridHeap*)this;
+ GetInfo(&hinfo, &winfo);
+ Unlock();
+ // reset heap allocation mark as well
+ iNestingLevel=0;
+ iAllocCount=0;
+ aType=ENone;
+ }
+
+ switch (aType)
+ {
+ case EBurstRandom:
+ case EBurstTrueRandom:
+ case EBurstDeterministic:
+ case EBurstFailNext:
+ // If the fail type is a burst type then iFailRate is split in 2:
+ // the 16 lsbs are the fail rate and the 16 msbs are the burst length.
+ if (TUint(aRate) > (TUint)KMaxTUint16 || aBurst > KMaxTUint16)
+ HEAP_PANIC(ETHeapBadDebugFailParameter);
+
+ iFailed = EFalse;
+ iFailType = aType;
+ iFailRate = (aRate == 0) ? 1 : aRate;
+ iFailAllocCount = -iFailRate;
+ iFailRate = iFailRate | (aBurst << KBurstFailRateShift);
+ break;
+
+ default:
+ iFailed = EFalse;
+ iFailType = aType;
+ iFailRate = (aRate == 0) ? 1 : aRate; // A rate of <1 is meaningless
+ iFailAllocCount = 0;
+ break;
+ }
+
+ // Set up iRand for either:
+ // - random seed value, or
+ // - a count of the number of failures so far.
+ iRand = 0;
+#ifndef __KERNEL_MODE__
+ switch (iFailType)
+ {
+ case ETrueRandom:
+ case EBurstTrueRandom:
+ {
+ TTime time;
+ time.HomeTime();
+ TInt64 seed = time.Int64();
+ iRand = Math::Rand(seed);
+ break;
+ }
+ case ERandom:
+ case EBurstRandom:
+ {
+ TInt64 seed = 12345;
+ iRand = Math::Rand(seed);
+ break;
+ }
+ default:
+ break;
+ }
+#endif
+}
+
+TBool RHybridHeap::CheckForSimulatedAllocFail()
+//
+// Check to see if the user has requested simulated alloc failure, and if so possibly
+// return ETrue, indicating a failure.
+//
+{
+ // For burst mode failures iFailRate is shared
+ TUint16 rate = (TUint16)(iFailRate & KBurstFailRateMask);
+ TUint16 burst = (TUint16)(iFailRate >> KBurstFailRateShift);
+ TBool r = EFalse;
+ switch (iFailType)
+ {
+#ifndef __KERNEL_MODE__
+ case ERandom:
+ case ETrueRandom:
+ if (++iFailAllocCount>=iFailRate)
+ {
+ iFailAllocCount=0;
+ if (!iFailed) // haven't failed yet after iFailRate allocations so fail now
+ return(ETrue);
+ iFailed=EFalse;
+ }
+ else
+ {
+ if (!iFailed)
+ {
+ TInt64 seed=iRand;
+ iRand=Math::Rand(seed);
+ if (iRand%iFailRate==0)
+ {
+ iFailed=ETrue;
+ return(ETrue);
+ }
+ }
+ }
+ break;
+
+ case EBurstRandom:
+ case EBurstTrueRandom:
+ if (++iFailAllocCount < 0)
+ {
+ // We haven't started failing yet so should we now?
+ TInt64 seed = iRand;
+ iRand = Math::Rand(seed);
+ if (iRand % rate == 0)
+ {// Fail now. Reset iFailAllocCount so we fail burst times
+ iFailAllocCount = 0;
+ r = ETrue;
+ }
+ }
+ else
+ {
+ if (iFailAllocCount < burst)
+ {// Keep failing for burst times
+ r = ETrue;
+ }
+ else
+ {// We've now failed burst times so start again.
+ iFailAllocCount = -(rate - 1);
+ }
+ }
+ break;
+#endif
+ case EDeterministic:
+ if (++iFailAllocCount%iFailRate==0)
+ {
+ r=ETrue;
+ iRand++; // Keep count of how many times we have failed
+ }
+ break;
+
+ case EBurstDeterministic:
+ // This will fail burst number of times, every rate attempts.
+ if (++iFailAllocCount >= 0)
+ {
+ if (iFailAllocCount == burst - 1)
+ {// This is the burst time we have failed so make it the last by
+            // resetting counts so we next fail after rate attempts.
+ iFailAllocCount = -rate;
+ }
+ r = ETrue;
+ iRand++; // Keep count of how many times we have failed
+ }
+ break;
+
+ case EFailNext:
+ if ((++iFailAllocCount%iFailRate)==0)
+ {
+ iFailType=ENone;
+ r=ETrue;
+ iRand++; // Keep count of how many times we have failed
+ }
+ break;
+
+ case EBurstFailNext:
+ if (++iFailAllocCount >= 0)
+ {
+ if (iFailAllocCount == burst - 1)
+ {// This is the burst time we have failed so make it the last.
+ iFailType = ENone;
+ }
+ r = ETrue;
+ iRand++; // Keep count of how many times we have failed
+ }
+ break;
+
+ default:
+ break;
+ }
+ return r;
+}
+
+#endif // _DEBUG
+
+//
+// Methods for Doug Lea allocator detailed check
+//
+
+void RHybridHeap::DoCheckAnyChunk(mstate m, mchunkptr p)
+{
+ __HEAP_CORRUPTED_TEST(((IS_ALIGNED(CHUNK2MEM(p))) || (p->iHead == FENCEPOST_HEAD)), ETHeapBadCellAddress, p, 0);
+ (void)m;
+}
+
+/* Check properties of iTop chunk */
+void RHybridHeap::DoCheckTopChunk(mstate m, mchunkptr p)
+{
+ msegmentptr sp = &m->iSeg;
+ size_t sz = CHUNKSIZE(p);
+ __HEAP_CORRUPTED_TEST((sp != 0), ETHeapBadCellAddress, p, 0);
+ __HEAP_CORRUPTED_TEST(((IS_ALIGNED(CHUNK2MEM(p))) || (p->iHead == FENCEPOST_HEAD)), ETHeapBadCellAddress, p,0);
+ __HEAP_CORRUPTED_TEST((sz == m->iTopSize), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((sz > 0), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((sz == ((sp->iBase + sp->iSize) - (TUint8*)p) - TOP_FOOT_SIZE), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((PINUSE(p)), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
+}
+
+/* Check properties of inuse chunks */
+void RHybridHeap::DoCheckInuseChunk(mstate m, mchunkptr p)
+{
+ DoCheckAnyChunk(m, p);
+ __HEAP_CORRUPTED_TEST((CINUSE(p)), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
+ /* If not PINUSE and not mmapped, previous chunk has OK offset */
+ __HEAP_CORRUPTED_TEST((PINUSE(p) || NEXT_CHUNK(PREV_CHUNK(p)) == p), ETHeapBadCellAddress,p,0);
+}
+
+/* Check properties of free chunks */
+void RHybridHeap::DoCheckFreeChunk(mstate m, mchunkptr p)
+{
+ size_t sz = p->iHead & ~(PINUSE_BIT|CINUSE_BIT);
+ mchunkptr next = CHUNK_PLUS_OFFSET(p, sz);
+ DoCheckAnyChunk(m, p);
+ __HEAP_CORRUPTED_TEST((!CINUSE(p)), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
+ if (p != m->iDv && p != m->iTop)
+ {
+ if (sz >= MIN_CHUNK_SIZE)
+ {
+ __HEAP_CORRUPTED_TEST(((sz & CHUNK_ALIGN_MASK) == 0), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((IS_ALIGNED(CHUNK2MEM(p))), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((next->iPrevFoot == sz), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((PINUSE(p)), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST( (next == m->iTop || CINUSE(next)), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((p->iFd->iBk == p), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((p->iBk->iFd == p), ETHeapBadCellAddress,p,0);
+ }
+ else /* markers are always of size SIZE_T_SIZE */
+ __HEAP_CORRUPTED_TEST((sz == SIZE_T_SIZE), ETHeapBadCellAddress,p,0);
+ }
+}
+
+/* Check properties of malloced chunks at the point they are malloced */
+void RHybridHeap::DoCheckMallocedChunk(mstate m, void* mem, size_t s)
+{
+ if (mem != 0)
+ {
+ mchunkptr p = MEM2CHUNK(mem);
+ size_t sz = p->iHead & ~(PINUSE_BIT|CINUSE_BIT);
+ DoCheckInuseChunk(m, p);
+ __HEAP_CORRUPTED_TEST(((sz & CHUNK_ALIGN_MASK) == 0), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((sz >= MIN_CHUNK_SIZE), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((sz >= s), ETHeapBadCellAddress,p,0);
+ /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
+ __HEAP_CORRUPTED_TEST((sz < (s + MIN_CHUNK_SIZE)), ETHeapBadCellAddress,p,0);
+ }
+}
+
+/* Check a tree and its subtrees. */
+void RHybridHeap::DoCheckTree(mstate m, tchunkptr t)
+{
+ tchunkptr head = 0;
+ tchunkptr u = t;
+ bindex_t tindex = t->iIndex;
+ size_t tsize = CHUNKSIZE(t);
+ bindex_t idx;
+ DoComputeTreeIndex(tsize, idx);
+ __HEAP_CORRUPTED_TEST((tindex == idx), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((tsize >= MIN_LARGE_SIZE), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((tsize >= MINSIZE_FOR_TREE_INDEX(idx)), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST(((idx == NTREEBINS-1) || (tsize < MINSIZE_FOR_TREE_INDEX((idx+1)))), ETHeapBadCellAddress,u,0);
+
+ do
+ { /* traverse through chain of same-sized nodes */
+ DoCheckAnyChunk(m, ((mchunkptr)u));
+ __HEAP_CORRUPTED_TEST((u->iIndex == tindex), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((CHUNKSIZE(u) == tsize), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((!CINUSE(u)), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(u)), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((u->iFd->iBk == u), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((u->iBk->iFd == u), ETHeapBadCellAddress,u,0);
+ if (u->iParent == 0)
+ {
+ __HEAP_CORRUPTED_TEST((u->iChild[0] == 0), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((u->iChild[1] == 0), ETHeapBadCellAddress,u,0);
+ }
+ else
+ {
+ __HEAP_CORRUPTED_TEST((head == 0), ETHeapBadCellAddress,u,0); /* only one node on chain has iParent */
+ head = u;
+ __HEAP_CORRUPTED_TEST((u->iParent != u), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST( (u->iParent->iChild[0] == u ||
+ u->iParent->iChild[1] == u ||
+ *((tbinptr*)(u->iParent)) == u), ETHeapBadCellAddress,u,0);
+ if (u->iChild[0] != 0)
+ {
+ __HEAP_CORRUPTED_TEST((u->iChild[0]->iParent == u), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((u->iChild[0] != u), ETHeapBadCellAddress,u,0);
+ DoCheckTree(m, u->iChild[0]);
+ }
+ if (u->iChild[1] != 0)
+ {
+ __HEAP_CORRUPTED_TEST((u->iChild[1]->iParent == u), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((u->iChild[1] != u), ETHeapBadCellAddress,u,0);
+ DoCheckTree(m, u->iChild[1]);
+ }
+ if (u->iChild[0] != 0 && u->iChild[1] != 0)
+ {
+ __HEAP_CORRUPTED_TEST((CHUNKSIZE(u->iChild[0]) < CHUNKSIZE(u->iChild[1])), ETHeapBadCellAddress,u,0);
+ }
+ }
+ u = u->iFd;
+ }
+ while (u != t);
+ __HEAP_CORRUPTED_TEST((head != 0), ETHeapBadCellAddress,u,0);
+}
+
+/* Check all the chunks in a treebin. */
+void RHybridHeap::DoCheckTreebin(mstate m, bindex_t i)
+{
+ tbinptr* tb = TREEBIN_AT(m, i);
+ tchunkptr t = *tb;
+ int empty = (m->iTreeMap & (1U << i)) == 0;
+ if (t == 0)
+ __HEAP_CORRUPTED_TEST((empty), ETHeapBadCellAddress,t,0);
+ if (!empty)
+ DoCheckTree(m, t);
+}
+
+/* Check all the chunks in a smallbin. */
+void RHybridHeap::DoCheckSmallbin(mstate m, bindex_t i)
+{
+ sbinptr b = SMALLBIN_AT(m, i);
+ mchunkptr p = b->iBk;
+ unsigned int empty = (m->iSmallMap & (1U << i)) == 0;
+ if (p == b)
+ __HEAP_CORRUPTED_TEST((empty), ETHeapBadCellAddress,p,0);
+ if (!empty)
+ {
+ for (; p != b; p = p->iBk)
+ {
+ size_t size = CHUNKSIZE(p);
+ mchunkptr q;
+ /* each chunk claims to be free */
+ DoCheckFreeChunk(m, p);
+ /* chunk belongs in bin */
+ __HEAP_CORRUPTED_TEST((SMALL_INDEX(size) == i), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((p->iBk == b || CHUNKSIZE(p->iBk) == CHUNKSIZE(p)), ETHeapBadCellAddress,p,0);
+ /* chunk is followed by an inuse chunk */
+ q = NEXT_CHUNK(p);
+ if (q->iHead != FENCEPOST_HEAD)
+ DoCheckInuseChunk(m, q);
+ }
+ }
+}
+
+/* Find x in a bin. Used in other check functions. */
+TInt RHybridHeap::BinFind(mstate m, mchunkptr x)
+{
+ size_t size = CHUNKSIZE(x);
+ if (IS_SMALL(size))
+ {
+ bindex_t sidx = SMALL_INDEX(size);
+ sbinptr b = SMALLBIN_AT(m, sidx);
+ if (SMALLMAP_IS_MARKED(m, sidx))
+ {
+ mchunkptr p = b;
+ do
+ {
+ if (p == x)
+ return 1;
+ }
+ while ((p = p->iFd) != b);
+ }
+ }
+ else
+ {
+ bindex_t tidx;
+ DoComputeTreeIndex(size, tidx);
+ if (TREEMAP_IS_MARKED(m, tidx))
+ {
+ tchunkptr t = *TREEBIN_AT(m, tidx);
+ size_t sizebits = size << LEFTSHIFT_FOR_TREE_INDEX(tidx);
+ while (t != 0 && CHUNKSIZE(t) != size)
+ {
+ t = t->iChild[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ sizebits <<= 1;
+ }
+ if (t != 0)
+ {
+ tchunkptr u = t;
+ do
+ {
+ if (u == (tchunkptr)x)
+ return 1;
+ }
+ while ((u = u->iFd) != t);
+ }
+ }
+ }
+ return 0;
+}
+
+/* Traverse each chunk and check it; return total */
+size_t RHybridHeap::TraverseAndCheck(mstate m)
+{
+ size_t sum = 0;
+ msegmentptr s = &m->iSeg;
+ sum += m->iTopSize + TOP_FOOT_SIZE;
+ mchunkptr q = ALIGN_AS_CHUNK(s->iBase);
+ mchunkptr lastq = 0;
+ __HEAP_CORRUPTED_TEST((PINUSE(q)), ETHeapBadCellAddress,q,0);
+ while (q != m->iTop && q->iHead != FENCEPOST_HEAD)
+ {
+ sum += CHUNKSIZE(q);
+ if (CINUSE(q))
+ {
+ __HEAP_CORRUPTED_TEST((!BinFind(m, q)), ETHeapBadCellAddress,q,0);
+ DoCheckInuseChunk(m, q);
+ }
+ else
+ {
+ __HEAP_CORRUPTED_TEST((q == m->iDv || BinFind(m, q)), ETHeapBadCellAddress,q,0);
+ __HEAP_CORRUPTED_TEST((lastq == 0 || CINUSE(lastq)), ETHeapBadCellAddress,q,0); /* Not 2 consecutive free */
+ DoCheckFreeChunk(m, q);
+ }
+ lastq = q;
+ q = NEXT_CHUNK(q);
+ }
+ return sum;
+}
+
+/* Check all properties of malloc_state. */
+void RHybridHeap::DoCheckMallocState(mstate m)
+{
+ bindex_t i;
+// size_t total;
+ /* check bins */
+ for (i = 0; i < NSMALLBINS; ++i)
+ DoCheckSmallbin(m, i);
+ for (i = 0; i < NTREEBINS; ++i)
+ DoCheckTreebin(m, i);
+
+ if (m->iDvSize != 0)
+ { /* check iDv chunk */
+ DoCheckAnyChunk(m, m->iDv);
+ __HEAP_CORRUPTED_TEST((m->iDvSize == CHUNKSIZE(m->iDv)), ETHeapBadCellAddress,m->iDv,0);
+ __HEAP_CORRUPTED_TEST((m->iDvSize >= MIN_CHUNK_SIZE), ETHeapBadCellAddress,m->iDv,0);
+ __HEAP_CORRUPTED_TEST((BinFind(m, m->iDv) == 0), ETHeapBadCellAddress,m->iDv,0);
+ }
+
+ if (m->iTop != 0)
+ { /* check iTop chunk */
+ DoCheckTopChunk(m, m->iTop);
+ __HEAP_CORRUPTED_TEST((m->iTopSize == CHUNKSIZE(m->iTop)), ETHeapBadCellAddress,m->iTop,0);
+ __HEAP_CORRUPTED_TEST((m->iTopSize > 0), ETHeapBadCellAddress,m->iTop,0);
+ __HEAP_CORRUPTED_TEST((BinFind(m, m->iTop) == 0), ETHeapBadCellAddress,m->iTop,0);
+ }
+
+// total =
+ TraverseAndCheck(m);
+}
+
+#ifndef __KERNEL_MODE__
+//
+// Methods for Slab allocator detailed check
+//
+void RHybridHeap::DoCheckSlabTree(slab** aS, TBool aPartialPage)
+{
+ slab* s = *aS;
+ if (!s)
+ return;
+
+ TUint size = SlabHeaderSize(s->iHeader);
+ slab** parent = aS;
+ slab** child2 = &s->iChild2;
+
+ while ( s )
+ {
+ __HEAP_CORRUPTED_TEST((s->iParent == parent), ETHeapBadCellAddress,s,SLABSIZE);
+ __HEAP_CORRUPTED_TEST((!s->iChild1 || s < s->iChild1), ETHeapBadCellAddress,s,SLABSIZE);
+ __HEAP_CORRUPTED_TEST((!s->iChild2 || s < s->iChild2), ETHeapBadCellAddress,s,SLABSIZE);
+
+ if ( aPartialPage )
+ {
+ if ( s->iChild1 )
+ size = SlabHeaderSize(s->iChild1->iHeader);
+ }
+ else
+ {
+ __HEAP_CORRUPTED_TEST((SlabHeaderSize(s->iHeader) == size), ETHeapBadCellAddress,s,SLABSIZE);
+ }
+ parent = &s->iChild1;
+ s = s->iChild1;
+
+ }
+
+ parent = child2;
+ s = *child2;
+
+ while ( s )
+ {
+ __HEAP_CORRUPTED_TEST((s->iParent == parent), ETHeapBadCellAddress,s,SLABSIZE);
+ __HEAP_CORRUPTED_TEST((!s->iChild1 || s < s->iChild1), ETHeapBadCellAddress,s,SLABSIZE);
+ __HEAP_CORRUPTED_TEST((!s->iChild2 || s < s->iChild2), ETHeapBadCellAddress,s,SLABSIZE);
+
+ if ( aPartialPage )
+ {
+ if ( s->iChild2 )
+ size = SlabHeaderSize(s->iChild2->iHeader);
+ }
+ else
+ {
+ __HEAP_CORRUPTED_TEST((SlabHeaderSize(s->iHeader) == size), ETHeapBadCellAddress,s,SLABSIZE);
+ }
+ parent = &s->iChild2;
+ s = s->iChild2;
+
+ }
+
+}
+
+void RHybridHeap::DoCheckSlabTrees()
+{
+ for (TInt i = 0; i < (MAXSLABSIZE>>2); ++i)
+ DoCheckSlabTree(&iSlabAlloc[i].iPartial, EFalse);
+ DoCheckSlabTree(&iPartialPage, ETrue);
+}
+
+void RHybridHeap::DoCheckSlab(slab* aSlab, TAllocatorType aSlabType, TAny* aBfr)
+{
+ if ( (aSlabType == ESlabSpare) || (aSlabType == EEmptySlab) )
+ return;
+
+ unsigned h = aSlab->iHeader;
+ __HEAP_CORRUPTED_TEST((ZEROBITS(h)), ETHeapBadCellAddress,aBfr,aSlab);
+ unsigned used = SlabHeaderUsedm4(h)+4;
+ unsigned size = SlabHeaderSize(h);
+ __HEAP_CORRUPTED_TEST( (used < SLABSIZE),ETHeapBadCellAddress, aBfr, aSlab);
+ __HEAP_CORRUPTED_TEST( ((size > 3 ) && (size < MAXSLABSIZE)), ETHeapBadCellAddress,aBfr,aSlab);
+ unsigned count = 0;
+
+ switch ( aSlabType )
+ {
+ case EFullSlab:
+ count = (KMaxSlabPayload / size );
+ __HEAP_CORRUPTED_TEST((used == count*size), ETHeapBadCellAddress,aBfr,aSlab);
+ __HEAP_CORRUPTED_TEST((HeaderFloating(h)), ETHeapBadCellAddress,aBfr,aSlab);
+ break;
+
+ case EPartialFullSlab:
+ __HEAP_CORRUPTED_TEST(((used % size)==0),ETHeapBadCellAddress,aBfr,aSlab);
+ __HEAP_CORRUPTED_TEST(((SlabHeaderFree(h) == 0) || (((SlabHeaderFree(h)<<2)-sizeof(slabhdr)) % SlabHeaderSize(h) == 0)),
+ ETHeapBadCellAddress,aBfr,aSlab);
+ break;
+
+ default:
+ break;
+
+ }
+}
+
+//
+// Check that committed size in heap equals number of pages in bitmap
+// plus size of Doug Lea region
+//
+void RHybridHeap::DoCheckCommittedSize(TInt aNPages, mstate aM)
+{
+ TInt total_committed = (aNPages * iPageSize) + aM->iSeg.iSize + (iBase - (TUint8*)this);
+ __HEAP_CORRUPTED_TEST((total_committed == iChunkSize), ETHeapBadCellAddress,total_committed,iChunkSize);
+}
+
+#endif // __KERNEL_MODE__
+
+#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
diff --git a/src/corelib/arch/symbian/dla_p.h b/src/corelib/arch/symbian/dla_p.h
new file mode 100644
index 0000000..519a4a2
--- /dev/null
+++ b/src/corelib/arch/symbian/dla_p.h
@@ -0,0 +1,969 @@
+/****************************************************************************
+**
+** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef __DLA__
+#define __DLA__
+
+#define DEFAULT_TRIM_THRESHOLD ((size_t)4U * (size_t)1024U)
+
+#define MSPACES 0
+#define HAVE_MORECORE 1
+#define MORECORE_CONTIGUOUS 1
+#define HAVE_MMAP 0
+#define HAVE_MREMAP 0
+#define DEFAULT_GRANULARITY (4096U)
+#define FOOTERS 0
+#define USE_LOCKS 0
+#define INSECURE 1
+#define NO_MALLINFO 0
+
+#define LACKS_SYS_TYPES_H
+#ifndef LACKS_SYS_TYPES_H
+#include <sys/types.h> /* For size_t */
+#else
+#ifndef _SIZE_T_DECLARED
+typedef unsigned int size_t;
+#define _SIZE_T_DECLARED
+#endif
+#endif /* LACKS_SYS_TYPES_H */
+
+/* The maximum possible size_t value has all bits set */
+#define MAX_SIZE_T (~(size_t)0)
+
+#ifndef ONLY_MSPACES
+ #define ONLY_MSPACES 0
+#endif /* ONLY_MSPACES */
+
+#ifndef MSPACES
+ #if ONLY_MSPACES
+ #define MSPACES 1
+ #else /* ONLY_MSPACES */
+ #define MSPACES 0
+ #endif /* ONLY_MSPACES */
+#endif /* MSPACES */
+
+//#ifndef MALLOC_ALIGNMENT
+// #define MALLOC_ALIGNMENT ((size_t)8U)
+//#endif /* MALLOC_ALIGNMENT */
+
+#ifndef FOOTERS
+ #define FOOTERS 0
+#endif /* FOOTERS */
+
+#ifndef ABORT
+// #define ABORT abort()
+//  #define ABORT User::Invariant() // redefined so euser isn't dependent on oe
+ #define ABORT HEAP_PANIC(ETHeapBadCellAddress)
+#endif /* ABORT */
+
+#ifndef PROCEED_ON_ERROR
+ #define PROCEED_ON_ERROR 0
+#endif /* PROCEED_ON_ERROR */
+
+#ifndef USE_LOCKS
+ #define USE_LOCKS 0
+#endif /* USE_LOCKS */
+
+#ifndef INSECURE
+ #define INSECURE 0
+#endif /* INSECURE */
+
+#ifndef HAVE_MMAP
+ #define HAVE_MMAP 1
+#endif /* HAVE_MMAP */
+
+#ifndef MMAP_CLEARS
+ #define MMAP_CLEARS 1
+#endif /* MMAP_CLEARS */
+
+#ifndef HAVE_MREMAP
+ #ifdef linux
+ #define HAVE_MREMAP 1
+ #else /* linux */
+ #define HAVE_MREMAP 0
+ #endif /* linux */
+#endif /* HAVE_MREMAP */
+
+#ifndef MALLOC_FAILURE_ACTION
+ //#define MALLOC_FAILURE_ACTION errno = ENOMEM;
+ #define MALLOC_FAILURE_ACTION ;
+#endif /* MALLOC_FAILURE_ACTION */
+
+#ifndef HAVE_MORECORE
+ #if ONLY_MSPACES
+ #define HAVE_MORECORE 1 /*AMOD: has changed */
+ #else /* ONLY_MSPACES */
+ #define HAVE_MORECORE 1
+ #endif /* ONLY_MSPACES */
+#endif /* HAVE_MORECORE */
+
+#if !HAVE_MORECORE
+ #define MORECORE_CONTIGUOUS 0
+#else /* !HAVE_MORECORE */
+ #ifndef MORECORE
+ #define MORECORE DLAdjust
+ #endif /* MORECORE */
+ #ifndef MORECORE_CONTIGUOUS
+ #define MORECORE_CONTIGUOUS 0
+ #endif /* MORECORE_CONTIGUOUS */
+#endif /* !HAVE_MORECORE */
+
+#ifndef DEFAULT_GRANULARITY
+ #if MORECORE_CONTIGUOUS
+ #define DEFAULT_GRANULARITY 4096 /* 0 means to compute in init_mparams */
+ #else /* MORECORE_CONTIGUOUS */
+ #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
+ #endif /* MORECORE_CONTIGUOUS */
+#endif /* DEFAULT_GRANULARITY */
+
+#ifndef DEFAULT_TRIM_THRESHOLD
+ #ifndef MORECORE_CANNOT_TRIM
+ #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+ #else /* MORECORE_CANNOT_TRIM */
+ #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
+ #endif /* MORECORE_CANNOT_TRIM */
+#endif /* DEFAULT_TRIM_THRESHOLD */
+
+#ifndef DEFAULT_MMAP_THRESHOLD
+ #if HAVE_MMAP
+ #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
+ #else /* HAVE_MMAP */
+ #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+ #endif /* HAVE_MMAP */
+#endif /* DEFAULT_MMAP_THRESHOLD */
+
+#ifndef USE_BUILTIN_FFS
+ #define USE_BUILTIN_FFS 0
+#endif /* USE_BUILTIN_FFS */
+
+#ifndef USE_DEV_RANDOM
+ #define USE_DEV_RANDOM 0
+#endif /* USE_DEV_RANDOM */
+
+#ifndef NO_MALLINFO
+ #define NO_MALLINFO 0
+#endif /* NO_MALLINFO */
+#ifndef MALLINFO_FIELD_TYPE
+ #define MALLINFO_FIELD_TYPE size_t
+#endif /* MALLINFO_FIELD_TYPE */
+
+/*
+ mallopt tuning options. SVID/XPG defines four standard parameter
+ numbers for mallopt, normally defined in malloc.h. None of these
+ are used in this malloc, so setting them has no effect. But this
+ malloc does support the following options.
+*/
+
+#define M_TRIM_THRESHOLD (-1)
+#define M_GRANULARITY (-2)
+#define M_MMAP_THRESHOLD (-3)
+
+#if !NO_MALLINFO
+/*
+ This version of malloc supports the standard SVID/XPG mallinfo
+ routine that returns a struct containing usage properties and
+ statistics. It should work on any system that has a
+ /usr/include/malloc.h defining struct mallinfo. The main
+ declaration needed is the mallinfo struct that is returned (by-copy)
+ by mallinfo(). The mallinfo struct contains a bunch of fields that
+ are not even meaningful in this version of malloc. These fields are
+ instead filled by mallinfo() with other numbers that might be of
+ interest.
+
+ HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
+ /usr/include/malloc.h file that includes a declaration of struct
+ mallinfo. If so, it is included; else a compliant version is
+ declared below. These must be precisely the same for mallinfo() to
+ work. The original SVID version of this struct, defined on most
+ systems with mallinfo, declares all fields as ints. But some others
+ define as unsigned long. If your system defines the fields using a
+ type of different width than listed here, you MUST #include your
+ system version and #define HAVE_USR_INCLUDE_MALLOC_H.
+*/
+
+/* #define HAVE_USR_INCLUDE_MALLOC_H */
+
+#ifdef HAVE_USR_INCLUDE_MALLOC_H
+#include "/usr/include/malloc.h"
+#else /* HAVE_USR_INCLUDE_MALLOC_H */
+
+struct mallinfo {
+ MALLINFO_FIELD_TYPE iArena; /* non-mmapped space allocated from system */
+ MALLINFO_FIELD_TYPE iOrdblks; /* number of free chunks */
+ MALLINFO_FIELD_TYPE iSmblks; /* always 0 */
+ MALLINFO_FIELD_TYPE iHblks; /* always 0 */
+ MALLINFO_FIELD_TYPE iHblkhd; /* space in mmapped regions */
+ MALLINFO_FIELD_TYPE iUsmblks; /* maximum total allocated space */
+ MALLINFO_FIELD_TYPE iFsmblks; /* always 0 */
+ MALLINFO_FIELD_TYPE iUordblks; /* total allocated space */
+ MALLINFO_FIELD_TYPE iFordblks; /* total free space */
+ MALLINFO_FIELD_TYPE iKeepcost; /* releasable (via malloc_trim) space */
+ MALLINFO_FIELD_TYPE iCellCount;/* Number of chunks allocated*/
+};
+
+#endif /* HAVE_USR_INCLUDE_MALLOC_H */
+#endif /* NO_MALLINFO */
+
+#if MSPACES
+ typedef void* mspace;
+#endif /* MSPACES */
+
+#if 0
+
+#include <stdio.h>/* for printing in malloc_stats */
+
+#ifndef LACKS_ERRNO_H
+ #include <errno.h> /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
+
+#if FOOTERS
+ #include <time.h> /* for iMagic initialization */
+#endif /* FOOTERS */
+
+#ifndef LACKS_STDLIB_H
+ #include <stdlib.h> /* for abort() */
+#endif /* LACKS_STDLIB_H */
+
+#if !defined(ASSERT)
+#define ASSERT(x) __ASSERT_DEBUG(x, HEAP_PANIC(ETHeapBadCellAddress))
+#endif
+
+#ifndef LACKS_STRING_H
+ #include <string.h> /* for memset etc */
+#endif /* LACKS_STRING_H */
+
+#if USE_BUILTIN_FFS
+ #ifndef LACKS_STRINGS_H
+ #include <strings.h> /* for ffs */
+ #endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
+
+#if HAVE_MMAP
+ #ifndef LACKS_SYS_MMAN_H
+ #include <sys/mman.h> /* for mmap */
+ #endif /* LACKS_SYS_MMAN_H */
+ #ifndef LACKS_FCNTL_H
+ #include <fcntl.h>
+ #endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MORECORE
+ #ifndef LACKS_UNISTD_H
+ #include <unistd.h> /* for sbrk */
+ extern void* sbrk(size_t);
+ #else /* LACKS_UNISTD_H */
+ #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+ extern void* sbrk(ptrdiff_t);
+      /* AMOD: sbrk is not defined on WIN32; need to check on Symbian */
+ #endif /* FreeBSD etc */
+ #endif /* LACKS_UNISTD_H */
+#endif /* HAVE_MORECORE */
+
+#endif
+
+/*AMOD: For MALLOC_GETPAGESIZE*/
+#if 0 // replaced with GET_PAGE_SIZE() defined in heap.cpp
+#ifndef WIN32
+ #ifndef MALLOC_GETPAGESIZE
+ #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
+ #ifndef _SC_PAGE_SIZE
+ #define _SC_PAGE_SIZE _SC_PAGESIZE
+ #endif
+ #endif
+ #ifdef _SC_PAGE_SIZE
+ #define MALLOC_GETPAGESIZE sysconf(_SC_PAGE_SIZE)
+ #else
+ #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+ extern size_t getpagesize();
+ #define MALLOC_GETPAGESIZE getpagesize()
+ #else
+ #ifdef WIN32 /* use supplied emulation of getpagesize */
+ #define MALLOC_GETPAGESIZE getpagesize()
+ #else
+ #ifndef LACKS_SYS_PARAM_H
+ #include <sys/param.h>
+ #endif
+ #ifdef EXEC_PAGESIZE
+ #define MALLOC_GETPAGESIZE EXEC_PAGESIZE
+ #else
+ #ifdef NBPG
+ #ifndef CLSIZE
+ #define MALLOC_GETPAGESIZE NBPG
+ #else
+ #define MALLOC_GETPAGESIZE (NBPG * CLSIZE)
+ #endif
+ #else
+ #ifdef NBPC
+ #define MALLOC_GETPAGESIZE NBPC
+ #else
+ #ifdef PAGESIZE
+ #define MALLOC_GETPAGESIZE PAGESIZE
+ #else /* just guess */
+ #define MALLOC_GETPAGESIZE ((size_t)4096U)
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+#endif
+#endif
+/*AMOD: For MALLOC_GETPAGESIZE*/
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE (sizeof(size_t))
+#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO ((size_t)0)
+#define SIZE_T_ONE ((size_t)1)
+#define SIZE_T_TWO ((size_t)2)
+#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* True if address a has acceptable alignment */
+//#define IS_ALIGNED(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+#define IS_ALIGNED(A) (((unsigned int)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+
+/* the number of bytes to offset an address to align it */
+#define ALIGN_OFFSET(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+ ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
+
+/* -------------------------- MMAP preliminaries ------------------------- */
+
+/*
+ If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
+ checks to fail so compiler optimizer can delete code rather than
+ using so many "#if"s.
+*/
+
+
+/* MORECORE and MMAP must return MFAIL on failure */
+#define MFAIL ((void*)(MAX_SIZE_T))
+#define CMFAIL ((TUint8*)(MFAIL)) /* defined for convenience */
+
+#if !HAVE_MMAP
+ #define IS_MMAPPED_BIT (SIZE_T_ZERO)
+ #define USE_MMAP_BIT (SIZE_T_ZERO)
+ #define CALL_MMAP(s) MFAIL
+ #define CALL_MUNMAP(a, s) (-1)
+ #define DIRECT_MMAP(s) MFAIL
+#else /* !HAVE_MMAP */
+ #define IS_MMAPPED_BIT (SIZE_T_ONE)
+ #define USE_MMAP_BIT (SIZE_T_ONE)
+ #ifndef WIN32
+ #define CALL_MUNMAP(a, s) DLUMMAP((a),(s)) /*munmap((a), (s))*/
+ #define MMAP_PROT (PROT_READ|PROT_WRITE)
+ #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif /* MAP_ANON */
+ #ifdef MAP_ANONYMOUS
+ #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
+ #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, (int)MMAP_FLAGS, -1, 0)
+ #else /* MAP_ANONYMOUS */
+ /*
+ Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+ is unlikely to be needed, but is supplied just in case.
+ */
+ #define MMAP_FLAGS (MAP_PRIVATE)
+ //static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+ #define CALL_MMAP(s) DLMMAP(s)
+ /*#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
+ (dev_zero_fd = open("/dev/zero", O_RDWR), \
+ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
+ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
+ */
+ #define CALL_REMAP(a, s, d) DLREMAP((a),(s),(d))
+ #endif /* MAP_ANONYMOUS */
+ #define DIRECT_MMAP(s) CALL_MMAP(s)
+ #else /* WIN32 */
+ #define CALL_MMAP(s) win32mmap(s)
+ #define CALL_MUNMAP(a, s) win32munmap((a), (s))
+ #define DIRECT_MMAP(s) win32direct_mmap(s)
+ #endif /* WIN32 */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MMAP && HAVE_MREMAP
+ #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+#else /* HAVE_MMAP && HAVE_MREMAP */
+ #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+#if HAVE_MORECORE
+ #define CALL_MORECORE(S) SetBrk(S)
+#else /* HAVE_MORECORE */
+ #define CALL_MORECORE(S) MFAIL
+#endif /* HAVE_MORECORE */
+
+/* mstate bit set if contiguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT (8U)
+
+
+#if USE_LOCKS
+/*
+ When locks are defined, there are up to two global locks:
+ * If HAVE_MORECORE, iMorecoreMutex protects sequences of calls to
+ MORECORE. In many cases sys_alloc requires two calls, that should
+ not be interleaved with calls by other threads. This does not
+ protect against direct calls to MORECORE by other threads not
+  using this lock, so there is still code to cope as best we can with
+  interference.
+ * iMagicInitMutex ensures that mparams.iMagic and other
+ unique mparams values are initialized only once.
+*/
+ #ifndef WIN32
+ /* By default use posix locks */
+ #include <pthread.h>
+ #define MLOCK_T pthread_mutex_t
+ #define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
+ #define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
+ #define RELEASE_LOCK(l) pthread_mutex_unlock(l)
+
+ #if HAVE_MORECORE
+ //static MLOCK_T iMorecoreMutex = PTHREAD_MUTEX_INITIALIZER;
+ #endif /* HAVE_MORECORE */
+ //static MLOCK_T iMagicInitMutex = PTHREAD_MUTEX_INITIALIZER;
+ #else /* WIN32 */
+ #define MLOCK_T long
+ #define INITIAL_LOCK(l) *(l)=0
+ #define ACQUIRE_LOCK(l) win32_acquire_lock(l)
+ #define RELEASE_LOCK(l) win32_release_lock(l)
+ #if HAVE_MORECORE
+ static MLOCK_T iMorecoreMutex;
+ #endif /* HAVE_MORECORE */
+ static MLOCK_T iMagicInitMutex;
+ #endif /* WIN32 */
+ #define USE_LOCK_BIT (2U)
+#else /* USE_LOCKS */
+ #define USE_LOCK_BIT (0U)
+ #define INITIAL_LOCK(l)
+#endif /* USE_LOCKS */
+
+#if USE_LOCKS && HAVE_MORECORE
+ #define ACQUIRE_MORECORE_LOCK(M) ACQUIRE_LOCK((M->iMorecoreMutex)/*&iMorecoreMutex*/);
+ #define RELEASE_MORECORE_LOCK(M) RELEASE_LOCK((M->iMorecoreMutex)/*&iMorecoreMutex*/);
+#else /* USE_LOCKS && HAVE_MORECORE */
+ #define ACQUIRE_MORECORE_LOCK(M)
+ #define RELEASE_MORECORE_LOCK(M)
+#endif /* USE_LOCKS && HAVE_MORECORE */
+
+#if USE_LOCKS
+  /* Currently not supporting this */
+ #define ACQUIRE_MAGIC_INIT_LOCK(M) ACQUIRE_LOCK(((M)->iMagicInitMutex));
+ //AMOD: changed #define ACQUIRE_MAGIC_INIT_LOCK()
+ //#define RELEASE_MAGIC_INIT_LOCK()
+ #define RELEASE_MAGIC_INIT_LOCK(M) RELEASE_LOCK(((M)->iMagicInitMutex));
+#else /* USE_LOCKS */
+ #define ACQUIRE_MAGIC_INIT_LOCK(M)
+ #define RELEASE_MAGIC_INIT_LOCK(M)
+#endif /* USE_LOCKS */
+
+/*CHUNK representation*/
+struct malloc_chunk {
+ size_t iPrevFoot; /* Size of previous chunk (if free). */
+ size_t iHead; /* Size and inuse bits. */
+ struct malloc_chunk* iFd; /* double links -- used only if free. */
+ struct malloc_chunk* iBk;
+};
+
+typedef struct malloc_chunk mchunk;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
+typedef unsigned int bindex_t; /* Described below */
+typedef unsigned int binmap_t; /* Described below */
+typedef unsigned int flag_t; /* The type of various bit flag sets */
+
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+#define MCHUNK_SIZE (sizeof(mchunk))
+
+//#if FOOTERS
+// #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+//#else /* FOOTERS */
+// #define CHUNK_OVERHEAD (SIZE_T_SIZE)
+//#endif /* FOOTERS */
+
+/* MMapped chunks need a second word of overhead ... */
+#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define CHUNK2MEM(p) ((void*)((TUint8*)(p) + TWO_SIZE_T_SIZES))
+#define MEM2CHUNK(mem) ((mchunkptr)((TUint8*)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define ALIGN_AS_CHUNK(A) (mchunkptr)((A) + ALIGN_OFFSET(CHUNK2MEM(A)))
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
+#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define PAD_REQUEST(req) (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define REQUEST2SIZE(req) (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : PAD_REQUEST(req))
+
+/* ------------------ Operations on iHead and foot fields ----------------- */
+
+/*
+ The iHead field of a chunk is or'ed with PINUSE_BIT when previous
+ adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
+ use. If the chunk was obtained with mmap, the iPrevFoot field has
+ IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
+ mmapped region to the base of the chunk.
+*/
+#define PINUSE_BIT (SIZE_T_ONE)
+#define CINUSE_BIT (SIZE_T_TWO)
+#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
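+
+/*
+ Worked illustration (assuming SIZE_T_ONE == 1, SIZE_T_TWO == 2 and 8-byte alignment):
+ a free 48-byte chunk whose previous physical neighbour is in use carries
+ iHead == (48 | PINUSE_BIT) == 0x31; CHUNKSIZE() masks off INUSE_BITS and recovers 48,
+ while CINUSE() reads as 0 because the chunk itself is free.
+*/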
+
+/* Head value for fenceposts */
+#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
+
+/* extraction of fields from iHead words */
+#define CINUSE(p) ((p)->iHead & CINUSE_BIT)
+#define PINUSE(p) ((p)->iHead & PINUSE_BIT)
+#define CHUNKSIZE(p) ((p)->iHead & ~(INUSE_BITS))
+
+#define CLEAR_PINUSE(p) ((p)->iHead &= ~PINUSE_BIT)
+#define CLEAR_CINUSE(p) ((p)->iHead &= ~CINUSE_BIT)
+
+/* Treat space at ptr +/- offset as a chunk */
+#define CHUNK_PLUS_OFFSET(p, s) ((mchunkptr)(((TUint8*)(p)) + (s)))
+#define CHUNK_MINUS_OFFSET(p, s) ((mchunkptr)(((TUint8*)(p)) - (s)))
+
+/* Ptr to next or previous physical malloc_chunk. */
+#define NEXT_CHUNK(p) ((mchunkptr)( ((TUint8*)(p)) + ((p)->iHead & ~INUSE_BITS)))
+#define PREV_CHUNK(p) ((mchunkptr)( ((TUint8*)(p)) - ((p)->iPrevFoot) ))
+
+/* extract next chunk's PINUSE bit */
+#define NEXT_PINUSE(p) ((NEXT_CHUNK(p)->iHead) & PINUSE_BIT)
+
+/* Get/set size at footer */
+#define GET_FOOT(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot)
+#define SET_FOOT(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot = (s))
+
+/* Set size, PINUSE bit, and foot */
+#define SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, s) ((p)->iHead = (s|PINUSE_BIT), SET_FOOT(p, s))
+
+/* Set size, PINUSE bit, foot, and clear next PINUSE */
+#define SET_FREE_WITH_PINUSE(p, s, n) (CLEAR_PINUSE(n), SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, s))
+
+#define IS_MMAPPED(p) (!((p)->iHead & PINUSE_BIT) && ((p)->iPrevFoot & IS_MMAPPED_BIT))
+
+/* Get the internal overhead associated with chunk p */
+#define OVERHEAD_FOR(p) (IS_MMAPPED(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
+
+/* Return true if malloced space is not necessarily cleared */
+#if MMAP_CLEARS
+ #define CALLOC_MUST_CLEAR(p) (!IS_MMAPPED(p))
+#else /* MMAP_CLEARS */
+ #define CALLOC_MUST_CLEAR(p) (1)
+#endif /* MMAP_CLEARS */
+
+/* ---------------------- Overlaid data structures ----------------------- */
+struct malloc_tree_chunk {
+ /* The first four fields must be compatible with malloc_chunk */
+ size_t iPrevFoot;
+ size_t iHead;
+ struct malloc_tree_chunk* iFd;
+ struct malloc_tree_chunk* iBk;
+
+ struct malloc_tree_chunk* iChild[2];
+ struct malloc_tree_chunk* iParent;
+ bindex_t iIndex;
+};
+
+typedef struct malloc_tree_chunk tchunk;
+typedef struct malloc_tree_chunk* tchunkptr;
+typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
+
+/* A little helper macro for trees */
+#define LEFTMOST_CHILD(t) ((t)->iChild[0] != 0? (t)->iChild[0] : (t)->iChild[1])
+/*Segment structure*/
+//struct malloc_segment {
+// TUint8* iBase; /* base address */
+// size_t iSize; /* allocated size */
+//};
+
+#define IS_MMAPPED_SEGMENT(S) ((S)->iSflags & IS_MMAPPED_BIT)
+#define IS_EXTERN_SEGMENT(S) ((S)->iSflags & EXTERN_BIT)
+
+typedef struct malloc_segment msegment;
+typedef struct malloc_segment* msegmentptr;
+
+/*Malloc State data structure*/
+
+//#define NSMALLBINS (32U)
+//#define NTREEBINS (32U)
+#define SMALLBIN_SHIFT (3U)
+#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT (8U)
+#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
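+
+/*
+ Illustration of the boundary between the two bin types (values follow from the
+ constants above with TREEBIN_SHIFT == 8): chunks below MIN_LARGE_SIZE (256 bytes)
+ are kept in smallbins spaced SMALLBIN_WIDTH (8 bytes) apart, so e.g. a 48-byte
+ chunk maps to smallbin index 48 >> SMALLBIN_SHIFT == 6; chunks of 256 bytes or
+ more are kept in the treebins instead.
+*/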
+
+/*struct malloc_state {
+ binmap_t iSmallMap;
+ binmap_t iTreeMap;
+ size_t iDvSize;
+ size_t iTopSize;
+ mchunkptr iDv;
+ mchunkptr iTop;
+ size_t iTrimCheck;
+ mchunkptr iSmallBins[(NSMALLBINS+1)*2];
+ tbinptr iTreeBins[NTREEBINS];
+ msegment iSeg;
+ };*/
+/*
+struct malloc_state {
+ binmap_t iSmallMap;
+ binmap_t iTreeMap;
+ size_t iDvSize;
+ size_t iTopSize;
+ TUint8* iLeastAddr;
+ mchunkptr iDv;
+ mchunkptr iTop;
+ size_t iTrimCheck;
+ size_t iMagic;
+ mchunkptr iSmallBins[(NSMALLBINS+1)*2];
+ tbinptr iTreeBins[NTREEBINS];
+ size_t iFootprint;
+ size_t iMaxFootprint;
+ flag_t iMflags;
+#if USE_LOCKS
+ MLOCK_T iMutex;
+ MLOCK_T iMagicInitMutex;
+ MLOCK_T iMorecoreMutex;
+#endif
+ msegment iSeg;
+};
+*/
+typedef struct malloc_state* mstate;
+
+/* ------------- Global malloc_state and malloc_params ------------------- */
+
+/*
+ malloc_params holds global properties, including those that can be
+ dynamically set using mallopt. There is a single instance, mparams,
+ initialized in init_mparams.
+*/
+
+struct malloc_params {
+ size_t iMagic;
+ size_t iPageSize;
+ size_t iGranularity;
+ size_t iMmapThreshold;
+ size_t iTrimThreshold;
+ flag_t iDefaultMflags;
+#if USE_LOCKS
+ MLOCK_T iMagicInitMutex;
+#endif /* USE_LOCKS */
+};
+
+/* The global malloc_state used for all non-"mspace" calls */
+/*AMOD: Need to check this as this will be the member of the class*/
+
+//static struct malloc_state _gm_;
+//#define GM (&_gm_)
+
+//#define IS_GLOBAL(M) ((M) == &_gm_)
+/*AMOD: has changed*/
+#define IS_GLOBAL(M) ((M) == GM)
+#define IS_INITIALIZED(M) ((M)->iTop != 0)
+
+/* -------------------------- system alloc setup ------------------------- */
+
+/* Operations on iMflags */
+
+#define USE_LOCK(M) ((M)->iMflags & USE_LOCK_BIT)
+#define ENABLE_LOCK(M) ((M)->iMflags |= USE_LOCK_BIT)
+#define DISABLE_LOCK(M) ((M)->iMflags &= ~USE_LOCK_BIT)
+
+#define USE_MMAP(M) ((M)->iMflags & USE_MMAP_BIT)
+#define ENABLE_MMAP(M) ((M)->iMflags |= USE_MMAP_BIT)
+#define DISABLE_MMAP(M) ((M)->iMflags &= ~USE_MMAP_BIT)
+
+#define USE_NONCONTIGUOUS(M) ((M)->iMflags & USE_NONCONTIGUOUS_BIT)
+#define DISABLE_CONTIGUOUS(M) ((M)->iMflags |= USE_NONCONTIGUOUS_BIT)
+
+#define SET_LOCK(M,L) ((M)->iMflags = (L)? ((M)->iMflags | USE_LOCK_BIT) : ((M)->iMflags & ~USE_LOCK_BIT))
+
+/* page-align a size */
+#define PAGE_ALIGN(S) (((S) + (mparams.iPageSize)) & ~(mparams.iPageSize - SIZE_T_ONE))
+
+/* iGranularity-align a size */
+#define GRANULARITY_ALIGN(S) (((S) + (mparams.iGranularity)) & ~(mparams.iGranularity - SIZE_T_ONE))
+
+#define IS_PAGE_ALIGNED(S) (((size_t)(S) & (mparams.iPageSize - SIZE_T_ONE)) == 0)
+#define IS_GRANULARITY_ALIGNED(S) (((size_t)(S) & (mparams.iGranularity - SIZE_T_ONE)) == 0)
+
+/* True if segment S holds address A */
+#define SEGMENT_HOLDS(S, A) ((TUint8*)(A) >= S->iBase && (TUint8*)(A) < S->iBase + S->iSize)
+
+#ifndef MORECORE_CANNOT_TRIM
+ #define SHOULD_TRIM(M,s) ((s) > (M)->iTrimCheck)
+#else /* MORECORE_CANNOT_TRIM */
+ #define SHOULD_TRIM(M,s) (0)
+#endif /* MORECORE_CANNOT_TRIM */
+
+/*
+ TOP_FOOT_SIZE is padding at the end of a segment, including space
+ that may be needed to place segment records and fenceposts when new
+ noncontiguous segments are added.
+*/
+#define TOP_FOOT_SIZE (ALIGN_OFFSET(CHUNK2MEM(0))+PAD_REQUEST(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
+
+#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
+/* ------------------------------- Hooks -------------------------------- */
+
+/*
+ PREACTION should be defined to return 0 on success, and nonzero on
+ failure. If you are not using locking, you can redefine these to do
+ anything you like.
+*/
+
+#if USE_LOCKS
+ /* Ensure locks are initialized */
+ #define GLOBALLY_INITIALIZE() (mparams.iPageSize == 0 && init_mparams())
+ #define PREACTION(M) (USE_LOCK((M))?(ACQUIRE_LOCK((M)->iMutex),0):0) /*Action to take like lock before alloc*/
+ #define POSTACTION(M) { if (USE_LOCK(M)) RELEASE_LOCK((M)->iMutex); }
+
+#else /* USE_LOCKS */
+ #ifndef PREACTION
+ #define PREACTION(M) (0)
+ #endif /* PREACTION */
+ #ifndef POSTACTION
+ #define POSTACTION(M)
+ #endif /* POSTACTION */
+#endif /* USE_LOCKS */
+
+/*
+ CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
+ USAGE_ERROR_ACTION is triggered on detected bad frees and
+ reallocs. The argument p is an address that might have triggered the
+ fault. It is ignored by the two predefined actions, but might be
+ useful in custom actions that try to help diagnose errors.
+*/
+
+#if PROCEED_ON_ERROR
+ /* A count of the number of corruption errors causing resets */
+ int malloc_corruption_error_count;
+ /* default corruption action */
+ static void ResetOnError(mstate m);
+ #define CORRUPTION_ERROR_ACTION(m) ResetOnError(m)
+ #define USAGE_ERROR_ACTION(m, p)
+#else /* PROCEED_ON_ERROR */
+ #ifndef CORRUPTION_ERROR_ACTION
+ #define CORRUPTION_ERROR_ACTION(m) ABORT
+ #endif /* CORRUPTION_ERROR_ACTION */
+ #ifndef USAGE_ERROR_ACTION
+ #define USAGE_ERROR_ACTION(m,p) ABORT
+ #endif /* USAGE_ERROR_ACTION */
+#endif /* PROCEED_ON_ERROR */
+
+
+#ifdef _DEBUG
+ #define CHECK_FREE_CHUNK(M,P) DoCheckFreeChunk(M,P)
+ #define CHECK_INUSE_CHUNK(M,P) DoCheckInuseChunk(M,P)
+ #define CHECK_TOP_CHUNK(M,P) DoCheckTopChunk(M,P)
+ #define CHECK_MALLOCED_CHUNK(M,P,N) DoCheckMallocedChunk(M,P,N)
+ #define CHECK_MMAPPED_CHUNK(M,P) DoCheckMmappedChunk(M,P)
+ #define CHECK_MALLOC_STATE(M) DoCheckMallocState(M)
+#else /* _DEBUG */
+ #define CHECK_FREE_CHUNK(M,P)
+ #define CHECK_INUSE_CHUNK(M,P)
+ #define CHECK_MALLOCED_CHUNK(M,P,N)
+ #define CHECK_MMAPPED_CHUNK(M,P)
+ #define CHECK_MALLOC_STATE(M)
+ #define CHECK_TOP_CHUNK(M,P)
+#endif /* _DEBUG */
+
+/* ---------------------------- Indexing Bins ---------------------------- */
+
+#define IS_SMALL(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
+#define SMALL_INDEX(s) ((s) >> SMALLBIN_SHIFT)
+#define SMALL_INDEX2SIZE(i) ((i) << SMALLBIN_SHIFT)
+#define MIN_SMALL_INDEX (SMALL_INDEX(MIN_CHUNK_SIZE))
+
+/* addressing by index. See above about smallbin repositioning */
+#define SMALLBIN_AT(M, i) ((sbinptr)((TUint8*)&((M)->iSmallBins[(i)<<1])))
+#define TREEBIN_AT(M,i) (&((M)->iTreeBins[i]))
+
+
+/* Bit representing maximum resolved size in a treebin at i */
+#define BIT_FOR_TREE_INDEX(i) (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+
+/* Shift placing maximum resolved bit in a treebin at i as sign bit */
+#define LEFTSHIFT_FOR_TREE_INDEX(i) ((i == NTREEBINS-1)? 0 : ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+
+/* The size of the smallest chunk held in bin with index i */
+#define MINSIZE_FOR_TREE_INDEX(i) ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
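+
+/*
+ Illustration (with TREEBIN_SHIFT == 8): MINSIZE_FOR_TREE_INDEX(i) for i = 0,1,2,3,4,5
+ evaluates to 256, 384, 512, 768, 1024, 1536, so each power-of-two size range is split
+ across two treebins, e.g. [512,768) falls in bin 2 and [768,1024) in bin 3.
+*/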
+
+
+/* ------------------------ Operations on bin maps ----------------------- */
+/* bit corresponding to given index */
+#define IDX2BIT(i) ((binmap_t)(1) << (i))
+/* Mark/Clear bits with given index */
+#define MARK_SMALLMAP(M,i) ((M)->iSmallMap |= IDX2BIT(i))
+#define CLEAR_SMALLMAP(M,i) ((M)->iSmallMap &= ~IDX2BIT(i))
+#define SMALLMAP_IS_MARKED(M,i) ((M)->iSmallMap & IDX2BIT(i))
+#define MARK_TREEMAP(M,i) ((M)->iTreeMap |= IDX2BIT(i))
+#define CLEAR_TREEMAP(M,i) ((M)->iTreeMap &= ~IDX2BIT(i))
+#define TREEMAP_IS_MARKED(M,i) ((M)->iTreeMap & IDX2BIT(i))
+
+ /* isolate the least set bit of a bitmap */
+#define LEAST_BIT(x) ((x) & -(x))
+
+/* mask with all bits to left of least bit of x on */
+#define LEFT_BITS(x) ((x<<1) | -(x<<1))
+
+/* mask with all bits to left of or equal to least bit of x on */
+#define SAME_OR_LEFT_BITS(x) ((x) | -(x))
+
+#if !INSECURE
+ /* Check if address a is at least as high as any from MORECORE or MMAP */
+ #define OK_ADDRESS(M, a) ((TUint8*)(a) >= (M)->iLeastAddr)
+ /* Check if address of next chunk n is higher than base chunk p */
+ #define OK_NEXT(p, n) ((TUint8*)(p) < (TUint8*)(n))
+ /* Check if p has its CINUSE bit on */
+ #define OK_CINUSE(p) CINUSE(p)
+ /* Check if p has its PINUSE bit on */
+ #define OK_PINUSE(p) PINUSE(p)
+#else /* !INSECURE */
+ #define OK_ADDRESS(M, a) (1)
+ #define OK_NEXT(b, n) (1)
+ #define OK_CINUSE(p) (1)
+ #define OK_PINUSE(p) (1)
+#endif /* !INSECURE */
+
+#if (FOOTERS && !INSECURE)
+ /* Check if (alleged) mstate m has expected iMagic field */
+ #define OK_MAGIC(M) ((M)->iMagic == mparams.iMagic)
+#else /* (FOOTERS && !INSECURE) */
+ #define OK_MAGIC(M) (1)
+#endif /* (FOOTERS && !INSECURE) */
+
+/* In gcc, use __builtin_expect to minimize impact of checks */
+#if !INSECURE
+ #if defined(__GNUC__) && __GNUC__ >= 3
+ #define RTCHECK(e) __builtin_expect(e, 1)
+ #else /* GNUC */
+ #define RTCHECK(e) (e)
+ #endif /* GNUC */
+
+#else /* !INSECURE */
+ #define RTCHECK(e) (1)
+#endif /* !INSECURE */
+/* macros to set up inuse chunks with or without footers */
+#if !FOOTERS
+ #define MARK_INUSE_FOOT(M,p,s)
+ /* Set CINUSE bit and PINUSE bit of next chunk */
+ #define SET_INUSE(M,p,s) ((p)->iHead = (((p)->iHead & PINUSE_BIT)|s|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT)
+ /* Set CINUSE and PINUSE of this chunk and PINUSE of next chunk */
+ #define SET_INUSE_AND_PINUSE(M,p,s) ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT)
+ /* Set size, CINUSE and PINUSE bit of this chunk */
+ #define SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(M, p, s) ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT))
+#else /* FOOTERS */
+ /* Set foot of inuse chunk to be xor of mstate and seed */
+ #define MARK_INUSE_FOOT(M,p,s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot = ((size_t)(M) ^ mparams.iMagic))
+ #define GET_MSTATE_FOR(p) ((mstate)(((mchunkptr)((TUint8*)(p)+(CHUNKSIZE(p))))->iPrevFoot ^ mparams.iMagic))
+ #define SET_INUSE(M,p,s)\
+ ((p)->iHead = (((p)->iHead & PINUSE_BIT)|s|CINUSE_BIT),\
+ (((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT), \
+ MARK_INUSE_FOOT(M,p,s))
+ #define SET_INUSE_AND_PINUSE(M,p,s)\
+ ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),\
+ (((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT),\
+ MARK_INUSE_FOOT(M,p,s))
+ #define SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(M, p, s)\
+ ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),\
+ MARK_INUSE_FOOT(M, p, s))
+#endif /* !FOOTERS */
+
+
+#if ONLY_MSPACES
+#define INTERNAL_MALLOC(m, b) mspace_malloc(m, b)
+#define INTERNAL_FREE(m, mem) mspace_free(m,mem);
+#else /* ONLY_MSPACES */
+ #if MSPACES
+ #define INTERNAL_MALLOC(m, b) (m == GM)? dlmalloc(b) : mspace_malloc(m, b)
+ #define INTERNAL_FREE(m, mem) if (m == GM) dlfree(mem); else mspace_free(m,mem);
+ #else /* MSPACES */
+ #define INTERNAL_MALLOC(m, b) dlmalloc(b)
+ #define INTERNAL_FREE(m, mem) dlfree(mem)
+ #endif /* MSPACES */
+#endif /* ONLY_MSPACES */
+
+ #ifndef NDEBUG
+ #define CHECKING 1
+ #endif
+// #define HYSTERESIS 4
+ #define HYSTERESIS 1
+ #define HYSTERESIS_BYTES (2*PAGESIZE)
+ #define HYSTERESIS_GROW (HYSTERESIS*PAGESIZE)
+
+ #if CHECKING
+ #define CHECK(x) x
+ #else
+ #undef ASSERT
+ #define ASSERT(x) (void)0
+ #define CHECK(x) (void)0
+ #endif
+
+#endif/*__DLA__*/
diff --git a/src/corelib/arch/symbian/heap_hybrid.cpp b/src/corelib/arch/symbian/heap_hybrid.cpp
new file mode 100644
index 0000000..91faaed
--- /dev/null
+++ b/src/corelib/arch/symbian/heap_hybrid.cpp
@@ -0,0 +1,3346 @@
+/****************************************************************************
+**
+** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qt_hybridheap_symbian_p.h"
+
+#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
+
+// If non-zero, this causes the iSlabs to be configured only when the chunk size exceeds this level
+#define DELAYED_SLAB_THRESHOLD (64*1024) // 64KB seems about right based on trace data
+#define SLAB_CONFIG 0x3fff // Use all slab sizes 4,8..56 bytes. This is more efficient for large heaps as Qt tends to have many small allocations
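+// Note on the mask above: 0x3fff has fourteen bits set, matching the fourteen slab
+// cell sizes in the 4..56 byte range (4, 8, ..., 56); the exact bit-to-size mapping
+// is applied by SlabConfig() elsewhere in this allocator, so clearing individual bits
+// would be expected to disable the corresponding slab sizes.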
+
+#ifdef _DEBUG
+#define __SIMULATE_ALLOC_FAIL(s) if (CheckForSimulatedAllocFail()) {s}
+#define __ALLOC_DEBUG_HEADER(s) (s += EDebugHdrSize)
+#define __SET_DEBUG_DATA(p,n,c) (((SDebugCell*)(p))->nestingLevel = (n), ((SDebugCell*)(p))->allocCount = (c))
+#define __GET_USER_DATA_BFR(p) ((p!=0) ? (TUint8*)(p) + EDebugHdrSize : NULL)
+#define __GET_DEBUG_DATA_BFR(p) ((p!=0) ? (TUint8*)(p) - EDebugHdrSize : NULL)
+#define __ZAP_CELL(p) memset( (TUint8*)p, 0xde, (AllocLen(__GET_USER_DATA_BFR(p))+EDebugHdrSize))
+#define __DEBUG_SAVE(p) TInt dbgNestLevel = ((SDebugCell*)p)->nestingLevel
+#define __DEBUG_RESTORE(p) if (p) {((SDebugCell*)p)->nestingLevel = dbgNestLevel;}
+#define __DEBUG_HDR_SIZE EDebugHdrSize
+#define __REMOVE_DBG_HDR(n) (n*EDebugHdrSize)
+#define __GET_AVAIL_BLOCK_SIZE(s) ( (s<EDebugHdrSize) ? 0 : s-EDebugHdrSize )
+#define __UPDATE_ALLOC_COUNT(o,n,c) if (o!=n && n) {((SDebugCell*)n)->allocCount = (c);}
+#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
+#define __INCREMENT_COUNTERS(p) iCellCount++, iTotalAllocSize += AllocLen(p)
+#define __DECREMENT_COUNTERS(p) iCellCount--, iTotalAllocSize -= AllocLen(p)
+#define __UPDATE_TOTAL_ALLOC(p,s) iTotalAllocSize += (AllocLen(__GET_USER_DATA_BFR(p)) - s)
+
+#else
+#define __SIMULATE_ALLOC_FAIL(s)
+#define __ALLOC_DEBUG_HEADER(s)
+#define __SET_DEBUG_DATA(p,n,c)
+#define __GET_USER_DATA_BFR(p) (p)
+#define __GET_DEBUG_DATA_BFR(p) (p)
+#define __ZAP_CELL(p)
+#define __DEBUG_SAVE(p)
+#define __DEBUG_RESTORE(p)
+#define __DEBUG_HDR_SIZE 0
+#define __REMOVE_DBG_HDR(n) 0
+#define __GET_AVAIL_BLOCK_SIZE(s) (s)
+#define __UPDATE_ALLOC_COUNT(o,n,c)
+#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
+#define __INCREMENT_COUNTERS(p)
+#define __DECREMENT_COUNTERS(p)
+#define __UPDATE_TOTAL_ALLOC(p,s)
+
+#endif
+
+
+#define MEMORY_MONITORED (iFlags & EMonitorMemory)
+#define GM (&iGlobalMallocState)
+#define IS_FIXED_HEAP (iFlags & EFixedSize)
+#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
+#define __POWER_OF_2(x) (!((x)&((x)-1)))
+
+#define __DL_BFR_CHECK(M,P) \
+ if ( MEMORY_MONITORED ) \
+ if ( !IS_ALIGNED(P) || ((TUint8*)(P)<M->iSeg.iBase) || ((TUint8*)(P)>(M->iSeg.iBase+M->iSeg.iSize))) \
+ BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)0), HEAP_PANIC(ETHeapBadCellAddress); \
+ else DoCheckInuseChunk(M, MEM2CHUNK(P))
+
+#ifndef __KERNEL_MODE__
+
+#define __SLAB_BFR_CHECK(S,P,B) \
+ if ( MEMORY_MONITORED ) \
+ if ( ((TUint32)P & 0x3) || ((TUint8*)P<iMemBase) || ((TUint8*)(P)>(TUint8*)this)) \
+ BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)S), HEAP_PANIC(ETHeapBadCellAddress); \
+ else DoCheckSlab(S, EPartialFullSlab, P), BuildPartialSlabBitmap(B,S,P)
+#define __PAGE_BFR_CHECK(P) \
+ if ( MEMORY_MONITORED ) \
+ if ( ((TUint32)P & ((1 << iPageSize)-1)) || ((TUint8*)P<iMemBase) || ((TUint8*)(P)>(TUint8*)this)) \
+ BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)0), HEAP_PANIC(ETHeapBadCellAddress)
+
+#endif
+
+#ifdef _MSC_VER
+// This is required while we are still using VC6 to compile, so as to avoid warnings that cannot be fixed
+// without having to edit the original Doug Lea source. The 4146 warnings are due to the original code having
+// a liking for negating unsigned numbers and the 4127 warnings are due to the original code using the RTCHECK
+// macro with values that are always defined as 1. It is better to turn these warnings off than to introduce
+// diffs between the original Doug Lea implementation and our adaptation of it
+#pragma warning( disable : 4146 ) /* unary minus operator applied to unsigned type, result still unsigned */
+#pragma warning( disable : 4127 ) /* conditional expression is constant */
+#endif // _MSC_VER
+
+
+/**
+@SYMPatchable
+@publishedPartner
+@released
+
+Defines the minimum cell size of a heap.
+
+The constant can be changed at ROM build time using patchdata OBY keyword.
+
+@deprecated Patching this constant no longer has any effect.
+*/
+#ifdef __X86GCC__ // For X86GCC we don't use the proper data import attribute
+#undef IMPORT_D // since the constants are not really imported. GCC doesn't
+#define IMPORT_D // allow imports from self.
+#endif
+IMPORT_D extern const TInt KHeapMinCellSize;
+
+/**
+@SYMPatchable
+@publishedPartner
+@released
+
+This constant defines the ratio that determines the amount of hysteresis between heap growing and heap
+shrinking.
+It is a 32-bit fixed point number where the radix point is defined to be
+between bits 7 and 8 (where the LSB is bit 0) i.e. using standard notation, a Q8 or a fx24.8
+fixed point number. For example, for a ratio of 2.0, set KHeapShrinkHysRatio=0x200.
+
+The heap shrinking hysteresis value is calculated to be:
+@code
+KHeapShrinkHysRatio*(iGrowBy>>8)
+@endcode
+where iGrowBy is a page aligned value set by the argument, aGrowBy, to the RHeap constructor.
+The default hysteresis value is iGrowBy bytes i.e. KHeapShrinkHysRatio=2.0.
+
+Memory usage may be improved by reducing the heap shrinking hysteresis
+by setting 1.0 < KHeapShrinkHysRatio < 2.0. Heap shrinking hysteresis is disabled/removed
+when KHeapShrinkHysRatio <= 1.0.
+
+The constant can be changed at ROM build time using patchdata OBY keyword.
+*/
+IMPORT_D extern const TInt KHeapShrinkHysRatio;
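+
+/*
+Worked example of the Q8 arithmetic described above (illustrative values only): with
+KHeapShrinkHysRatio = 0x200 (2.0) and a page-aligned iGrowBy of 4096 bytes, the trim
+threshold becomes 0x200*(4096>>8) = 512*16 = 8192 bytes, so the heap is only shrunk
+once the free top space exceeds that amount (see InitTop() and SHOULD_TRIM()).
+*/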
+
+UEXPORT_C TInt RHeap::AllocLen(const TAny* aCell) const
+{
+ const MAllocator* m = this;
+ return m->AllocLen(aCell);
+}
+
+UEXPORT_C TAny* RHeap::Alloc(TInt aSize)
+{
+ const MAllocator* m = this;
+ return ((MAllocator*)m)->Alloc(aSize);
+}
+
+UEXPORT_C void RHeap::Free(TAny* aCell)
+{
+ const MAllocator* m = this;
+ ((MAllocator*)m)->Free(aCell);
+}
+
+UEXPORT_C TAny* RHeap::ReAlloc(TAny* aCell, TInt aSize, TInt aMode)
+{
+ const MAllocator* m = this;
+ return ((MAllocator*)m)->ReAlloc(aCell, aSize, aMode);
+}
+
+UEXPORT_C TInt RHeap::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
+{
+ const MAllocator* m = this;
+ return ((MAllocator*)m)->DebugFunction(aFunc, a1, a2);
+}
+
+UEXPORT_C TInt RHeap::Extension_(TUint aExtensionId, TAny*& a0, TAny* a1)
+{
+ const MAllocator* m = this;
+ return ((MAllocator*)m)->Extension_(aExtensionId, a0, a1);
+}
+
+#ifndef __KERNEL_MODE__
+
+EXPORT_C TInt RHeap::AllocSize(TInt& aTotalAllocSize) const
+{
+ const MAllocator* m = this;
+ return m->AllocSize(aTotalAllocSize);
+}
+
+EXPORT_C TInt RHeap::Available(TInt& aBiggestBlock) const
+{
+ const MAllocator* m = this;
+ return m->Available(aBiggestBlock);
+}
+
+EXPORT_C void RHeap::Reset()
+{
+ const MAllocator* m = this;
+ ((MAllocator*)m)->Reset();
+}
+
+EXPORT_C TInt RHeap::Compress()
+{
+ const MAllocator* m = this;
+ return ((MAllocator*)m)->Compress();
+}
+#endif
+
+RHybridHeap::RHybridHeap()
+ {
+ // This initialisation cannot be done in RHeap() for compatibility reasons
+ iMaxLength = iChunkHandle = iNestingLevel = 0;
+ iTop = NULL;
+ iFailType = ENone;
+ iTestData = NULL;
+ }
+
+void RHybridHeap::operator delete(TAny*, TAny*)
+/**
+Called if a constructor invoked via operator new(TUint aSize, TAny* aBase) throws an exception.
+This is a dummy implementation, as the corresponding operator new does not allocate memory.
+*/
+{}
+
+
+#ifndef __KERNEL_MODE__
+void RHybridHeap::Lock() const
+ /**
+ @internalComponent
+*/
+ {((RFastLock&)iLock).Wait();}
+
+
+void RHybridHeap::Unlock() const
+ /**
+ @internalComponent
+*/
+ {((RFastLock&)iLock).Signal();}
+
+
+TInt RHybridHeap::ChunkHandle() const
+ /**
+ @internalComponent
+*/
+{
+ return iChunkHandle;
+}
+
+#else
+//
+// This method is implemented in kheap.cpp
+//
+//void RHybridHeap::Lock() const
+ /**
+ @internalComponent
+*/
+// {;}
+
+
+
+//
+// This method is implemented in kheap.cpp
+//
+//void RHybridHeap::Unlock() const
+ /**
+ @internalComponent
+*/
+// {;}
+
+
+TInt RHybridHeap::ChunkHandle() const
+ /**
+ @internalComponent
+*/
+{
+ return 0;
+}
+#endif
+
+RHybridHeap::RHybridHeap(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust)
+/**
+Constructor for a non-fixed heap. Unlike the fixed heap, this heap is quite flexible in terms of its minimum and
+maximum lengths and in that it can use the hybrid allocator if all of its requirements are met.
+*/
+ : iOffset(aOffset), iChunkSize(aMinLength)
+ {
+ __ASSERT_ALWAYS(iOffset>=0, HEAP_PANIC(ETHeapNewBadOffset));
+
+ iChunkHandle = aChunkHandle;
+ iMinLength = aMinLength;
+ iMaxLength = aMaxLength;
+
+ // If the user has explicitly specified 0 as the aGrowBy value, set it to 1 so that it will be rounded up to the nearest page size
+ if (aGrowBy == 0)
+ aGrowBy = 1;
+ GET_PAGE_SIZE(iPageSize);
+ iGrowBy = _ALIGN_UP(aGrowBy, iPageSize);
+
+ Construct(aSingleThread, aDLOnly, aUseAdjust, aAlign);
+ }
+
+RHybridHeap::RHybridHeap(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
+/**
+Constructor for a fixed heap. Its minimum and maximum lengths are fixed, it cannot grow,
+and it uses only the DL allocator.
+*/
+ : iOffset(0), iChunkSize(aMaxLength)
+ {
+ iChunkHandle = NULL;
+ iMinLength = aMaxLength;
+ iMaxLength = aMaxLength;
+ iGrowBy = 0;
+
+ Construct(aSingleThread, ETrue, ETrue, aAlign);
+ }
+
+TAny* RHybridHeap::operator new(TUint aSize, TAny* aBase) __NO_THROW
+{
+ __ASSERT_ALWAYS(aSize>=sizeof(RHybridHeap), HEAP_PANIC(ETHeapNewBadSize));
+ RHybridHeap* h = (RHybridHeap*)aBase;
+ h->iBase = ((TUint8*)aBase) + aSize;
+ return aBase;
+}
+
+void RHybridHeap::Construct(TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust, TInt aAlign)
+{
+ iAlign = aAlign ? aAlign : RHybridHeap::ECellAlignment;
+ __ASSERT_ALWAYS((TUint32)iAlign>=sizeof(TAny*) && __POWER_OF_2(iAlign), HEAP_PANIC(ETHeapNewBadAlignment));
+
+ // This initialisation cannot be done in RHeap() for compatibility reasons
+ iTop = NULL;
+ iFailType = ENone;
+ iNestingLevel = 0;
+ iTestData = NULL;
+
+ iHighWaterMark = iMinLength;
+ iAllocCount = 0;
+ iFlags = aSingleThread ? ESingleThreaded : 0;
+ iGrowBy = _ALIGN_UP(iGrowBy, iPageSize);
+
+ if ( iMinLength == iMaxLength )
+ {
+ iFlags |= EFixedSize;
+ aDLOnly = ETrue;
+ }
+#ifndef __KERNEL_MODE__
+#ifdef DELAYED_SLAB_THRESHOLD
+ iSlabInitThreshold = DELAYED_SLAB_THRESHOLD;
+#else
+ iSlabInitThreshold = 0;
+#endif // DELAYED_SLAB_THRESHOLD
+ iUseAdjust = aUseAdjust;
+ iDLOnly = aDLOnly;
+#else
+ (void)aUseAdjust;
+#endif
+ // Initialise suballocators
+ // If DL-only operation is required, slab and page memory cannot be allocated,
+ // so those sub-allocators are disabled. Otherwise initialise them with default values.
+ if ( aDLOnly )
+ {
+ Init(0, 0);
+ }
+ else
+ {
+ Init(SLAB_CONFIG, 16);
+ }
+
+#ifdef ENABLE_BTRACE
+
+ TUint32 traceData[4];
+ traceData[0] = iMinLength;
+ traceData[1] = iMaxLength;
+ traceData[2] = iGrowBy;
+ traceData[3] = iAlign;
+ BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 11, traceData, sizeof(traceData));
+#endif
+
+}
+
+#ifndef __KERNEL_MODE__
+TInt RHybridHeap::ConstructLock(TUint32 aMode)
+{
+ TBool duplicateLock = EFalse;
+ TInt r = KErrNone;
+ if (!(iFlags & ESingleThreaded))
+ {
+ duplicateLock = aMode & UserHeap::EChunkHeapSwitchTo;
+ r = iLock.CreateLocal(duplicateLock ? EOwnerThread : EOwnerProcess);
+ if( r != KErrNone)
+ {
+ iChunkHandle = 0;
+ return r;
+ }
+ }
+
+ if ( aMode & UserHeap::EChunkHeapSwitchTo )
+ User::SwitchHeap(this);
+
+ iHandles = &iChunkHandle;
+ if (!(iFlags & ESingleThreaded))
+ {
+ // now change the thread-relative chunk/semaphore handles into process-relative handles
+ iHandleCount = 2;
+ if(duplicateLock)
+ {
+ RHandleBase s = iLock;
+ r = iLock.Duplicate(RThread());
+ s.Close();
+ }
+ if (r==KErrNone && (aMode & UserHeap::EChunkHeapDuplicate))
+ {
+ r = ((RChunk*)&iChunkHandle)->Duplicate(RThread());
+ if (r!=KErrNone)
+ iLock.Close(), iChunkHandle=0;
+ }
+ }
+ else
+ {
+ iHandleCount = 1;
+ if (aMode & UserHeap::EChunkHeapDuplicate)
+ r = ((RChunk*)&iChunkHandle)->Duplicate(RThread(), EOwnerThread);
+ }
+
+ return r;
+}
+#endif
+
+void RHybridHeap::Init(TInt aBitmapSlab, TInt aPagePower)
+{
+ /*Moved code which does initialisation */
+ iTop = (TUint8*)this + iMinLength;
+ iBase = Ceiling(iBase, ECellAlignment); // Align iBase address
+
+ __INIT_COUNTERS(0);
+ // memset(&mparams,0,sizeof(mparams));
+
+ InitDlMalloc(iTop - iBase, 0);
+
+#ifndef __KERNEL_MODE__
+ SlabInit();
+ iSlabConfigBits = aBitmapSlab;
+ if ( iChunkSize > iSlabInitThreshold )
+ {
+ iSlabInitThreshold = KMaxTInt32;
+ SlabConfig(aBitmapSlab); // Delayed slab configuration done
+ }
+ if ( aPagePower )
+ {
+ RChunk chunk;
+ chunk.SetHandle(iChunkHandle);
+ iMemBase = chunk.Base(); // Store base address for paged allocator
+ }
+
+ /*10-1K,11-2K,12-4k,13-8K,14-16K,15-32K,16-64K*/
+ PagedInit(aPagePower);
+
+#ifdef ENABLE_BTRACE
+ TUint32 traceData[3];
+ traceData[0] = aBitmapSlab;
+ traceData[1] = aPagePower;
+ traceData[2] = GM->iTrimCheck;
+ BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 0, traceData, sizeof(traceData));
+#endif
+#else
+ (void)aBitmapSlab;
+ (void)aPagePower;
+#endif // __KERNEL_MODE__
+
+}
+
+
+TInt RHybridHeap::AllocLen(const TAny* aCell) const
+{
+ aCell = __GET_DEBUG_DATA_BFR(aCell);
+
+ if (PtrDiff(aCell, this) >= 0)
+ {
+ mchunkptr m = MEM2CHUNK(aCell);
+ return CHUNKSIZE(m) - OVERHEAD_FOR(m) - __DEBUG_HDR_SIZE;
+ }
+#ifndef __KERNEL_MODE__
+ if ( aCell )
+ {
+ if (LowBits(aCell, iPageSize) )
+ return SlabHeaderSize(slab::SlabFor(aCell)->iHeader) - __DEBUG_HDR_SIZE;
+
+ return PagedSize((void*)aCell) - __DEBUG_HDR_SIZE;
+ }
+#endif
+ return 0; // NULL pointer situation, should PANIC !!
+}
+
+#ifdef __KERNEL_MODE__
+TAny* RHybridHeap::Alloc(TInt aSize)
+{
+ __CHECK_THREAD_STATE;
+ __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2),HEAP_PANIC(ETHeapBadAllocatedCellSize));
+ __SIMULATE_ALLOC_FAIL(return NULL;)
+ Lock();
+ __ALLOC_DEBUG_HEADER(aSize);
+ TAny* addr = DlMalloc(aSize);
+ if ( addr )
+ {
+// iCellCount++;
+ __SET_DEBUG_DATA(addr, iNestingLevel, ++iAllocCount);
+ addr = __GET_USER_DATA_BFR(addr);
+ __INCREMENT_COUNTERS(addr);
+ memclr(addr, AllocLen(addr));
+ }
+ Unlock();
+#ifdef ENABLE_BTRACE
+ if (iFlags & ETraceAllocs)
+ {
+ if ( addr )
+ {
+ TUint32 traceData[3];
+ traceData[0] = AllocLen(addr);
+ traceData[1] = aSize - __DEBUG_HDR_SIZE;
+ traceData[2] = 0;
+ BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
+ }
+ else
+ BTraceContext8(BTrace::EHeap, BTrace::EHeapAllocFail, (TUint32)this, (TUint32)(aSize - __DEBUG_HDR_SIZE));
+ }
+#endif
+ return addr;
+}
+#else
+
+TAny* RHybridHeap::Alloc(TInt aSize)
+{
+ __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2),HEAP_PANIC(ETHeapBadAllocatedCellSize));
+ __SIMULATE_ALLOC_FAIL(return NULL;)
+
+ TAny* addr;
+#ifdef ENABLE_BTRACE
+ TInt aSubAllocator=0;
+#endif
+
+ Lock();
+
+ __ALLOC_DEBUG_HEADER(aSize);
+
+ if (aSize < iSlabThreshold)
+ {
+ TInt ix = iSizeMap[(aSize+3)>>2];
+ HEAP_ASSERT(ix != 0xff);
+ addr = SlabAllocate(iSlabAlloc[ix]);
+ if ( !addr )
+ { // Slab allocation has failed, try to allocate from DL
+ addr = DlMalloc(aSize);
+ }
+#ifdef ENABLE_BTRACE
+ else
+ aSubAllocator=1;
+#endif
+ }else if((aSize >> iPageThreshold)==0)
+ {
+ addr = DlMalloc(aSize);
+ }
+ else
+ {
+ addr = PagedAllocate(aSize);
+ if ( !addr )
+ { // Page allocation has failed, try to allocate from DL
+ addr = DlMalloc(aSize);
+ }
+#ifdef ENABLE_BTRACE
+ else
+ aSubAllocator=2;
+#endif
+ }
+
+ if ( addr )
+ {
+// iCellCount++;
+ __SET_DEBUG_DATA(addr, iNestingLevel, ++iAllocCount);
+ addr = __GET_USER_DATA_BFR(addr);
+ __INCREMENT_COUNTERS(addr);
+ }
+ Unlock();
+
+#ifdef ENABLE_BTRACE
+ if (iFlags & ETraceAllocs)
+ {
+ if ( addr )
+ {
+ TUint32 traceData[3];
+ traceData[0] = AllocLen(addr);
+ traceData[1] = aSize - __DEBUG_HDR_SIZE;
+ traceData[2] = aSubAllocator;
+ BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
+ }
+ else
+ BTraceContext8(BTrace::EHeap, BTrace::EHeapAllocFail, (TUint32)this, (TUint32)(aSize - __DEBUG_HDR_SIZE));
+ }
+#endif
+
+ return addr;
+}
+#endif // __KERNEL_MODE__
+
+#ifndef __KERNEL_MODE__
+TInt RHybridHeap::Compress()
+{
+ if ( IS_FIXED_HEAP )
+ return 0;
+
+ Lock();
+ TInt Reduced = SysTrim(GM, 0);
+ if (iSparePage)
+ {
+ Unmap(iSparePage, iPageSize);
+ iSparePage = 0;
+ Reduced += iPageSize;
+ }
+ Unlock();
+ return Reduced;
+}
+#endif
+
+void RHybridHeap::Free(TAny* aPtr)
+{
+ __CHECK_THREAD_STATE;
+ if ( !aPtr )
+ return;
+#ifdef ENABLE_BTRACE
+ TInt aSubAllocator=0;
+#endif
+ Lock();
+
+ aPtr = __GET_DEBUG_DATA_BFR(aPtr);
+
+#ifndef __KERNEL_MODE__
+ if (PtrDiff(aPtr, this) >= 0)
+ {
+#endif
+ __DL_BFR_CHECK(GM, aPtr);
+ __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
+ __ZAP_CELL(aPtr);
+ DlFree( aPtr);
+#ifndef __KERNEL_MODE__
+ }
+
+ else if ( LowBits(aPtr, iPageSize) == 0 )
+ {
+#ifdef ENABLE_BTRACE
+ aSubAllocator = 2;
+#endif
+ __PAGE_BFR_CHECK(aPtr);
+ __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
+ PagedFree(aPtr);
+ }
+ else
+ {
+#ifdef ENABLE_BTRACE
+ aSubAllocator = 1;
+#endif
+ TUint32 bm[4];
+ __SLAB_BFR_CHECK(slab::SlabFor(aPtr),aPtr,bm);
+ __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
+ __ZAP_CELL(aPtr);
+ SlabFree(aPtr);
+ }
+#endif // __KERNEL_MODE__
+// iCellCount--;
+ Unlock();
+#ifdef ENABLE_BTRACE
+ if (iFlags & ETraceAllocs)
+ {
+ TUint32 traceData;
+ traceData = aSubAllocator;
+ BTraceContextN(BTrace::EHeap, BTrace::EHeapFree, (TUint32)this, (TUint32)__GET_USER_DATA_BFR(aPtr), &traceData, sizeof(traceData));
+ }
+#endif
+}
+
+#ifndef __KERNEL_MODE__
+void RHybridHeap::Reset()
+/**
+Frees all allocated cells on this heap.
+*/
+{
+ Lock();
+ if ( !IS_FIXED_HEAP )
+ {
+ if ( GM->iSeg.iSize > (iMinLength - sizeof(*this)) )
+ Unmap(GM->iSeg.iBase + (iMinLength - sizeof(*this)), (GM->iSeg.iSize - (iMinLength - sizeof(*this))));
+ ResetBitmap();
+ if ( !iDLOnly )
+ Init(iSlabConfigBits, iPageThreshold);
+ else
+ Init(0,0);
+ }
+ else Init(0,0);
+ Unlock();
+}
+#endif
+
+TAny* RHybridHeap::ReAllocImpl(TAny* aPtr, TInt aSize, TInt aMode)
+{
+ // First handle special case of calling reallocate with NULL aPtr
+ if (!aPtr)
+ {
+ if (( aMode & ENeverMove ) == 0 )
+ {
+ aPtr = Alloc(aSize - __DEBUG_HDR_SIZE);
+ aPtr = __GET_DEBUG_DATA_BFR(aPtr);
+ }
+ return aPtr;
+ }
+
+ TInt oldsize = AllocLen(__GET_USER_DATA_BFR(aPtr)) + __DEBUG_HDR_SIZE;
+
+ // Insist on geometric growth when reallocating memory; this reduces the copying and fragmentation
+ // generated during arithmetic growth of buffer/array/vector memory
+ // Experiments have shown that 25% is a good threshold for this policy
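+ // For illustration (ignoring the debug header): with oldsize == 100, shrink requests
+ // down to 75 bytes return the original cell unchanged, while grow requests below
+ // 125 bytes are first rounded up to _ALIGN_UP(125, 4) == 128 bytes.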
+ if (aSize <= oldsize)
+ {
+ if (aSize >= oldsize - (oldsize>>2))
+ return aPtr; // don't change if >75% original size
+ }
+ else
+ {
+ __SIMULATE_ALLOC_FAIL(return NULL;)
+ if (aSize < oldsize + (oldsize>>2))
+ {
+ aSize = _ALIGN_UP(oldsize + (oldsize>>2), 4); // grow to at least 125% original size
+ }
+ }
+ __DEBUG_SAVE(aPtr);
+
+ TAny* newp;
+#ifdef __KERNEL_MODE__
+ Lock();
+ __DL_BFR_CHECK(GM, aPtr);
+ newp = DlRealloc(aPtr, aSize, aMode);
+ Unlock();
+ if ( newp )
+ {
+ if ( aSize > oldsize )
+ memclr(((TUint8*)newp) + oldsize, (aSize-oldsize)); // Buffer has grown in place, clear extra
+ __DEBUG_RESTORE(newp);
+ __UPDATE_ALLOC_COUNT(aPtr, newp, ++iAllocCount);
+ __UPDATE_TOTAL_ALLOC(newp, oldsize);
+ }
+#else
+ // Decide how to reallocate based on (a) the current cell location, (b) the mode requested and (c) the new size
+ if ( PtrDiff(aPtr, this) >= 0 )
+ { // current cell in Doug Lea iArena
+ if ( (aMode & ENeverMove)
+ ||
+ (!(aMode & EAllowMoveOnShrink) && (aSize < oldsize))
+ ||
+ ((aSize >= iSlabThreshold) && ((aSize >> iPageThreshold) == 0)) )
+ {
+ Lock();
+ __DL_BFR_CHECK(GM, aPtr);
+ newp = DlRealloc(aPtr, aSize, aMode); // old and new in DL allocator
+ Unlock();
+ __DEBUG_RESTORE(newp);
+ __UPDATE_ALLOC_COUNT(aPtr,newp, ++iAllocCount);
+ __UPDATE_TOTAL_ALLOC(newp, oldsize);
+ return newp;
+ }
+ }
+ else if (LowBits(aPtr, iPageSize) == 0)
+ { // current cell in paged iArena
+ if ( (aMode & ENeverMove)
+ ||
+ (!(aMode & EAllowMoveOnShrink) && (aSize < oldsize))
+ ||
+ ((aSize >> iPageThreshold) != 0) )
+ {
+ Lock();
+ __PAGE_BFR_CHECK(aPtr);
+ newp = PagedReallocate(aPtr, aSize, aMode); // old and new in paged allocator
+ Unlock();
+ __DEBUG_RESTORE(newp);
+ __UPDATE_ALLOC_COUNT(aPtr,newp, ++iAllocCount);
+ __UPDATE_TOTAL_ALLOC(newp, oldsize);
+ return newp;
+ }
+ }
+ else
+ { // current cell in slab iArena
+ TUint32 bm[4];
+ Lock();
+ __SLAB_BFR_CHECK(slab::SlabFor(aPtr), aPtr, bm);
+ Unlock();
+ if ( aSize <= oldsize)
+ return aPtr;
+ if (aMode & ENeverMove)
+ return NULL; // cannot grow in slab iArena
+ // just use alloc/copy/free...
+ }
+
+ // fallback to allocate and copy
+ // shouldn't get here if we cannot move the cell
+ // __ASSERT(mode == emobile || (mode==efixshrink && size>oldsize));
+
+ newp = Alloc(aSize - __DEBUG_HDR_SIZE);
+ newp = __GET_DEBUG_DATA_BFR(newp);
+ if (newp)
+ {
+ memcpy(newp, aPtr, oldsize<aSize ? oldsize : aSize);
+ __DEBUG_RESTORE(newp);
+ Free(__GET_USER_DATA_BFR(aPtr));
+ }
+
+#endif // __KERNEL_MODE__
+ return newp;
+}
+
+
+TAny* RHybridHeap::ReAlloc(TAny* aPtr, TInt aSize, TInt aMode )
+{
+
+ aPtr = __GET_DEBUG_DATA_BFR(aPtr);
+ __ALLOC_DEBUG_HEADER(aSize);
+
+ TAny* retval = ReAllocImpl(aPtr, aSize, aMode);
+
+ retval = __GET_USER_DATA_BFR(retval);
+
+#ifdef ENABLE_BTRACE
+ if (iFlags & ETraceAllocs)
+ {
+ if ( retval )
+ {
+ TUint32 traceData[3];
+ traceData[0] = AllocLen(retval);
+ traceData[1] = aSize - __DEBUG_HDR_SIZE;
+ traceData[2] = (TUint32)aPtr;
+ BTraceContextN(BTrace::EHeap, BTrace::EHeapReAlloc,(TUint32)this, (TUint32)retval, traceData, sizeof(traceData));
+ }
+ else
+ BTraceContext12(BTrace::EHeap, BTrace::EHeapReAllocFail, (TUint32)this, (TUint32)aPtr, (TUint32)(aSize - __DEBUG_HDR_SIZE));
+ }
+#endif
+ return retval;
+}
+
+#ifndef __KERNEL_MODE__
+TInt RHybridHeap::Available(TInt& aBiggestBlock) const
+/**
+Gets the total free space currently available on the heap and the space
+available in the largest free block.
+
+Note that this function exists mainly for compatibility reasons. In a modern
+heap implementation such as that present in Symbian, it is not appropriate to
+concern oneself with details such as the amount of free memory available on a
+heap and its largest free block, because the way that a modern heap implementation
+works is not simple. The amount of available virtual memory is not the same as the
+amount of physical memory, and there are multiple allocation strategies used
+internally, which makes all
+memory usage figures "fuzzy" at best.
+
+In short, if you want to see if there is enough memory available to allocate a
+block of memory, call Alloc() and if it succeeds then there is enough memory!
+Messing around with functions like this is somewhat pointless with modern heap
+allocators.
+
+@param aBiggestBlock On return, contains the space available in the largest
+ free block on the heap. Due to the internals of modern
+ heap implementations, you can probably still allocate a
+ block larger than this!
+
+@return The total free space currently available on the heap. Again, you can
+ probably still allocate more than this!
+*/
+{
+ struct HeapInfo info;
+ Lock();
+ TInt Biggest = GetInfo(&info);
+ aBiggestBlock = __GET_AVAIL_BLOCK_SIZE(Biggest);
+ Unlock();
+ return __GET_AVAIL_BLOCK_SIZE(info.iFreeBytes);
+
+}
+
+TInt RHybridHeap::AllocSize(TInt& aTotalAllocSize) const
+ /**
+ Gets the number of cells allocated on this heap, and the total space
+ allocated to them.
+
+ @param aTotalAllocSize On return, contains the total space allocated
+ to the cells.
+
+ @return The number of cells allocated on this heap.
+*/
+{
+ struct HeapInfo info;
+ Lock();
+ GetInfo(&info);
+ aTotalAllocSize = info.iAllocBytes - __REMOVE_DBG_HDR(info.iAllocN);
+ Unlock();
+ return info.iAllocN;
+}
+
+#endif
+
+TInt RHybridHeap::Extension_(TUint /* aExtensionId */, TAny*& /* a0 */, TAny* /* a1 */)
+{
+ return KErrNotSupported;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// imported from dla.cpp
+///////////////////////////////////////////////////////////////////////////////
+
+//#include <unistd.h>
+//#define DEBUG_REALLOC
+#ifdef DEBUG_REALLOC
+#include <e32debug.h>
+#endif
+
+inline void RHybridHeap::InitBins(mstate m)
+{
+ /* Establish circular links for iSmallBins */
+ bindex_t i;
+ for (i = 0; i < NSMALLBINS; ++i) {
+ sbinptr bin = SMALLBIN_AT(m,i);
+ bin->iFd = bin->iBk = bin;
+ }
+ }
+/* ---------------------------- malloc support --------------------------- */
+
+/* allocate a large request from the best fitting chunk in a treebin */
+void* RHybridHeap::TmallocLarge(mstate m, size_t nb) {
+ tchunkptr v = 0;
+ size_t rsize = -nb; /* Unsigned negation */
+ tchunkptr t;
+ bindex_t idx;
+ ComputeTreeIndex(nb, idx);
+
+ if ((t = *TREEBIN_AT(m, idx)) != 0)
+ {
+ /* Traverse tree for this bin looking for node with size == nb */
+ size_t sizebits = nb << LEFTSHIFT_FOR_TREE_INDEX(idx);
+ tchunkptr rst = 0; /* The deepest untaken right subtree */
+ for (;;)
+ {
+ tchunkptr rt;
+ size_t trem = CHUNKSIZE(t) - nb;
+ if (trem < rsize)
+ {
+ v = t;
+ if ((rsize = trem) == 0)
+ break;
+ }
+ rt = t->iChild[1];
+ t = t->iChild[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ if (rt != 0 && rt != t)
+ rst = rt;
+ if (t == 0)
+ {
+ t = rst; /* set t to least subtree holding sizes > nb */
+ break;
+ }
+ sizebits <<= 1;
+ }
+ }
+ if (t == 0 && v == 0)
+ { /* set t to root of next non-empty treebin */
+ binmap_t leftbits = LEFT_BITS(IDX2BIT(idx)) & m->iTreeMap;
+ if (leftbits != 0)
+ {
+ bindex_t i;
+ binmap_t leastbit = LEAST_BIT(leftbits);
+ ComputeBit2idx(leastbit, i);
+ t = *TREEBIN_AT(m, i);
+ }
+ }
+ while (t != 0)
+ { /* Find smallest of tree or subtree */
+ size_t trem = CHUNKSIZE(t) - nb;
+ if (trem < rsize) {
+ rsize = trem;
+ v = t;
+ }
+ t = LEFTMOST_CHILD(t);
+ }
+ /* If iDv is a better fit, return 0 so malloc will use it */
+ if (v != 0 && rsize < (size_t)(m->iDvSize - nb))
+ {
+ if (RTCHECK(OK_ADDRESS(m, v)))
+ { /* split */
+ mchunkptr r = CHUNK_PLUS_OFFSET(v, nb);
+ HEAP_ASSERT(CHUNKSIZE(v) == rsize + nb);
+ if (RTCHECK(OK_NEXT(v, r)))
+ {
+ UnlinkLargeChunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE)
+ SET_INUSE_AND_PINUSE(m, v, (rsize + nb));
+ else
+ {
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, v, nb);
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
+ InsertChunk(m, r, rsize);
+ }
+ return CHUNK2MEM(v);
+ }
+ }
+ // CORRUPTION_ERROR_ACTION(m);
+ }
+ return 0;
+ }
+
+/* allocate a small request from the best fitting chunk in a treebin */
+void* RHybridHeap::TmallocSmall(mstate m, size_t nb)
+{
+ tchunkptr t, v;
+ size_t rsize;
+ bindex_t i;
+ binmap_t leastbit = LEAST_BIT(m->iTreeMap);
+ ComputeBit2idx(leastbit, i);
+
+ v = t = *TREEBIN_AT(m, i);
+ rsize = CHUNKSIZE(t) - nb;
+
+ while ((t = LEFTMOST_CHILD(t)) != 0)
+ {
+ size_t trem = CHUNKSIZE(t) - nb;
+ if (trem < rsize)
+ {
+ rsize = trem;
+ v = t;
+ }
+ }
+
+ if (RTCHECK(OK_ADDRESS(m, v)))
+ {
+ mchunkptr r = CHUNK_PLUS_OFFSET(v, nb);
+ HEAP_ASSERT(CHUNKSIZE(v) == rsize + nb);
+ if (RTCHECK(OK_NEXT(v, r)))
+ {
+ UnlinkLargeChunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE)
+ SET_INUSE_AND_PINUSE(m, v, (rsize + nb));
+ else
+ {
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, v, nb);
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
+ ReplaceDv(m, r, rsize);
+ }
+ return CHUNK2MEM(v);
+ }
+ }
+ // CORRUPTION_ERROR_ACTION(m);
+ return 0;
+ }
+
+inline void RHybridHeap::InitTop(mstate m, mchunkptr p, size_t psize)
+{
+ /* Ensure alignment */
+ size_t offset = ALIGN_OFFSET(CHUNK2MEM(p));
+ p = (mchunkptr)((TUint8*)p + offset);
+ psize -= offset;
+ m->iTop = p;
+ m->iTopSize = psize;
+ p->iHead = psize | PINUSE_BIT;
+ /* set size of fake trailing chunk holding overhead space only once */
+ mchunkptr chunkPlusOff = CHUNK_PLUS_OFFSET(p, psize);
+ chunkPlusOff->iHead = TOP_FOOT_SIZE;
+ m->iTrimCheck = KHeapShrinkHysRatio*(iGrowBy>>8);
+}
+
+
+/* Unlink the first chunk from a smallbin */
+inline void RHybridHeap::UnlinkFirstSmallChunk(mstate M,mchunkptr B,mchunkptr P,bindex_t& I)
+{
+ mchunkptr F = P->iFd;
+ HEAP_ASSERT(P != B);
+ HEAP_ASSERT(P != F);
+ HEAP_ASSERT(CHUNKSIZE(P) == SMALL_INDEX2SIZE(I));
+ if (B == F)
+ CLEAR_SMALLMAP(M, I);
+ else if (RTCHECK(OK_ADDRESS(M, F)))
+ {
+ B->iFd = F;
+ F->iBk = B;
+ }
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ }
+}
+/* Link a free chunk into a smallbin */
+inline void RHybridHeap::InsertSmallChunk(mstate M,mchunkptr P, size_t S)
+{
+ bindex_t I = SMALL_INDEX(S);
+ mchunkptr B = SMALLBIN_AT(M, I);
+ mchunkptr F = B;
+ HEAP_ASSERT(S >= MIN_CHUNK_SIZE);
+ if (!SMALLMAP_IS_MARKED(M, I))
+ MARK_SMALLMAP(M, I);
+ else if (RTCHECK(OK_ADDRESS(M, B->iFd)))
+ F = B->iFd;
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ B->iFd = P;
+ F->iBk = P;
+ P->iFd = F;
+ P->iBk = B;
+}
+
+
+inline void RHybridHeap::InsertChunk(mstate M,mchunkptr P,size_t S)
+{
+ if (IS_SMALL(S))
+ InsertSmallChunk(M, P, S);
+ else
+ {
+ tchunkptr TP = (tchunkptr)(P); InsertLargeChunk(M, TP, S);
+ }
+}
+
+inline void RHybridHeap::UnlinkLargeChunk(mstate M,tchunkptr X)
+{
+ tchunkptr XP = X->iParent;
+ tchunkptr R;
+ if (X->iBk != X)
+ {
+ tchunkptr F = X->iFd;
+ R = X->iBk;
+ if (RTCHECK(OK_ADDRESS(M, F)))
+ {
+ F->iBk = R;
+ R->iFd = F;
+ }
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ }
+ else
+ {
+ tchunkptr* RP;
+ if (((R = *(RP = &(X->iChild[1]))) != 0) ||
+ ((R = *(RP = &(X->iChild[0]))) != 0))
+ {
+ tchunkptr* CP;
+ while ((*(CP = &(R->iChild[1])) != 0) ||
+ (*(CP = &(R->iChild[0])) != 0))
+ {
+ R = *(RP = CP);
+ }
+ if (RTCHECK(OK_ADDRESS(M, RP)))
+ *RP = 0;
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ }
+ }
+ if (XP != 0)
+ {
+ tbinptr* H = TREEBIN_AT(M, X->iIndex);
+ if (X == *H)
+ {
+ if ((*H = R) == 0)
+ CLEAR_TREEMAP(M, X->iIndex);
+ }
+ else if (RTCHECK(OK_ADDRESS(M, XP)))
+ {
+ if (XP->iChild[0] == X)
+ XP->iChild[0] = R;
+ else
+ XP->iChild[1] = R;
+ }
+ else
+ CORRUPTION_ERROR_ACTION(M);
+ if (R != 0)
+ {
+ if (RTCHECK(OK_ADDRESS(M, R)))
+ {
+ tchunkptr C0, C1;
+ R->iParent = XP;
+ if ((C0 = X->iChild[0]) != 0)
+ {
+ if (RTCHECK(OK_ADDRESS(M, C0)))
+ {
+ R->iChild[0] = C0;
+ C0->iParent = R;
+ }
+ else
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ if ((C1 = X->iChild[1]) != 0)
+ {
+ if (RTCHECK(OK_ADDRESS(M, C1)))
+ {
+ R->iChild[1] = C1;
+ C1->iParent = R;
+ }
+ else
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ }
+ else
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ }
+}
+
+/* Unlink a chunk from a smallbin */
+inline void RHybridHeap::UnlinkSmallChunk(mstate M, mchunkptr P,size_t S)
+{
+ mchunkptr F = P->iFd;
+ mchunkptr B = P->iBk;
+ bindex_t I = SMALL_INDEX(S);
+ HEAP_ASSERT(P != B);
+ HEAP_ASSERT(P != F);
+ HEAP_ASSERT(CHUNKSIZE(P) == SMALL_INDEX2SIZE(I));
+ if (F == B)
+ CLEAR_SMALLMAP(M, I);
+ else if (RTCHECK((F == SMALLBIN_AT(M,I) || OK_ADDRESS(M, F)) &&
+ (B == SMALLBIN_AT(M,I) || OK_ADDRESS(M, B))))
+ {
+ F->iBk = B;
+ B->iFd = F;
+ }
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ }
+}
+
+inline void RHybridHeap::UnlinkChunk(mstate M, mchunkptr P, size_t S)
+{
+ if (IS_SMALL(S))
+ UnlinkSmallChunk(M, P, S);
+ else
+ {
+ tchunkptr TP = (tchunkptr)(P); UnlinkLargeChunk(M, TP);
+ }
+}
+
+// For DL debug functions
+void RHybridHeap::DoComputeTreeIndex(size_t S, bindex_t& I)
+{
+ ComputeTreeIndex(S, I);
+}
+
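+/*
+ ComputeTreeIndex maps a chunk size S to one of the NTREEBINS treebins: the result is
+ roughly 2*floor(log2(S >> TREEBIN_SHIFT)) plus one extra bit that splits each
+ power-of-two range in half. For example (with TREEBIN_SHIFT == 8), S == 512 and
+ S == 768 yield indices 2 and 3 respectively, i.e. [512,768) and [768,1024) use
+ separate bins.
+*/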
+inline void RHybridHeap::ComputeTreeIndex(size_t S, bindex_t& I)
+{
+ size_t X = S >> TREEBIN_SHIFT;
+ if (X == 0)
+ I = 0;
+ else if (X > 0xFFFF)
+ I = NTREEBINS-1;
+ else
+ {
+ unsigned int Y = (unsigned int)X;
+ unsigned int N = ((Y - 0x100) >> 16) & 8;
+ unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
+ N += K;
+ N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
+ K = 14 - N + ((Y <<= K) >> 15);
+ I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));
+ }
+}
+
+/* ------------------------- Operations on trees ------------------------- */
+
+/* Insert chunk into tree */
+inline void RHybridHeap::InsertLargeChunk(mstate M,tchunkptr X,size_t S)
+{
+ tbinptr* H;
+ bindex_t I;
+ ComputeTreeIndex(S, I);
+ H = TREEBIN_AT(M, I);
+ X->iIndex = I;
+ X->iChild[0] = X->iChild[1] = 0;
+ if (!TREEMAP_IS_MARKED(M, I))
+ {
+ MARK_TREEMAP(M, I);
+ *H = X;
+ X->iParent = (tchunkptr)H;
+ X->iFd = X->iBk = X;
+ }
+ else
+ {
+ tchunkptr T = *H;
+ size_t K = S << LEFTSHIFT_FOR_TREE_INDEX(I);
+ for (;;)
+ {
+ if (CHUNKSIZE(T) != S) {
+ tchunkptr* C = &(T->iChild[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);
+ K <<= 1;
+ if (*C != 0)
+ T = *C;
+ else if (RTCHECK(OK_ADDRESS(M, C)))
+ {
+ *C = X;
+ X->iParent = T;
+ X->iFd = X->iBk = X;
+ break;
+ }
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ break;
+ }
+ }
+ else
+ {
+ tchunkptr F = T->iFd;
+ if (RTCHECK(OK_ADDRESS(M, T) && OK_ADDRESS(M, F)))
+ {
+ T->iFd = F->iBk = X;
+ X->iFd = F;
+ X->iBk = T;
+ X->iParent = 0;
+ break;
+ }
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ break;
+ }
+ }
+ }
+ }
+}
+
+/*
+Unlink steps:
+
+1. If x is a chained node, unlink it from its same-sized iFd/iBk links
+and choose its iBk node as its replacement.
+2. If x was the last node of its size, but not a leaf node, it must
+be replaced with a leaf node (not merely one with an open left or
+right), to make sure that lefts and rights of descendents
+correspond properly to bit masks. We use the rightmost descendent
+of x. We could use any other leaf, but this is easy to locate and
+tends to counteract removal of leftmosts elsewhere, and so keeps
+paths shorter than minimally guaranteed. This doesn't loop much
+because on average a node in a tree is near the bottom.
+3. If x is the base of a chain (i.e., has iParent links) relink
+x's iParent and children to x's replacement (or null if none).
+*/
+
+/* Replace iDv node, binning the old one */
+/* Used only when iDvSize known to be small */
+inline void RHybridHeap::ReplaceDv(mstate M, mchunkptr P, size_t S)
+{
+ size_t DVS = M->iDvSize;
+ if (DVS != 0)
+ {
+ mchunkptr DV = M->iDv;
+ HEAP_ASSERT(IS_SMALL(DVS));
+ InsertSmallChunk(M, DV, DVS);
+ }
+ M->iDvSize = S;
+ M->iDv = P;
+}
+
+
+inline void RHybridHeap::ComputeBit2idx(binmap_t X,bindex_t& I)
+{
+ unsigned int Y = X - 1;
+ unsigned int K = Y >> (16-4) & 16;
+ unsigned int N = K; Y >>= K;
+ N += K = Y >> (8-3) & 8; Y >>= K;
+ N += K = Y >> (4-2) & 4; Y >>= K;
+ N += K = Y >> (2-1) & 2; Y >>= K;
+ N += K = Y >> (1-0) & 1; Y >>= K;
+ I = (bindex_t)(N + Y);
+}
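+
+/*
+ ComputeBit2idx converts a bitmap with a single bit set (as produced by LEAST_BIT)
+ into the index of that bit; for example X == 0x20 yields I == 5.
+*/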
+
+
+
+int RHybridHeap::SysTrim(mstate m, size_t pad)
+{
+ size_t extra = 0;
+
+ if ( IS_INITIALIZED(m) )
+ {
+ pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
+
+ if (m->iTopSize > pad)
+ {
+ extra = Floor(m->iTopSize - pad, iPageSize);
+ if ( (m->iSeg.iSize - extra) < (iMinLength - sizeof(*this)) )
+ {
+ if ( m->iSeg.iSize > (iMinLength - sizeof(*this)) )
+ extra = Floor(m->iSeg.iSize - (iMinLength - sizeof(*this)), iPageSize); /* do not shrink heap below min length */
+ else extra = 0;
+ }
+
+ if ( extra )
+ {
+ Unmap(m->iSeg.iBase + m->iSeg.iSize - extra, extra);
+
+ m->iSeg.iSize -= extra;
+ InitTop(m, m->iTop, m->iTopSize - extra);
+ CHECK_TOP_CHUNK(m, m->iTop);
+ }
+ }
+
+ }
+
+ return extra;
+}
+
+/* Get memory from system using MORECORE */
+
+void* RHybridHeap::SysAlloc(mstate m, size_t nb)
+{
+ HEAP_ASSERT(m->iTop);
+ /* Subtract out existing available iTop space from MORECORE request. */
+// size_t asize = _ALIGN_UP(nb - m->iTopSize + TOP_FOOT_SIZE + SIZE_T_ONE, iGrowBy);
+ TInt asize = _ALIGN_UP(nb - m->iTopSize + SYS_ALLOC_PADDING, iGrowBy); // From DLA version 2.8.4
+
+ char* br = (char*)Map(m->iSeg.iBase+m->iSeg.iSize, asize);
+ if (!br)
+ return 0;
+ HEAP_ASSERT(br == (char*)m->iSeg.iBase+m->iSeg.iSize);
+
+ /* Merge with an existing segment */
+ m->iSeg.iSize += asize;
+ InitTop(m, m->iTop, m->iTopSize + asize);
+
+ if (nb < m->iTopSize)
+ { /* Allocate from new or extended iTop space */
+ size_t rsize = m->iTopSize -= nb;
+ mchunkptr p = m->iTop;
+ mchunkptr r = m->iTop = CHUNK_PLUS_OFFSET(p, nb);
+ r->iHead = rsize | PINUSE_BIT;
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, p, nb);
+ CHECK_TOP_CHUNK(m, m->iTop);
+ CHECK_MALLOCED_CHUNK(m, CHUNK2MEM(p), nb);
+ return CHUNK2MEM(p);
+ }
+
+ return 0;
+}
+
+
+void RHybridHeap::InitDlMalloc(size_t capacity, int /*locked*/)
+{
+ memset(GM,0,sizeof(malloc_state));
+ // The maximum amount that can be allocated can be calculated as:-
+ // 2^sizeof(size_t) - sizeof(malloc_state) - TOP_FOOT_SIZE - page Size(all accordingly padded)
+ // If the capacity exceeds this, no allocation will be done.
+ GM->iSeg.iBase = iBase;
+ GM->iSeg.iSize = capacity;
+ InitBins(GM);
+ InitTop(GM, (mchunkptr)iBase, capacity - TOP_FOOT_SIZE);
+}
+
+void* RHybridHeap::DlMalloc(size_t bytes)
+{
+ /*
+ Basic algorithm:
+ If a small request (< 256 bytes minus per-chunk overhead):
+ 1. If one exists, use a remainderless chunk in associated smallbin.
+ (Remainderless means that there are too few excess bytes to
+ represent as a chunk.)
+ 2. If it is big enough, use the iDv chunk, which is normally the
+ chunk adjacent to the one used for the most recent small request.
+ 3. If one exists, split the smallest available chunk in a bin,
+ saving remainder in iDv.
+ 4. If it is big enough, use the iTop chunk.
+ 5. If available, get memory from system and use it
+ Otherwise, for a large request:
+ 1. Find the smallest available binned chunk that fits, and use it
+ if it is better fitting than iDv chunk, splitting if necessary.
+ 2. If better fitting than any binned chunk, use the iDv chunk.
+ 3. If it is big enough, use the iTop chunk.
+ 4. If request size >= mmap threshold, try to directly mmap this chunk.
+ 5. If available, get memory from system and use it
+*/
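+ /*
+ Illustration of the small-request sizing (assuming 8-byte alignment and a one-word
+ CHUNK_OVERHEAD, both defined outside this file): a 40-byte request pads to nb == 48,
+ i.e. SMALL_INDEX(nb) == 6, so smallbin 6 (or 7) is tried first, with the iDv chunk,
+ larger bins, the iTop chunk and finally SysAlloc() as the fallbacks described above.
+ */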
+ void* mem;
+ size_t nb;
+ if (bytes <= MAX_SMALL_REQUEST)
+ {
+ bindex_t idx;
+ binmap_t smallbits;
+ nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : PAD_REQUEST(bytes);
+ idx = SMALL_INDEX(nb);
+ smallbits = GM->iSmallMap >> idx;
+
+ if ((smallbits & 0x3U) != 0)
+ { /* Remainderless fit to a smallbin. */
+ mchunkptr b, p;
+ idx += ~smallbits & 1; /* Uses next bin if idx empty */
+ b = SMALLBIN_AT(GM, idx);
+ p = b->iFd;
+ HEAP_ASSERT(CHUNKSIZE(p) == SMALL_INDEX2SIZE(idx));
+ UnlinkFirstSmallChunk(GM, b, p, idx);
+ SET_INUSE_AND_PINUSE(GM, p, SMALL_INDEX2SIZE(idx));
+ mem = CHUNK2MEM(p);
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+
+ else if (nb > GM->iDvSize)
+ {
+ if (smallbits != 0)
+ { /* Use chunk in next nonempty smallbin */
+ mchunkptr b, p, r;
+ size_t rsize;
+ bindex_t i;
+ binmap_t leftbits = (smallbits << idx) & LEFT_BITS(IDX2BIT(idx));
+ binmap_t leastbit = LEAST_BIT(leftbits);
+ ComputeBit2idx(leastbit, i);
+ b = SMALLBIN_AT(GM, i);
+ p = b->iFd;
+ HEAP_ASSERT(CHUNKSIZE(p) == SMALL_INDEX2SIZE(i));
+ UnlinkFirstSmallChunk(GM, b, p, i);
+ rsize = SMALL_INDEX2SIZE(i) - nb;
+ /* Fit here cannot be remainderless if 4byte sizes */
+ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
+ SET_INUSE_AND_PINUSE(GM, p, SMALL_INDEX2SIZE(i));
+ else
+ {
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
+ r = CHUNK_PLUS_OFFSET(p, nb);
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
+ ReplaceDv(GM, r, rsize);
+ }
+ mem = CHUNK2MEM(p);
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+
+ else if (GM->iTreeMap != 0 && (mem = TmallocSmall(GM, nb)) != 0)
+ {
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+ }
+ }
+ else if (bytes >= MAX_REQUEST)
+ nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+ else
+ {
+ nb = PAD_REQUEST(bytes);
+ if (GM->iTreeMap != 0 && (mem = TmallocLarge(GM, nb)) != 0)
+ {
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+ }
+
+ if (nb <= GM->iDvSize)
+ {
+ size_t rsize = GM->iDvSize - nb;
+ mchunkptr p = GM->iDv;
+ if (rsize >= MIN_CHUNK_SIZE)
+ { /* split iDv */
+ mchunkptr r = GM->iDv = CHUNK_PLUS_OFFSET(p, nb);
+ GM->iDvSize = rsize;
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
+ }
+ else
+ { /* exhaust iDv */
+ size_t dvs = GM->iDvSize;
+ GM->iDvSize = 0;
+ GM->iDv = 0;
+ SET_INUSE_AND_PINUSE(GM, p, dvs);
+ }
+ mem = CHUNK2MEM(p);
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+
+ else if (nb < GM->iTopSize)
+ { /* Split iTop */
+ size_t rsize = GM->iTopSize -= nb;
+ mchunkptr p = GM->iTop;
+ mchunkptr r = GM->iTop = CHUNK_PLUS_OFFSET(p, nb);
+ r->iHead = rsize | PINUSE_BIT;
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
+ mem = CHUNK2MEM(p);
+ CHECK_TOP_CHUNK(GM, GM->iTop);
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+
+ return SysAlloc(GM, nb);
+}
+
+
+void RHybridHeap::DlFree(void* mem)
+{
+ /*
+ Consolidate freed chunks with preceding or succeeding bordering
+ free chunks, if they exist, and then place in a bin. Intermixed
+ with special cases for iTop, iDv, mmapped chunks, and usage errors.
+*/
+ mchunkptr p = MEM2CHUNK(mem);
+ CHECK_INUSE_CHUNK(GM, p);
+ if (RTCHECK(OK_ADDRESS(GM, p) && OK_CINUSE(p)))
+ {
+ size_t psize = CHUNKSIZE(p);
+ mchunkptr next = CHUNK_PLUS_OFFSET(p, psize);
+ if (!PINUSE(p))
+ {
+ size_t prevsize = p->iPrevFoot;
+ mchunkptr prev = CHUNK_MINUS_OFFSET(p, prevsize);
+ psize += prevsize;
+ p = prev;
+ if (RTCHECK(OK_ADDRESS(GM, prev)))
+ { /* consolidate backward */
+ if (p != GM->iDv)
+ {
+ UnlinkChunk(GM, p, prevsize);
+ }
+ else if ((next->iHead & INUSE_BITS) == INUSE_BITS)
+ {
+ GM->iDvSize = psize;
+ SET_FREE_WITH_PINUSE(p, psize, next);
+ return;
+ }
+ }
+ else
+ {
+ USAGE_ERROR_ACTION(GM, p);
+ return;
+ }
+ }
+
+ if (RTCHECK(OK_NEXT(p, next) && OK_PINUSE(next)))
+ {
+ if (!CINUSE(next))
+ { /* consolidate forward */
+ if (next == GM->iTop)
+ {
+ size_t tsize = GM->iTopSize += psize;
+ GM->iTop = p;
+ p->iHead = tsize | PINUSE_BIT;
+ if (p == GM->iDv)
+ {
+ GM->iDv = 0;
+ GM->iDvSize = 0;
+ }
+ if ( !IS_FIXED_HEAP && SHOULD_TRIM(GM, tsize) )
+ SysTrim(GM, 0);
+ return;
+ }
+ else if (next == GM->iDv)
+ {
+ size_t dsize = GM->iDvSize += psize;
+ GM->iDv = p;
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, dsize);
+ return;
+ }
+ else
+ {
+ size_t nsize = CHUNKSIZE(next);
+ psize += nsize;
+ UnlinkChunk(GM, next, nsize);
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, psize);
+ if (p == GM->iDv)
+ {
+ GM->iDvSize = psize;
+ return;
+ }
+ }
+ }
+ else
+ SET_FREE_WITH_PINUSE(p, psize, next);
+ InsertChunk(GM, p, psize);
+ CHECK_FREE_CHUNK(GM, p);
+ return;
+ }
+ }
+}
+
+
+void* RHybridHeap::DlRealloc(void* oldmem, size_t bytes, TInt mode)
+{
+ mchunkptr oldp = MEM2CHUNK(oldmem);
+ size_t oldsize = CHUNKSIZE(oldp);
+ mchunkptr next = CHUNK_PLUS_OFFSET(oldp, oldsize);
+ mchunkptr newp = 0;
+ void* extra = 0;
+
+ /* Try to either shrink or extend into iTop. Else malloc-copy-free */
+
+ if (RTCHECK(OK_ADDRESS(GM, oldp) && OK_CINUSE(oldp) &&
+ OK_NEXT(oldp, next) && OK_PINUSE(next)))
+ {
+ size_t nb = REQUEST2SIZE(bytes);
+ if (oldsize >= nb) { /* already big enough */
+ size_t rsize = oldsize - nb;
+ newp = oldp;
+ if (rsize >= MIN_CHUNK_SIZE)
+ {
+ mchunkptr remainder = CHUNK_PLUS_OFFSET(newp, nb);
+ SET_INUSE(GM, newp, nb);
+// SET_INUSE(GM, remainder, rsize);
+ SET_INUSE_AND_PINUSE(GM, remainder, rsize); // corrected in original DLA version V2.8.4
+ extra = CHUNK2MEM(remainder);
+ }
+ }
+ else if (next == GM->iTop && oldsize + GM->iTopSize > nb)
+ {
+ /* Expand into iTop */
+ size_t newsize = oldsize + GM->iTopSize;
+ size_t newtopsize = newsize - nb;
+ mchunkptr newtop = CHUNK_PLUS_OFFSET(oldp, nb);
+ SET_INUSE(GM, oldp, nb);
+ newtop->iHead = newtopsize |PINUSE_BIT;
+ GM->iTop = newtop;
+ GM->iTopSize = newtopsize;
+ newp = oldp;
+ }
+ }
+ else
+ {
+ USAGE_ERROR_ACTION(GM, oldmem);
+ }
+
+ if (newp != 0)
+ {
+ if (extra != 0)
+ {
+ DlFree(extra);
+ }
+ CHECK_INUSE_CHUNK(GM, newp);
+ return CHUNK2MEM(newp);
+ }
+ else
+ {
+ if ( mode & ENeverMove )
+ return 0; // cannot move
+ void* newmem = DlMalloc(bytes);
+ if (newmem != 0)
+ {
+ size_t oc = oldsize - OVERHEAD_FOR(oldp);
+ memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
+ DlFree(oldmem);
+ }
+ return newmem;
+ }
+ // return 0;
+}
+
+size_t RHybridHeap::DlInfo(struct HeapInfo* i, SWalkInfo* wi) const
+{
+ TInt max = ((GM->iTopSize-1) & ~CHUNK_ALIGN_MASK) - CHUNK_OVERHEAD;
+ if ( max < 0 )
+ max = 0;
+ else ++i->iFreeN; // iTop always free
+ i->iFreeBytes += max;
+
+ Walk(wi, GM->iTop, max, EGoodFreeCell, EDougLeaAllocator); // Introduce DL iTop buffer to the walk function
+
+ for (mchunkptr q = ALIGN_AS_CHUNK(GM->iSeg.iBase); q != GM->iTop; q = NEXT_CHUNK(q))
+ {
+ TInt sz = CHUNKSIZE(q);
+ if (!CINUSE(q))
+ {
+ if ( sz > max )
+ max = sz;
+ i->iFreeBytes += sz;
+ ++i->iFreeN;
+ Walk(wi, CHUNK2MEM(q), sz, EGoodFreeCell, EDougLeaAllocator); // Introduce DL free buffer to the walk function
+ }
+ else
+ {
+ i->iAllocBytes += sz - CHUNK_OVERHEAD;
+ ++i->iAllocN;
+ Walk(wi, CHUNK2MEM(q), (sz- CHUNK_OVERHEAD), EGoodAllocatedCell, EDougLeaAllocator); // Introduce DL allocated buffer to the walk function
+ }
+ }
+ return max; // return largest available chunk size
+}
+
+//
+// get statistics about the state of the allocator
+//
+TInt RHybridHeap::GetInfo(struct HeapInfo* i, SWalkInfo* wi) const
+{
+ memset(i,0,sizeof(HeapInfo));
+ i->iFootprint = iChunkSize;
+ i->iMaxSize = iMaxLength;
+#ifndef __KERNEL_MODE__
+ PagedInfo(i, wi);
+ SlabInfo(i, wi);
+#endif
+ return DlInfo(i,wi);
+}
+
+//
+// Methods to commit/decommit memory pages from chunk
+//
+
+
+void* RHybridHeap::Map(void* p, TInt sz)
+//
+// allocate pages in the chunk
+// if p is NULL, find and allocate the required number of pages (which must lie in the lower half)
+// otherwise commit the pages specified
+//
+{
+ HEAP_ASSERT(sz > 0);
+
+ if ( iChunkSize + sz > iMaxLength)
+ return 0;
+
+#ifdef __KERNEL_MODE__
+
+ TInt r = ((DChunk*)iChunkHandle)->Adjust(iChunkSize + iOffset + sz);
+ if (r < 0)
+ return 0;
+
+ iChunkSize += sz;
+
+#else
+
+ RChunk chunk;
+ chunk.SetHandle(iChunkHandle);
+ if ( p )
+ {
+ TInt r;
+ if ( iUseAdjust )
+ r = chunk.Adjust(iChunkSize + sz);
+ else
+ {
+ HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
+ HEAP_ASSERT(p == Floor(p, iPageSize));
+ r = chunk.Commit(iOffset + PtrDiff(p, this),sz);
+ }
+ if (r < 0)
+ return 0;
+ }
+ else
+ {
+ TInt r = chunk.Allocate(sz);
+ if (r < 0)
+ return 0;
+ if (r > iOffset)
+ {
+ // can't allow page allocations in DL zone
+ chunk.Decommit(r, sz);
+ return 0;
+ }
+ p = Offset(this, r - iOffset);
+ }
+ iChunkSize += sz;
+
+ if (iChunkSize >= iSlabInitThreshold)
+ { // set up slab system now that heap is large enough
+ SlabConfig(iSlabConfigBits);
+ iSlabInitThreshold = KMaxTInt32;
+ }
+
+#endif // __KERNEL_MODE__
+
+#ifdef ENABLE_BTRACE
+ if(iChunkSize > iHighWaterMark)
+ {
+ iHighWaterMark = Ceiling(iChunkSize,16*iPageSize);
+ TUint32 traceData[6];
+ traceData[0] = iChunkHandle;
+ traceData[1] = iMinLength;
+ traceData[2] = iMaxLength;
+ traceData[3] = sz;
+ traceData[4] = iChunkSize;
+ traceData[5] = iHighWaterMark;
+ BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 33, traceData, sizeof(traceData));
+ }
+#endif
+
+ return p;
+}
+
+void RHybridHeap::Unmap(void* p, TInt sz)
+{
+ HEAP_ASSERT(sz > 0);
+
+#ifdef __KERNEL_MODE__
+
+ (void)p;
+ HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
+#if defined(_DEBUG)
+ TInt r =
+#endif
+ ((DChunk*)iChunkHandle)->Adjust(iChunkSize + iOffset - sz);
+ HEAP_ASSERT(r >= 0);
+
+#else
+
+ RChunk chunk;
+ chunk.SetHandle(iChunkHandle);
+ if ( iUseAdjust )
+ {
+ HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
+#if defined(_DEBUG)
+ TInt r =
+#endif
+ chunk.Adjust(iChunkSize - sz);
+ HEAP_ASSERT(r >= 0);
+ }
+ else
+ {
+ HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
+ HEAP_ASSERT(p == Floor(p, iPageSize));
+#if defined(_DEBUG)
+ TInt r =
+#endif
+ chunk.Decommit(PtrDiff(p, Offset(this,-iOffset)), sz);
+ HEAP_ASSERT(r >= 0);
+ }
+#endif // __KERNEL_MODE__
+
+ iChunkSize -= sz;
+}
+
+
+#ifndef __KERNEL_MODE__
+//
+// Slab allocator code
+//
+
+//inline slab* slab::SlabFor(void* p)
+slab* slab::SlabFor( const void* p)
+{
+ return (slab*)(Floor(p, SLABSIZE));
+}
+
+//
+// Remove slab s from its tree/heap (not necessarily the root), preserving the address order
+// invariant of the heap
+//
+void RHybridHeap::TreeRemove(slab* s)
+{
+ slab** r = s->iParent;
+ slab* c1 = s->iChild1;
+ slab* c2 = s->iChild2;
+ for (;;)
+ {
+ if (!c2)
+ {
+ *r = c1;
+ if (c1)
+ c1->iParent = r;
+ return;
+ }
+ if (!c1)
+ {
+ *r = c2;
+ c2->iParent = r;
+ return;
+ }
+ if (c1 > c2)
+ {
+ slab* c3 = c1;
+ c1 = c2;
+ c2 = c3;
+ }
+ slab* newc2 = c1->iChild2;
+ *r = c1;
+ c1->iParent = r;
+ c1->iChild2 = c2;
+ c2->iParent = &c1->iChild2;
+ s = c1;
+ c1 = s->iChild1;
+ c2 = newc2;
+ r = &s->iChild1;
+ }
+}
+//
+// Insert slab s into the tree/heap rooted at r, preserving the address ordering
+// invariant of the heap
+//
+void RHybridHeap::TreeInsert(slab* s,slab** r)
+{
+ slab* n = *r;
+ for (;;)
+ {
+ if (!n)
+ { // tree empty
+ *r = s;
+ s->iParent = r;
+ s->iChild1 = s->iChild2 = 0;
+ break;
+ }
+ if (s < n)
+ { // insert between iParent and n
+ *r = s;
+ s->iParent = r;
+ s->iChild1 = n;
+ s->iChild2 = 0;
+ n->iParent = &s->iChild1;
+ break;
+ }
+ slab* c1 = n->iChild1;
+ slab* c2 = n->iChild2;
+ if ((c1 - 1) > (c2 - 1))
+ {
+ r = &n->iChild1;
+ n = c1;
+ }
+ else
+ {
+ r = &n->iChild2;
+ n = c2;
+ }
+ }
+}
+
+void* RHybridHeap::AllocNewSlab(slabset& allocator)
+//
+// Acquire and initialise a new slab, returning a cell from the slab
+// The strategy is:
+// 1. Use the lowest address free slab, if available. This is done by using the lowest slab
+// in the page at the root of the iPartialPage heap (which is address ordered). If the page
+// is now fully used, remove it from the iPartialPage heap.
+// 2. Allocate a new page for iSlabs if no empty iSlabs are available
+//
+{
+ page* p = page::PageFor(iPartialPage);
+ if (!p)
+ return AllocNewPage(allocator);
+
+ unsigned h = p->iSlabs[0].iHeader;
+ unsigned pagemap = SlabHeaderPagemap(h);
+ HEAP_ASSERT(&p->iSlabs[HIBIT(pagemap)] == iPartialPage);
+
+ unsigned slabix = LOWBIT(pagemap);
+ p->iSlabs[0].iHeader = h &~ (0x100<<slabix);
+ if (!(pagemap &~ (1<<slabix)))
+ {
+ TreeRemove(iPartialPage); // last free slab in page
+ }
+
+ return InitNewSlab(allocator, &p->iSlabs[slabix]);
+}
+
+/** Definition of this function is not present in the prototype code ***/
+#if 0
+void RHybridHeap::partial_insert(slab* s)
+{
+ // slab has had first cell freed and needs to be linked back into iPartial tree
+ slabset& ss = iSlabAlloc[iSizeMap[s->clz]];
+
+ HEAP_ASSERT(s->used == slabfull);
+ s->used = ss.fulluse - s->clz; // full-1 loading
+ TreeInsert(s,&ss.iPartial);
+ CHECKTREE(&ss.iPartial);
+}
+/** Definition of this function is not present in the prototype code ***/
+#endif
+
+void* RHybridHeap::AllocNewPage(slabset& allocator)
+//
+// Acquire and initialise a new page, returning a cell from a new slab
+// The iPartialPage tree is empty (otherwise we'd have used a slab from there)
+// The iPartialPage link is put in the highest addressed slab in the page, and the
+// lowest addressed slab is used to fulfill the allocation request
+//
+{
+ page* p = iSparePage;
+ if (p)
+ iSparePage = 0;
+ else
+ {
+ p = static_cast<page*>(Map(0, iPageSize));
+ if (!p)
+ return 0;
+ }
+ HEAP_ASSERT(p == Floor(p, iPageSize));
+ // Store page allocated for slab into paged_bitmap (for RHybridHeap::Reset())
+ if (!PagedSetSize(p, iPageSize))
+ {
+ Unmap(p, iPageSize);
+ return 0;
+ }
+ p->iSlabs[0].iHeader = ((1<<3) + (1<<2) + (1<<1))<<8; // set pagemap
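+ // pagemap bits for iSlabs 1-3 are set (free); slab 0 is handed out via InitNewSlab() below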
+ p->iSlabs[3].iParent = &iPartialPage;
+ p->iSlabs[3].iChild1 = p->iSlabs[3].iChild2 = 0;
+ iPartialPage = &p->iSlabs[3];
+ return InitNewSlab(allocator,&p->iSlabs[0]);
+}
+
+void RHybridHeap::FreePage(page* p)
+//
+// Release an unused page to the OS
+// A single page is cached for reuse to reduce thrashing
+// the OS allocator.
+//
+{
+ HEAP_ASSERT(Ceiling(p, iPageSize) == p);
+ if (!iSparePage)
+ {
+ iSparePage = p;
+ return;
+ }
+
+ // unmapped slab page must be cleared from paged_bitmap, too
+ PagedZapSize(p, iPageSize); // clear page map
+
+ Unmap(p, iPageSize);
+}
+
+void RHybridHeap::FreeSlab(slab* s)
+//
+// Release an empty slab to the slab manager
+// The strategy is:
+// 1. The page containing the slab is checked to see the state of the other iSlabs in the page by
+// inspecting the pagemap field in the iHeader of the first slab in the page.
+// 2. The pagemap is updated to indicate the new unused slab
+// 3. If this is the only unused slab in the page then the slab iHeader is used to add the page to
+// the iPartialPage tree/heap
+// 4. If all the iSlabs in the page are now unused the page is released back to the OS
+// 5. If this slab has a higher address than the one currently used to track this page in
+// the iPartialPage heap, the linkage is moved to the new unused slab
+//
+{
+ TreeRemove(s);
+ CHECKTREE(s->iParent);
+ HEAP_ASSERT(SlabHeaderUsedm4(s->iHeader) == SlabHeaderSize(s->iHeader)-4);
+
+ page* p = page::PageFor(s);
+ unsigned h = p->iSlabs[0].iHeader;
+ int slabix = s - &p->iSlabs[0];
+ unsigned pagemap = SlabHeaderPagemap(h);
+ p->iSlabs[0].iHeader = h | (0x100<<slabix);
+ if (pagemap == 0)
+ { // page was full before, use this slab as link in empty heap
+ TreeInsert(s, &iPartialPage);
+ }
+ else
+ { // Find the current empty-link slab
+ slab* sl = &p->iSlabs[HIBIT(pagemap)];
+ pagemap ^= (1<<slabix);
+ if (pagemap == 0xf)
+ { // page is now empty so recycle page to os
+ TreeRemove(sl);
+ FreePage(p);
+ return;
+ }
+ // ensure the free list link is in highest address slab in page
+ if (s > sl)
+ { // replace current link with new one. Address-order tree so position stays the same
+ slab** r = sl->iParent;
+ slab* c1 = sl->iChild1;
+ slab* c2 = sl->iChild2;
+ s->iParent = r;
+ s->iChild1 = c1;
+ s->iChild2 = c2;
+ *r = s;
+ if (c1)
+ c1->iParent = &s->iChild1;
+ if (c2)
+ c2->iParent = &s->iChild2;
+ }
+ CHECK(if (s < sl) s=sl);
+ }
+ HEAP_ASSERT(SlabHeaderPagemap(p->iSlabs[0].iHeader) != 0);
+ HEAP_ASSERT(HIBIT(SlabHeaderPagemap(p->iSlabs[0].iHeader)) == unsigned(s - &p->iSlabs[0]));
+}
+
+
+void RHybridHeap::SlabInit()
+{
+ iSlabThreshold=0;
+ iPartialPage = 0;
+ iFullSlab = 0;
+ iSparePage = 0;
+ memset(&iSizeMap[0],0xff,sizeof(iSizeMap));
+ memset(&iSlabAlloc[0],0,sizeof(iSlabAlloc));
+}
+
+void RHybridHeap::SlabConfig(unsigned slabbitmap)
+{
+ HEAP_ASSERT((slabbitmap & ~EOkBits) == 0);
+ HEAP_ASSERT(MAXSLABSIZE <= 60);
+
+ unsigned int ix = 0xff;
+ unsigned int bit = 1<<((MAXSLABSIZE>>2)-1);
+ for (int sz = MAXSLABSIZE; sz >= 0; sz -= 4, bit >>= 1)
+ {
+ if (slabbitmap & bit)
+ {
+ if (ix == 0xff)
+ iSlabThreshold=sz+1;
+ ix = (sz>>2)-1;
+ }
+ iSizeMap[sz>>2] = (TUint8) ix;
+ }
+}
+
+
+void* RHybridHeap::SlabAllocate(slabset& ss)
+//
+// Allocate a cell from the given slabset
+// Strategy:
+// 1. Take the partially full slab at the top of the heap (lowest address).
+// 2. If there is no such slab, allocate from a new slab
+// 3. If the slab has a non-empty freelist, pop the cell from the front of the list and update the slab
+// 4. Otherwise, if the slab is not full, return the cell at the end of the currently used region of
+// the slab, updating the slab
+// 5. Otherwise, release the slab from the iPartial tree/heap, marking it as 'floating' and go back to
+// step 1
+//
+{
+ for (;;)
+ {
+ slab *s = ss.iPartial;
+ if (!s)
+ break;
+ unsigned h = s->iHeader;
+ unsigned free = h & 0xff; // extract free cell positioning
+ if (free)
+ {
+ HEAP_ASSERT(((free<<2)-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
+ void* p = Offset(s,free<<2);
+ free = *(unsigned char*)p; // get next pos in free list
+ h += (h&0x3C000)<<6; // update usedm4
+ h &= ~0xff;
+ h |= free; // update freelist
+ s->iHeader = h;
+ HEAP_ASSERT(SlabHeaderFree(h) == 0 || ((SlabHeaderFree(h)<<2)-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
+ HEAP_ASSERT(SlabHeaderUsedm4(h) <= 0x3F8u);
+ HEAP_ASSERT((SlabHeaderUsedm4(h)+4)%SlabHeaderSize(h) == 0);
+ return p;
+ }
+ unsigned h2 = h + ((h&0x3C000)<<6);
+// if (h2 < 0xfc00000)
+ if (h2 < MAXUSEDM4BITS)
+ {
+ HEAP_ASSERT((SlabHeaderUsedm4(h2)+4)%SlabHeaderSize(h2) == 0);
+ s->iHeader = h2;
+ return Offset(s,(h>>18) + sizeof(unsigned) + sizeof(slabhdr));
+ }
+ h |= FLOATING_BIT; // mark the slab as full-floating
+ s->iHeader = h;
+ TreeRemove(s);
+ slab* c = iFullSlab; // add to full list
+ iFullSlab = s;
+ s->iParent = &iFullSlab;
+ s->iChild1 = c;
+ s->iChild2 = 0;
+ if (c)
+ c->iParent = &s->iChild1;
+
+ CHECKTREE(&ss.iPartial);
+ // go back and try the next slab...
+ }
+ // no iPartial iSlabs found, so allocate from a new slab
+ return AllocNewSlab(ss);
+}
+
+void RHybridHeap::SlabFree(void* p)
+//
+// Free a cell from the slab allocator
+// Strategy:
+// 1. Find the containing slab (round down to nearest 1KB boundary)
+// 2. Push the cell into the slab's freelist, and update the slab usage count
+// 3. If this is the last allocated cell, free the slab to the main slab manager
+// 4. If the slab was full-floating then insert the slab in its respective iPartial tree
+//
+{
+ HEAP_ASSERT(LowBits(p,3)==0);
+ slab* s = slab::SlabFor(p);
+ CHECKSLAB(s,ESlabAllocator,p);
+ CHECKSLABBFR(s,p);
+
+ unsigned pos = LowBits(p, SLABSIZE);
+ unsigned h = s->iHeader;
+ HEAP_ASSERT(SlabHeaderUsedm4(h) != 0x3fC); // slab is empty already
+ HEAP_ASSERT((pos-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
+ *(unsigned char*)p = (unsigned char)h;
+ h &= ~0xFF;
+ h |= (pos>>2);
+ unsigned size = h & 0x3C000;
+ if (int(h) >= 0)
+ {
+ h -= size<<6;
+ if (int(h)>=0)
+ {
+ s->iHeader = h;
+ return;
+ }
+ FreeSlab(s);
+ return;
+ }
+ h -= size<<6;
+ h &= ~FLOATING_BIT;
+ s->iHeader = h;
+ slab** full = s->iParent; // remove from full list
+ slab* c = s->iChild1;
+ *full = c;
+ if (c)
+ c->iParent = full;
+
+ slabset& ss = iSlabAlloc[iSizeMap[size>>14]];
+ TreeInsert(s,&ss.iPartial);
+ CHECKTREE(&ss.iPartial);
+}
+
+void* RHybridHeap::InitNewSlab(slabset& allocator, slab* s)
+//
+// initialise an empty slab for this allocator and return the first cell
+// pre-condition: the slabset has no iPartial iSlabs for allocation
+//
+{
+ HEAP_ASSERT(allocator.iPartial==0);
+ TInt size = 4 + ((&allocator-&iSlabAlloc[0])<<2); // infer size from slab allocator address
+ unsigned h = s->iHeader & 0xF00; // preserve pagemap only
+ h |= (size<<12); // set size
+ h |= (size-4)<<18; // set usedminus4 to one object minus 4
+ s->iHeader = h;
+ allocator.iPartial = s;
+ s->iParent = &allocator.iPartial;
+ s->iChild1 = s->iChild2 = 0;
+ return Offset(s,sizeof(slabhdr));
+}
+
+const unsigned char slab_bitcount[16] = {0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4};
+
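+// slab_ext_frag[i] is the external fragmentation of a 1 KB slab holding cells of size 4*i:
+// the slab header (apparently 16 bytes, leaving 1008 usable bytes) plus the tail of those
+// 1008 bytes that cannot hold a whole cell (1008 % cell size), as the initialisers below show.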
+const unsigned char slab_ext_frag[16] =
+{
+ 0,
+ 16 + (1008 % 4),
+ 16 + (1008 % 8),
+ 16 + (1008 % 12),
+ 16 + (1008 % 16),
+ 16 + (1008 % 20),
+ 16 + (1008 % 24),
+ 16 + (1008 % 28),
+ 16 + (1008 % 32),
+ 16 + (1008 % 36),
+ 16 + (1008 % 40),
+ 16 + (1008 % 44),
+ 16 + (1008 % 48),
+ 16 + (1008 % 52),
+ 16 + (1008 % 56),
+ 16 + (1008 % 60)
+};
+
+void RHybridHeap::TreeWalk(slab* const* root, void (*f)(slab*, struct HeapInfo*, SWalkInfo*), struct HeapInfo* i, SWalkInfo* wi)
+{
+ // iterative walk around the tree at root
+
+ slab* s = *root;
+ if (!s)
+ return;
+
+ for (;;)
+ {
+ slab* c;
+ while ((c = s->iChild1) != 0)
+ s = c; // walk down left side to end
+ for (;;)
+ {
+ f(s, i, wi);
+ c = s->iChild2;
+ if (c)
+ { // one step down right side, now try and walk down left
+ s = c;
+ break;
+ }
+ for (;;)
+ { // loop to walk up right side
+ slab** pp = s->iParent;
+ if (pp == root)
+ return;
+ s = slab::SlabFor(pp);
+ if (pp == &s->iChild1)
+ break;
+ }
+ }
+ }
+}
+
+void RHybridHeap::SlabEmptyInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
+{
+ Walk(wi, s, SLABSIZE, EGoodFreeCell, EEmptySlab); // Introduce an empty slab to the walk function
+ int nslab = slab_bitcount[SlabHeaderPagemap(page::PageFor(s)->iSlabs[0].iHeader)];
+ i->iFreeN += nslab;
+ i->iFreeBytes += nslab << SLABSHIFT;
+}
+
+void RHybridHeap::SlabPartialInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
+{
+ Walk(wi, s, SLABSIZE, EGoodAllocatedCell, EPartialFullSlab); // Introduce a full slab to the walk function
+ unsigned h = s->iHeader;
+ unsigned used = SlabHeaderUsedm4(h)+4;
+ unsigned size = SlabHeaderSize(h);
+ unsigned free = 1024 - slab_ext_frag[size>>2] - used;
+ i->iFreeN += (free/size);
+ i->iFreeBytes += free;
+ i->iAllocN += (used/size);
+ i->iAllocBytes += used;
+}
+
+void RHybridHeap::SlabFullInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
+{
+ Walk(wi, s, SLABSIZE, EGoodAllocatedCell, EFullSlab); // Introduce a full slab to the walk function
+ unsigned h = s->iHeader;
+ unsigned used = SlabHeaderUsedm4(h)+4;
+ unsigned size = SlabHeaderSize(h);
+ HEAP_ASSERT(1024 - slab_ext_frag[size>>2] - used == 0);
+ i->iAllocN += (used/size);
+ i->iAllocBytes += used;
+}
+
+void RHybridHeap::SlabInfo(struct HeapInfo* i, SWalkInfo* wi) const
+{
+ if (iSparePage)
+ {
+ i->iFreeBytes += iPageSize;
+ i->iFreeN = 4;
+ Walk(wi, iSparePage, iPageSize, EGoodFreeCell, ESlabSpare); // Introduce Slab spare page to the walk function
+ }
+ TreeWalk(&iFullSlab, &SlabFullInfo, i, wi);
+ for (int ix = 0; ix < (MAXSLABSIZE>>2); ++ix)
+ TreeWalk(&iSlabAlloc[ix].iPartial, &SlabPartialInfo, i, wi);
+ TreeWalk(&iPartialPage, &SlabEmptyInfo, i, wi);
+}
+
+
+//
+// Bitmap class implementation for large page allocator
+//
+inline unsigned char* paged_bitmap::Addr() const {return iBase;}
+inline unsigned paged_bitmap::Size() const {return iNbits;}
+//
+
+void paged_bitmap::Init(unsigned char* p, unsigned size, unsigned bit)
+{
+ iBase = p;
+ iNbits=size;
+ int bytes=Ceiling(size,8)>>3;
+ memset(p,bit?0xff:0,bytes);
+}
+
+inline void paged_bitmap::Set(unsigned ix, unsigned bit)
+{
+ if (bit)
+ iBase[ix>>3] |= (1<<(ix&7));
+ else
+ iBase[ix>>3] &= ~(1<<(ix&7));
+}
+
+inline unsigned paged_bitmap::operator[](unsigned ix) const
+{
+ return 1U&(iBase[ix>>3] >> (ix&7));
+}
+
+void paged_bitmap::Setn(unsigned ix, unsigned len, unsigned bit)
+{
+ int l=len;
+ while (--l>=0)
+ Set(ix++,bit);
+}
+
+void paged_bitmap::Set(unsigned ix, unsigned len, unsigned val)
+{
+ int l=len;
+ while (--l>=0)
+ {
+ Set(ix++,val&1);
+ val>>=1;
+ }
+}
+
+unsigned paged_bitmap::Bits(unsigned ix, unsigned len) const
+{
+ int l=len;
+ unsigned val=0;
+ unsigned bit=0;
+ while (--l>=0)
+ val |= (*this)[ix++]<<bit++;
+ return val;
+}
+
+bool paged_bitmap::Is(unsigned ix, unsigned len, unsigned bit) const
+{
+ unsigned i2 = ix+len;
+ if (i2 > iNbits)
+ return false;
+ for (;;)
+ {
+ if ((*this)[ix] != bit)
+ return false;
+ if (++ix==i2)
+ return true;
+ }
+}
+
+int paged_bitmap::Find(unsigned start, unsigned bit) const
+{
+ if (start<iNbits) do
+ {
+ if ((*this)[start]==bit)
+ return start;
+ } while (++start<iNbits);
+ return -1;
+}
+
+
+//
+// Page allocator code
+//
+void RHybridHeap::PagedInit(TInt aPagePower)
+{
+ if (aPagePower > 0)
+ {
+ if (aPagePower < MINPAGEPOWER)
+ aPagePower = MINPAGEPOWER;
+ }
+ else aPagePower = 31;
+
+ iPageThreshold = aPagePower;
+ /*-------------------------------------------------------------
+ * Initialize page bitmap
+ *-------------------------------------------------------------*/
+ iPageMap.Init((unsigned char*)&iBitMapBuffer, MAXSMALLPAGEBITS, 0);
+}
+
+void* RHybridHeap::PagedAllocate(unsigned size)
+{
+ TInt nbytes = Ceiling(size, iPageSize);
+ void* p = Map(0, nbytes);
+ if (!p)
+ return 0;
+ if (!PagedSetSize(p, nbytes))
+ {
+ Unmap(p, nbytes);
+ return 0;
+ }
+ return p;
+}
+
+void* RHybridHeap::PagedReallocate(void* p, unsigned size, TInt mode)
+{
+
+ HEAP_ASSERT(Ceiling(p, iPageSize) == p);
+ unsigned nbytes = Ceiling(size, iPageSize);
+
+ unsigned osize = PagedSize(p);
+ if ( nbytes == 0 ) // Special case to handle shrinking below min page threshold
+ nbytes = Min((1 << MINPAGEPOWER), osize);
+
+ if (osize == nbytes)
+ return p;
+
+ if (nbytes < osize)
+ { // shrink in place, unmap final pages and rewrite the pagemap
+ Unmap(Offset(p, nbytes), osize-nbytes);
+ // zap the old length code and then write the new one (will not fail)
+ PagedZapSize(p, osize);
+
+ TBool check = PagedSetSize(p, nbytes);
+ __ASSERT_ALWAYS(check, HEAP_PANIC(ETHeapBadCellAddress));
+
+ return p;
+ }
+
+ // nbytes > osize
+ // try and extend current region first
+
+ void* newp = Map(Offset(p, osize), nbytes-osize);
+ if (newp)
+ { // In-place growth. Note that the pagemap may have to grow, and that growth can fail
+ if (!PagedSetSize(p, nbytes))
+ { // must release extra mapping
+ Unmap(Offset(p, osize), nbytes-osize);
+ return 0;
+ }
+ // if successful, the new length code will have overwritten the old one (it is at least as long)
+ return p;
+ }
+
+ // fallback to allocate/copy/free
+ if (mode & ENeverMove)
+ return 0; // not allowed to move cell
+
+ newp = PagedAllocate(nbytes);
+ if (!newp)
+ return 0;
+ memcpy(newp, p, osize);
+ PagedFree(p);
+ return newp;
+}
+
+void RHybridHeap::PagedFree(void* p)
+{
+ HEAP_ASSERT(Ceiling(p, iPageSize) == p);
+
+
+ unsigned size = PagedSize(p);
+
+ PagedZapSize(p, size); // clear page map
+ Unmap(p, size);
+}
+
+void RHybridHeap::PagedInfo(struct HeapInfo* i, SWalkInfo* wi) const
+{
+ for (int ix = 0;(ix = iPageMap.Find(ix,1)) >= 0;)
+ {
+ int npage = PagedDecode(ix);
+ // Introduce paged buffer to the walk function
+ TAny* bfr = Bitmap2addr(ix);
+ int len = npage << PAGESHIFT;
+ if ( len > iPageSize )
+ { // A buffer of exactly one page is a slab page mapped into the bitmap, so only larger buffers are reported here
+ i->iAllocBytes += len;
+ ++i->iAllocN;
+ Walk(wi, bfr, len, EGoodAllocatedCell, EPageAllocator);
+ }
+ ix += (npage<<1);
+ }
+}
+
+void RHybridHeap::ResetBitmap()
+/*---------------------------------------------------------
+ * Go through the paged_bitmap and unmap all buffers back to the system.
+ * This method is called from RHybridHeap::Reset() to unmap all page-allocated
+ * buffers, and also the slab pages that are stored in the bitmap.
+ *---------------------------------------------------------*/
+{
+ unsigned iNbits = iPageMap.Size();
+ if ( iNbits )
+ {
+ for (int ix = 0;(ix = iPageMap.Find(ix,1)) >= 0;)
+ {
+ int npage = PagedDecode(ix);
+ void* p = Bitmap2addr(ix);
+ unsigned size = PagedSize(p);
+ PagedZapSize(p, size); // clear page map
+ Unmap(p, size);
+ ix += (npage<<1);
+ }
+ if ( (TInt)iNbits > MAXSMALLPAGEBITS )
+ {
+ // unmap page reserved for enlarged bitmap
+ Unmap(iPageMap.Addr(), (iNbits >> 3) );
+ }
+ }
+}
+
+TBool RHybridHeap::CheckBitmap(void* aBfr, TInt aSize, TUint32& aDummy, TInt& aNPages)
+/*---------------------------------------------------------
+ * If aBfr = NULL
+ * Go through the paged_bitmap, counting the mapped pages and checking
+ * that each page is still accessible by reading its first word
+ * else
+ * Assure that the specified buffer is mapped with the correct length
+ * in the page map
+ *---------------------------------------------------------*/
+{
+ TBool ret;
+ if ( aBfr )
+ {
+ __ASSERT_ALWAYS((Ceiling(aBfr, iPageSize) == aBfr), HEAP_PANIC(ETHeapBadCellAddress));
+ ret = ( aSize == (TInt)PagedSize(aBfr));
+ }
+ else
+ {
+ ret = ETrue;
+ unsigned iNbits = iPageMap.Size();
+ if ( iNbits )
+ {
+ TInt npage;
+ aNPages = 0;
+ for (int ix = 0;(ix = iPageMap.Find(ix,1)) >= 0;)
+ {
+ npage = PagedDecode(ix);
+ aNPages += npage;
+ void* p = Bitmap2addr(ix);
+ __ASSERT_ALWAYS((Ceiling(p, iPageSize) == p), HEAP_PANIC(ETHeapBadCellAddress));
+ unsigned s = PagedSize(p);
+ __ASSERT_ALWAYS((Ceiling(s, iPageSize) == s), HEAP_PANIC(ETHeapBadCellAddress));
+ while ( s )
+ {
+ aDummy += *(TUint32*)((TUint8*)p + (s-iPageSize));
+ s -= iPageSize;
+ }
+ ix += (npage<<1);
+ }
+ if ( (TInt)iNbits > MAXSMALLPAGEBITS )
+ {
+ // add enlarged bitmap page(s) to total page count
+ npage = (iNbits >> 3);
+ __ASSERT_ALWAYS((Ceiling(npage, iPageSize) == npage), HEAP_PANIC(ETHeapBadCellAddress));
+ aNPages += (npage / iPageSize);
+ }
+ }
+ }
+
+ return ret;
+}
+
+
+// The paged allocations are tracked in a bitmap which has 2 bits per page
+// this allows us to store allocations as small as 4KB
+// The presence and size of an allocation is encoded as follows:
+// let N = number of pages in the allocation, then
+// 10 : N = 1 // 4KB
+// 110n : N = 2 + n // 8-12KB
+// 1110nnnn : N = nnnn // 16-60KB
+// 1111n[18] : N = n[18] // 64KB-1GB
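+//
+// For example (assuming a 4 KB page size): a single-page allocation is written as the bit
+// pair "10", while a 3-page (12 KB) allocation uses the "110n" form with n = 1, i.e. the
+// bits 1,1,0,1 written in order of increasing bit position starting at the first page's
+// position. PagedEncode() below writes these codes and PagedDecode() reads them back.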
+
+const struct etab { unsigned char offset, len, codelen, code;} encode_table[] =
+{
+ {1,2,2,0x1},
+ {2,4,3,0x3},
+ {0,8,4,0x7},
+ {0,22,4,0xf}
+};
+
+// Return the code length for the specified allocation size (assumed to be aligned to pages)
+inline unsigned paged_codelen(unsigned size, unsigned pagesz)
+{
+ HEAP_ASSERT(size == Ceiling(size, pagesz));
+
+ if (size == pagesz)
+ return 2;
+ else if (size < 4*pagesz)
+ return 4;
+ else if (size < 16*pagesz)
+ return 8;
+ else
+ return 22;
+}
+
+inline const etab& paged_coding(unsigned npage)
+{
+ if (npage < 4)
+ return encode_table[npage>>1];
+ else if (npage < 16)
+ return encode_table[2];
+ else
+ return encode_table[3];
+}
+
+bool RHybridHeap::PagedEncode(unsigned pos, unsigned npage)
+{
+ const etab& e = paged_coding(npage);
+ if (pos + e.len > iPageMap.Size())
+ {
+ // need to grow the page bitmap to fit the cell length into the map
+ // if we outgrow the original bitmap buffer in the RHybridHeap metadata, just get enough pages to cover the full space:
+ // * the initial 68 byte bitmap maps (68*8*4kB)/2 = 1.1MB (2 bits per page)
+ // * a 4KB bitmap can map (4096*8*4kB)/2 = 64MB
+ unsigned maxsize = Ceiling(iMaxLength, iPageSize);
+ unsigned mapbits = maxsize >> (PAGESHIFT-1);
+ maxsize = Ceiling(mapbits>>3, iPageSize);
+ void* newb = Map(0, maxsize);
+ if (!newb)
+ return false;
+
+ unsigned char* oldb = iPageMap.Addr();
+ iPageMap.Init((unsigned char*)newb, (maxsize<<3), 0);
+ memcpy(newb, oldb, Ceiling(MAXSMALLPAGEBITS,8)>>3);
+ }
+ // encode the allocation block size into the bitmap, starting at the bit for the start page
+ unsigned bits = e.code;
+ bits |= (npage - e.offset) << e.codelen;
+ iPageMap.Set(pos, e.len, bits);
+ return true;
+}
+
+unsigned RHybridHeap::PagedDecode(unsigned pos) const
+{
+ __ASSERT_ALWAYS(pos + 2 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
+
+ unsigned bits = iPageMap.Bits(pos,2);
+ __ASSERT_ALWAYS(bits & 1, HEAP_PANIC(ETHeapBadCellAddress));
+ bits >>= 1;
+ if (bits == 0)
+ return 1;
+ __ASSERT_ALWAYS(pos + 4 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
+ bits = iPageMap.Bits(pos+2,2);
+ if ((bits & 1) == 0)
+ return 2 + (bits>>1);
+ else if ((bits>>1) == 0)
+ {
+ __ASSERT_ALWAYS(pos + 8 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
+ return iPageMap.Bits(pos+4, 4);
+ }
+ else
+ {
+ __ASSERT_ALWAYS(pos + 22 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
+ return iPageMap.Bits(pos+4, 18);
+ }
+}
+
+inline void RHybridHeap::PagedZapSize(void* p, unsigned size)
+{iPageMap.Setn(PtrDiff(p, iMemBase) >> (PAGESHIFT-1), paged_codelen(size, iPageSize) ,0);}
+
+inline unsigned RHybridHeap::PagedSize(void* p) const
+ { return PagedDecode(PtrDiff(p, iMemBase) >> (PAGESHIFT-1)) << PAGESHIFT; }
+
+inline bool RHybridHeap::PagedSetSize(void* p, unsigned size)
+{ return PagedEncode(PtrDiff(p, iMemBase) >> (PAGESHIFT-1), size >> PAGESHIFT); }
+
+inline void* RHybridHeap::Bitmap2addr(unsigned pos) const
+ { return iMemBase + (1 << (PAGESHIFT-1))*pos; }
+
+
+#ifndef QT_SYMBIAN4_ALLOCATOR_UNWANTED_CODE
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+/**
+Constructor where minimum and maximum length of the heap can be defined.
+By default, the chunk heap to be created uses a new local chunk, has a
+grow-by value of KMinHeapGrowBy, is unaligned, is not single threaded
+and has no mode flags set.
+
+@param aMinLength The minimum length of the heap to be created.
+@param aMaxLength The maximum length to which the heap to be created can grow.
+ If the supplied value is less than a page size, then it
+ is discarded and the page size is used instead.
+*/
+EXPORT_C TChunkHeapCreateInfo::TChunkHeapCreateInfo(TInt aMinLength, TInt aMaxLength) :
+ iVersionNumber(EVersion0), iMinLength(aMinLength), iMaxLength(aMaxLength),
+iAlign(0), iGrowBy(1), iSingleThread(EFalse),
+iOffset(0), iPaging(EUnspecified), iMode(0), iName(NULL)
+{
+}
+
+
+/**
+Sets the chunk heap to create a new chunk with the specified name.
+
+This overrides any previous call to TChunkHeapCreateInfo::SetNewChunkHeap() or
+TChunkHeapCreateInfo::SetExistingChunkHeap() for this TChunkHeapCreateInfo object.
+
+@param aName The name to be given to the chunk heap to be created.
+If NULL, the function constructs a local chunk to host the heap.
+If not NULL, a pointer to a descriptor containing the name to be
+assigned to the global chunk hosting the heap.
+*/
+EXPORT_C void TChunkHeapCreateInfo::SetCreateChunk(const TDesC* aName)
+{
+ iName = (TDesC*)aName;
+ iChunk.SetHandle(KNullHandle);
+}
+
+
+/**
+Sets the chunk heap to be created to use the chunk specified.
+
+This overrides any previous call to TChunkHeapCreateInfo::SetNewChunkHeap() or
+TChunkHeapCreateInfo::SetExistingChunkHeap() for this TChunkHeapCreateInfo object.
+
+@param aChunk A handle to the chunk to use for the heap.
+*/
+EXPORT_C void TChunkHeapCreateInfo::SetUseChunk(const RChunk aChunk)
+{
+ iName = NULL;
+ iChunk = aChunk;
+}
+
+EXPORT_C RHeap* UserHeap::FixedHeap(TAny* aBase, TInt aMaxLength, TInt aAlign, TBool aSingleThread)
+/**
+Creates a fixed length heap at a specified location.
+
+On successful return from this function, the heap is ready to use. This assumes that
+the memory pointed to by aBase is mapped and able to be used. You must ensure that you
+pass in a large enough value for aMaxLength. Passing in a value that is too small to
+hold the metadata for the heap (~1 KB) will result in the size being rounded up and the
+heap thereby running over the end of the memory assigned to it. But then if you were to
+pass in such a small value then you would not be able to do any allocations from the
+heap anyway. Moral of the story: Use a sensible value for aMaxLength!
+
+@param aBase A pointer to the location where the heap is to be constructed.
+@param aMaxLength The maximum length in bytes to which the heap can grow. If the
+ supplied value is too small to hold the heap's metadata, it
+ will be increased.
+@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
+ byte alignment is guaranteed for all allocations 8 bytes or
+ more in size. 4 byte allocations will be aligned to a 4
+ byte boundary. Best to pass in zero.
+@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
+ This will cause internal locks to be created, guaranteeing
+ thread safety.
+
+@return A pointer to the new heap, or NULL if the heap could not be created.
+
+@panic USER 56 if aMaxLength is negative.
+*/
+{
+ __ASSERT_ALWAYS( aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));
+ if ( aMaxLength < (TInt)sizeof(RHybridHeap) )
+ aMaxLength = sizeof(RHybridHeap);
+
+ RHybridHeap* h = new(aBase) RHybridHeap(aMaxLength, aAlign, aSingleThread);
+
+ if (!aSingleThread)
+ {
+ TInt r = h->iLock.CreateLocal();
+ if (r!=KErrNone)
+ return NULL; // No need to delete the RHybridHeap instance as the new above is only a placement new
+ h->iHandles = (TInt*)&h->iLock;
+ h->iHandleCount = 1;
+ }
+ return h;
+}
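+
+// Illustrative sketch of using FixedHeap() (the backing chunk and the sizes are arbitrary
+// example values, not requirements):
+//
+//     RChunk backing;
+//     if (backing.CreateLocal(0x10000, 0x10000) == KErrNone)
+//         {
+//         RHeap* heap = UserHeap::FixedHeap(backing.Base(), 0x10000, 0, EFalse);
+//         // ... heap->Alloc()/heap->Free(); the heap never grows beyond 0x10000 bytes
+//         }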
+
+/**
+Creates a chunk heap of the type specified by the parameter aCreateInfo.
+
+@param aCreateInfo A reference to a TChunkHeapCreateInfo object specifying the
+type of chunk heap to create.
+
+@return A pointer to the new heap or NULL if the heap could not be created.
+
+@panic USER 41 if the heap's specified minimum length is greater than the specified maximum length.
+@panic USER 55 if the heap's specified minimum length is negative.
+@panic USER 172 if the heap's specified alignment is not a power of 2 or is less than the size of a TAny*.
+*/
+EXPORT_C RHeap* UserHeap::ChunkHeap(const TChunkHeapCreateInfo& aCreateInfo)
+{
+ // aCreateInfo must have been configured to use a new chunk or an existing chunk.
+ __ASSERT_ALWAYS(!(aCreateInfo.iMode & (TUint32)~EChunkHeapMask), ::Panic(EHeapCreateInvalidMode));
+ RHeap* h = NULL;
+
+ if (aCreateInfo.iChunk.Handle() == KNullHandle)
+ {
+ // A new chunk is to be created for this heap.
+
+ __ASSERT_ALWAYS(aCreateInfo.iMinLength >= 0, ::Panic(ETHeapMinLengthNegative));
+ __ASSERT_ALWAYS(aCreateInfo.iMaxLength >= aCreateInfo.iMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
+
+ TInt maxLength = aCreateInfo.iMaxLength;
+ TInt page_size;
+ GET_PAGE_SIZE(page_size);
+
+ if (maxLength < page_size)
+ maxLength = page_size;
+
+ TChunkCreateInfo chunkInfo;
+#if USE_HYBRID_HEAP
+ if ( aCreateInfo.iOffset )
+ chunkInfo.SetNormal(0, maxLength); // Create DL only heap
+ else
+ {
+ maxLength = 2*maxLength;
+ chunkInfo.SetDisconnected(0, 0, maxLength); // Create hybrid heap
+ }
+#else
+ chunkInfo.SetNormal(0, maxLength); // Create DL only heap
+#endif
+ chunkInfo.SetOwner((aCreateInfo.iSingleThread)? EOwnerThread : EOwnerProcess);
+ if (aCreateInfo.iName)
+ chunkInfo.SetGlobal(*aCreateInfo.iName);
+ // Set the paging attributes of the chunk.
+ if (aCreateInfo.iPaging == TChunkHeapCreateInfo::EPaged)
+ chunkInfo.SetPaging(TChunkCreateInfo::EPaged);
+ if (aCreateInfo.iPaging == TChunkHeapCreateInfo::EUnpaged)
+ chunkInfo.SetPaging(TChunkCreateInfo::EUnpaged);
+ // Create the chunk.
+ RChunk chunk;
+ if (chunk.Create(chunkInfo) != KErrNone)
+ return NULL;
+ // Create the heap using the new chunk.
+ TUint mode = aCreateInfo.iMode | EChunkHeapDuplicate; // Must duplicate the handle.
+ h = OffsetChunkHeap(chunk, aCreateInfo.iMinLength, aCreateInfo.iOffset,
+ aCreateInfo.iGrowBy, maxLength, aCreateInfo.iAlign,
+ aCreateInfo.iSingleThread, mode);
+ chunk.Close();
+ }
+ else
+ {
+ h = OffsetChunkHeap(aCreateInfo.iChunk, aCreateInfo.iMinLength, aCreateInfo.iOffset,
+ aCreateInfo.iGrowBy, aCreateInfo.iMaxLength, aCreateInfo.iAlign,
+ aCreateInfo.iSingleThread, aCreateInfo.iMode);
+ }
+ return h;
+}
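+
+// Illustrative sketch of creating a heap via TChunkHeapCreateInfo (the lengths below are
+// arbitrary example values):
+//
+//     TChunkHeapCreateInfo createInfo(0x1000, 0x100000);  // min 4 KB, max 1 MB
+//     createInfo.SetCreateChunk(NULL);                    // host the heap in a new local chunk
+//     RHeap* heap = UserHeap::ChunkHeap(createInfo);
+//     if (heap)
+//         {
+//         TAny* cell = heap->Alloc(64);
+//         heap->Free(cell);
+//         heap->Close();
+//         }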
+
+
+
+EXPORT_C RHeap* UserHeap::ChunkHeap(const TDesC* aName, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread)
+/**
+Creates a heap in a local or global chunk.
+
+The chunk hosting the heap can be local or global.
+
+A local chunk is one which is private to the process creating it and is not
+intended for access by other user processes. A global chunk is one which is
+visible to all processes.
+
+The hosting chunk is local, if the pointer aName is NULL, otherwise the
+hosting chunk is global and the descriptor *aName is assumed to contain
+the name to be assigned to it.
+
+Ownership of the host chunk is vested in the current process.
+
+A minimum and a maximum size for the heap can be specified. On successful
+return from this function, the size of the heap is at least aMinLength.
+If subsequent requests for allocation of memory from the heap cannot be
+satisfied by compressing the heap, the size of the heap is extended in
+increments of aGrowBy until the request can be satisfied. Attempts to extend
+the heap causes the size of the host chunk to be adjusted.
+
+Note that the size of the heap cannot be adjusted by more than aMaxLength.
+
+@param aName If NULL, the function constructs a local chunk to host
+ the heap. If not NULL, a pointer to a descriptor containing
+ the name to be assigned to the global chunk hosting the heap.
+@param aMinLength The minimum length of the heap in bytes. This will be
+ rounded up to the nearest page size by the allocator.
+@param aMaxLength The maximum length in bytes to which the heap can grow. This
+ will be rounded up to the nearest page size by the allocator.
+@param aGrowBy The number of bytes by which the heap will grow when more
+ memory is required. This will be rounded up to the nearest
+ page size by the allocator. If a value is not explicitly
+ specified, the page size is taken by default.
+@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
+ byte alignment is guaranteed for all allocations 8 bytes or
+ more in size. 4 byte allocations will be aligned to a 4
+ byte boundary. Best to pass in zero.
+@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
+ This will cause internal locks to be created, guaranteeing
+ thread safety.
+
+@return A pointer to the new heap or NULL if the heap could not be created.
+
+@panic USER 41 if aMaxLength is < aMinLength.
+@panic USER 55 if aMinLength is negative.
+@panic USER 56 if aMaxLength is negative.
+*/
+ {
+ TInt page_size;
+ GET_PAGE_SIZE(page_size);
+ TInt minLength = _ALIGN_UP(aMinLength, page_size);
+ TInt maxLength = Max(aMaxLength, minLength);
+
+ TChunkHeapCreateInfo createInfo(minLength, maxLength);
+ createInfo.SetCreateChunk(aName);
+ createInfo.SetGrowBy(aGrowBy);
+ createInfo.SetAlignment(aAlign);
+ createInfo.SetSingleThread(aSingleThread);
+
+ return ChunkHeap(createInfo);
+ }
+#endif // QT_SYMBIAN4_ALLOCATOR_UNWANTED_CODE
+
+EXPORT_C RHeap* UserHeap::ChunkHeap(RChunk aChunk, TInt aMinLength, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
+/**
+Creates a heap in an existing chunk.
+
+This function is intended to be used to create a heap in a user writable code
+chunk as created by a call to RChunk::CreateLocalCode(). This type of heap can
+be used to hold code fragments from a JIT compiler.
+
+@param aChunk The chunk that will host the heap.
+@param aMinLength The minimum length of the heap in bytes. This will be
+ rounded up to the nearest page size by the allocator.
+@param aGrowBy The number of bytes by which the heap will grow when more
+ memory is required. This will be rounded up to the nearest
+ page size by the allocator. If a value is not explicitly
+ specified, the page size is taken by default.
+@param aMaxLength The maximum length in bytes to which the heap can grow. This
+ will be rounded up to the nearest page size by the allocator.
+ If 0 is passed in, the maximum length of the chunk is used.
+@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
+ byte alignment is guaranteed for all allocations 8 bytes or
+ more in size. 4 byte allocations will be aligned to a 4
+ byte boundary. Best to pass in zero.
+@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
+ This will cause internal locks to be created, guaranteeing
+ thread safety.
+@param aMode Flags controlling the heap creation. See RAllocator::TFlags.
+
+@return A pointer to the new heap or NULL if the heap could not be created.
+
+@see UserHeap::OffsetChunkHeap()
+*/
+ {
+ return OffsetChunkHeap(aChunk, aMinLength, 0, aGrowBy, aMaxLength, aAlign, aSingleThread, aMode);
+ }
+
+EXPORT_C RHeap* UserHeap::OffsetChunkHeap(RChunk aChunk, TInt aMinLength, TInt aOffset, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
+/**
+Creates a heap in an existing chunk, offset from the beginning of the chunk.
+
+This function is intended to be used to create a heap using a chunk which has
+some of its memory already used, at the start of that chunk. The maximum
+length to which the heap can grow is the maximum size of the chunk, minus the
+data at the start of the chunk.
+
+The offset at which to create the heap is passed in as the aOffset parameter.
+Legacy heap implementations always respected the aOffset value, however more
+modern heap implementations are more sophisticated and cannot necessarily respect
+this value. Therefore, if possible, you should always use an aOffset of 0 unless
+you have a very explicit requirement for using a non zero value. Using a non zero
+value will result in a less efficient heap algorithm being used in order to respect
+the offset.
+
+Another issue to consider when using this function is the type of the chunk passed
+in. In order for the most efficient heap algorithms to be used, the chunk passed
+in should always be a disconnected chunk. Passing in a non disconnected chunk will
+again result in a less efficient heap algorithm being used.
+
+Finally, another requirement for the most efficient heap algorithms to be used is
+for the heap to be able to expand. Therefore, unless you have a specific reason to
+do so, always specify aMaxLength > aMinLength.
+
+So, if possible, use aOffset == zero, aMaxLength > aMinLength and a disconnected
+chunk for best results!
+
+@param aChunk The chunk that will host the heap.
+@param aMinLength The minimum length of the heap in bytes. This will be
+ rounded up to the nearest page size by the allocator.
+@param aOffset The offset in bytes from the start of the chunk at which to
+ create the heap. If used (and it shouldn't really be!)
+ then it will be rounded up to a multiple of 8, to respect
+ EABI 8 byte alignment requirements.
+@param aGrowBy The number of bytes by which the heap will grow when more
+ memory is required. This will be rounded up to the nearest
+ page size by the allocator. If a value is not explicitly
+ specified, the page size is taken by default.
+@param aMaxLength The maximum length in bytes to which the heap can grow. This
+ will be rounded up to the nearest page size by the allocator.
+ If 0 is passed in, the maximum length of the chunk is used.
+@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
+ byte alignment is guaranteed for all allocations 8 bytes or
+ more in size. 4 byte allocations will be aligned to a 4
+ byte boundary. Best to pass in zero.
+@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
+ This will cause internal locks to be created, guaranteeing
+ thread safety.
+@param aMode Flags controlling the heap creation. See RAllocator::TFlags.
+
+@return A pointer to the new heap or NULL if the heap could not be created.
+
+@panic USER 41 if aMaxLength is < aMinLength.
+@panic USER 55 if aMinLength is negative.
+@panic USER 56 if aMaxLength is negative.
+@panic USER 168 if aOffset is negative.
+*/
+ {
+ TBool dlOnly = EFalse;
+ TInt pageSize;
+ GET_PAGE_SIZE(pageSize);
+ TInt align = RHybridHeap::ECellAlignment; // Always use EABI 8 byte alignment
+
+ __ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
+ __ASSERT_ALWAYS(aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));
+
+ if ( aMaxLength > 0 )
+ __ASSERT_ALWAYS(aMaxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
+
+ // Stick to EABI alignment for the start offset, if any
+ aOffset = _ALIGN_UP(aOffset, align);
+
+ // Using an aOffset > 0 means that we can't use the hybrid allocator and have to revert to Doug Lea only
+ if (aOffset > 0)
+ dlOnly = ETrue;
+
+ // Ensure that the minimum length is enough to hold the RHybridHeap object itself
+ TInt minCell = _ALIGN_UP(Max((TInt)RHybridHeap::EAllocCellSize, (TInt)RHybridHeap::EFreeCellSize), align);
+ TInt hybridHeapSize = (sizeof(RHybridHeap) + minCell);
+ if (aMinLength < hybridHeapSize)
+ aMinLength = hybridHeapSize;
+
+ // Round the minimum length up to a multiple of the page size, taking into account that the
+ // offset takes up a part of the chunk's memory
+ aMinLength = _ALIGN_UP((aMinLength + aOffset), pageSize);
+
+ // If aMaxLength is 0 then use the entire chunk
+ TInt chunkSize = aChunk.MaxSize();
+ if (aMaxLength == 0)
+ {
+ aMaxLength = chunkSize;
+ }
+ // Otherwise round the maximum length up to a multiple of the page size, taking into account that
+ // the offset takes up a part of the chunk's memory. We also clip the maximum length to the chunk
+ // size, so the user may get a little less than requested if the chunk size is not large enough
+ else
+ {
+ aMaxLength = _ALIGN_UP((aMaxLength + aOffset), pageSize);
+ if (aMaxLength > chunkSize)
+ aMaxLength = chunkSize;
+ }
+
+ // If the rounded up values don't make sense then a crazy aMinLength or aOffset must have been passed
+ // in, so fail the heap creation
+ if (aMinLength > aMaxLength)
+ return NULL;
+
+ // Adding the offset into the minimum and maximum length was only necessary for ensuring a good fit of
+ // the heap into the chunk. Re-adjust them now back to non offset relative sizes
+ aMinLength -= aOffset;
+ aMaxLength -= aOffset;
+
+ // If we are still creating the hybrid allocator (call parameter
+ // aOffset is 0 and aMaxLength > aMinLength), we must reduce the heap's
+ // aMaxLength to aMaxLength/2 and set aOffset to point at the middle of the chunk.
+ TInt offset = aOffset;
+ TInt maxLength = aMaxLength;
+ if (!dlOnly && (aMaxLength > aMinLength))
+ maxLength = offset = _ALIGN_UP(aMaxLength >> 1, pageSize);
+
+ // Try to use Commit() to map aMinLength of physical memory for the heap, taking into account the offset. If
+ // the operation fails, assume that the chunk is not a disconnected chunk and try to map physical memory
+ // with Adjust(). In this case, we also can't use the hybrid allocator and have to revert to Doug Lea only
+ TBool useAdjust = EFalse;
+ TInt r = aChunk.Commit(offset, aMinLength);
+ if (r == KErrGeneral)
+ {
+ dlOnly = useAdjust = ETrue;
+ r = aChunk.Adjust(aMinLength);
+ if (r != KErrNone)
+ return NULL;
+ }
+ else if (r == KErrNone)
+ {
+ // We have a disconnected chunk; reset aOffset and aMaxLength
+ aOffset = offset;
+ aMaxLength = maxLength;
+ }
+
+ else
+ return NULL;
+
+ // Parameters have been mostly verified and we know whether to use the hybrid allocator or Doug Lea only. The
+ // constructor for the hybrid heap will automatically drop back to Doug Lea if it determines that aMinLength
+ // == aMaxLength, so no need to worry about that requirement here. The user specified alignment is not used but
+ // is passed in so that it can be sanity checked in case the user is doing something totally crazy with it
+ RHybridHeap* h = new (aChunk.Base() + aOffset) RHybridHeap(aChunk.Handle(), aOffset, aMinLength, aMaxLength,
+ aGrowBy, aAlign, aSingleThread, dlOnly, useAdjust);
+
+ if (h->ConstructLock(aMode) != KErrNone)
+ return NULL;
+
+ // Return the heap address
+ return h;
+ }
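+
+// Illustrative sketch of the recommended usage described above (aOffset of 0, aMaxLength >
+// aMinLength and a disconnected chunk); the sizes and flags are arbitrary example values:
+//
+//     RChunk chunk;
+//     if (chunk.CreateDisconnectedLocal(0, 0, 0x200000) == KErrNone)
+//         {
+//         RHeap* heap = UserHeap::OffsetChunkHeap(chunk, 0x1000, 0, 0x1000, 0, 0,
+//                                                 EFalse, EChunkHeapDuplicate);
+//         chunk.Close();  // mirrors ChunkHeap() above: EChunkHeapDuplicate lets the heap keep its own handle
+//         }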
+
+#define UserTestDebugMaskBit(bit) (TBool)(UserSvr::DebugMask(bit>>5) & (1<<(bit&31)))
+
+_LIT(KLitDollarHeap,"$HEAP");
+EXPORT_C TInt UserHeap::CreateThreadHeap(SStdEpocThreadCreateInfo& aInfo, RHeap*& aHeap, TInt aAlign, TBool aSingleThread)
+/**
+@internalComponent
+*/
+//
+// Create a user-side heap
+//
+{
+ TInt page_size;
+ GET_PAGE_SIZE(page_size);
+ TInt minLength = _ALIGN_UP(aInfo.iHeapInitialSize, page_size);
+ TInt maxLength = Max(aInfo.iHeapMaxSize, minLength);
+#ifdef ENABLE_BTRACE
+ if (UserTestDebugMaskBit(96)) // 96 == KUSERHEAPTRACE in nk_trace.h
+ aInfo.iFlags |= ETraceHeapAllocs;
+#endif // ENABLE_BTRACE
+ // Create the thread's heap chunk.
+ RChunk c;
+#ifndef NO_NAMED_LOCAL_CHUNKS
+ TChunkCreateInfo createInfo;
+
+ createInfo.SetThreadHeap(0, maxLength, KLitDollarHeap()); // Initialise with no memory committed.
+#if USE_HYBRID_HEAP
+ //
+ // Create disconnected chunk for hybrid heap with double max length value
+ //
+ maxLength = 2*maxLength;
+ createInfo.SetDisconnected(0, 0, maxLength);
+#endif
+#ifdef SYMBIAN_WRITABLE_DATA_PAGING
+ // Set the paging policy of the heap chunk based on the thread's paging policy.
+ TUint pagingflags = aInfo.iFlags & EThreadCreateFlagPagingMask;
+ switch (pagingflags)
+ {
+ case EThreadCreateFlagPaged:
+ createInfo.SetPaging(TChunkCreateInfo::EPaged);
+ break;
+ case EThreadCreateFlagUnpaged:
+ createInfo.SetPaging(TChunkCreateInfo::EUnpaged);
+ break;
+ case EThreadCreateFlagPagingUnspec:
+ // Leave the chunk paging policy unspecified so the process's
+ // paging policy is used.
+ break;
+ }
+#endif // SYMBIAN_WRITABLE_DATA_PAGING
+
+ TInt r = c.Create(createInfo);
+#else
+ TInt r = c.CreateDisconnectedLocal(0, 0, maxLength * 2);
+#endif
+ if (r!=KErrNone)
+ return r;
+
+ aHeap = ChunkHeap(c, minLength, page_size, maxLength, aAlign, aSingleThread, EChunkHeapSwitchTo|EChunkHeapDuplicate);
+ c.Close();
+
+ if ( !aHeap )
+ return KErrNoMemory;
+
+#ifdef ENABLE_BTRACE
+ if (aInfo.iFlags & ETraceHeapAllocs)
+ {
+ aHeap->iFlags |= RHeap::ETraceAllocs;
+ BTraceContext8(BTrace::EHeap, BTrace::EHeapCreate,(TUint32)aHeap, RHybridHeap::EAllocCellSize);
+ TInt chunkId = ((RHandleBase&)((RHybridHeap*)aHeap)->iChunkHandle).BTraceId();
+ BTraceContext8(BTrace::EHeap, BTrace::EHeapChunkCreate, (TUint32)aHeap, chunkId);
+ }
+ if (aInfo.iFlags & EMonitorHeapMemory)
+ aHeap->iFlags |= RHeap::EMonitorMemory;
+#endif // ENABLE_BTRACE
+
+ return KErrNone;
+}
+
+#endif // __KERNEL_MODE__
+
+#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
diff --git a/src/corelib/arch/symbian/heap_hybrid_p.h b/src/corelib/arch/symbian/heap_hybrid_p.h
new file mode 100644
index 0000000..736af72
--- /dev/null
+++ b/src/corelib/arch/symbian/heap_hybrid_p.h
@@ -0,0 +1,402 @@
+/****************************************************************************
+**
+** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef __HEAP_HYBRID_H__
+#define __HEAP_HYBRID_H__
+
+#include <e32cmn.h>
+
+#ifdef __WINS__
+#define USE_HYBRID_HEAP 0
+#else
+#define USE_HYBRID_HEAP 1
+#endif
+
+// This stuff is all temporary in order to prevent having to include dla.h from heap_hybrid.h, which causes
+// problems due to its definition of size_t (and possibly other types). This is unfortunate but we cannot
+// pollute the namespace with these types or it will cause problems with Open C and other POSIX compatibility
+// efforts in Symbian
+
+#define NSMALLBINS (32U)
+#define NTREEBINS (32U)
+
+#ifndef MALLOC_ALIGNMENT
+ #define MALLOC_ALIGNMENT ((TUint)8U)
+#endif /* MALLOC_ALIGNMENT */
+
+#define CHUNK_OVERHEAD (sizeof(TUint))
+
+typedef unsigned int bindex_t;
+typedef unsigned int binmap_t;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_segment msegment;
+typedef struct malloc_state* mstate;
+typedef struct malloc_tree_chunk* tbinptr;
+typedef struct malloc_tree_chunk* tchunkptr;
+
+struct malloc_segment {
+ TUint8* iBase; /* base address */
+ TUint iSize; /* allocated size */
+};
+
+struct malloc_state {
+ binmap_t iSmallMap;
+ binmap_t iTreeMap;
+ TUint iDvSize;
+ TUint iTopSize;
+ mchunkptr iDv;
+ mchunkptr iTop;
+ TUint iTrimCheck;
+ mchunkptr iSmallBins[(NSMALLBINS+1)*2];
+ tbinptr iTreeBins[NTREEBINS];
+ msegment iSeg;
+ };
+
+class RHybridHeap : public RHeap
+ {
+
+public:
+ // declarations copied from Symbian^4 RAllocator and RHeap
+ typedef void (*TWalkFunc)(TAny*, RHeap::TCellType, TAny*, TInt);
+ enum TFlags {ESingleThreaded=1, EFixedSize=2, ETraceAllocs=4, EMonitorMemory=8,};
+ enum TAllocDebugOp
+ {
+ ECount, EMarkStart, EMarkEnd, ECheck, ESetFail, ECopyDebugInfo, ESetBurstFail, EGetFail,
+ EGetSize=48, EGetMaxLength, EGetBase, EAlignInteger, EAlignAddr
+ };
+ enum TDebugOp { EWalk = 128, EHybridHeap };
+ enum TAllocFail
+ {
+ ERandom, ETrueRandom, EDeterministic, EHybridNone, EFailNext, EReset, EBurstRandom,
+ EBurstTrueRandom, EBurstDeterministic, EBurstFailNext, ECheckFailure,
+ };
+
+ struct HeapInfo
+ {
+ unsigned iFootprint;
+ unsigned iMaxSize;
+ unsigned iAllocBytes;
+ unsigned iAllocN;
+ unsigned iFreeBytes;
+ unsigned iFreeN;
+ };
+
+ struct SHeapCellInfo { RHybridHeap* iHeap; TInt iTotalAlloc; TInt iTotalAllocSize; TInt iTotalFree; TInt iLevelAlloc; SDebugCell* iStranded; };
+
+
+ /**
+ @internalComponent
+ */
+ enum TAllocatorType
+ {ESlabAllocator, EDougLeaAllocator, EPageAllocator, EFullSlab=0x80, EPartialFullSlab=0x40, EEmptySlab=0x20, ESlabSpare=0x10, ESlabMask=0xf0};
+
+
+ /**
+ @internalComponent
+ */
+ struct SWalkInfo {
+ /**
+    Address of the walk function to be called
+ */
+ TWalkFunc iFunction;
+
+ /**
+    The first parameter for the callback function
+ */
+ TAny* iParam;
+ /**
+ Pointer to RHybridHeap object
+ */
+ RHybridHeap* iHeap;
+ };
+
+ /**
+ @internalComponent
+ */
+ struct SConfig {
+ /**
+    Required slab configuration (bit 0 = 4, bit 1 = 8 ..
+ bit 13 = 56)
+ */
+ TUint32 iSlabBits;
+ /**
+ Delayed slab threshold in bytes (0 = no threshold)
+ */
+ TInt iDelayedSlabThreshold;
+ /**
+    2^n is the smallest size allocated in the paged allocator (n = 14-31, i.e. 16 KB upwards)
+ */
+ TInt iPagePower;
+
+ };
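A minimal, self-contained sketch (not part of this patch) of how a slab configuration word of the kind described above can be built: bit n enables the (n+1)*4-byte slab size, per the comment on iSlabBits. The helper name and the size list are illustrative assumptions.

// Sketch only: builds an iSlabBits-style configuration word from a list of
// slab sizes, assuming the documented encoding (bit 0 = 4, bit 1 = 8, ..., bit 13 = 56).
#include <cstdio>

static unsigned slabConfigBits(const int *sizes, int count)   // hypothetical helper
{
    unsigned bits = 0;
    for (int i = 0; i < count; ++i) {
        const int size = sizes[i];
        if (size >= 4 && size <= 56 && size % 4 == 0)
            bits |= 1u << (size / 4 - 1);                     // bit n <-> (n + 1) * 4 bytes
    }
    return bits;
}

int main()
{
    const int sizes[] = { 4, 8, 12, 16, 20, 24, 32, 48, 56 };
    std::printf("iSlabBits = 0x%04x\n",
                slabConfigBits(sizes, (int)(sizeof(sizes) / sizeof(sizes[0]))));
    return 0;
}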
+
+ /**
+ @internalComponent
+
+ This structure is used by test code for configuring the allocators and obtaining information
+    from them in order to ensure they are behaving as required. This is internal, test-specific
+    code and is liable to change without warning at any time. You should not use it under any
+    circumstances!
+ */
+ struct STestCommand
+ {
+ TInt iCommand; // The test related command to be executed
+
+ union
+ {
+ SConfig iConfig; // Configuration used by test code only
+ TAny* iData; // Extra supporting data for the test command
+ };
+ };
+
+ /**
+ @internalComponent
+
+    Commands used by test code for configuring the allocators and obtaining information from them
+ */
+ enum TTestCommand { EGetConfig, ESetConfig, EHeapMetaData, ETestData };
+
+ virtual TAny* Alloc(TInt aSize);
+ virtual void Free(TAny* aPtr);
+ virtual TAny* ReAlloc(TAny* aPtr, TInt aSize, TInt aMode=0);
+ virtual TInt AllocLen(const TAny* aCell) const;
+#ifndef __KERNEL_MODE__
+ virtual TInt Compress();
+ virtual void Reset();
+ virtual TInt AllocSize(TInt& aTotalAllocSize) const;
+ virtual TInt Available(TInt& aBiggestBlock) const;
+#endif
+ virtual TInt DebugFunction(TInt aFunc, TAny* a1=NULL, TAny* a2=NULL);
+protected:
+ virtual TInt Extension_(TUint aExtensionId, TAny*& a0, TAny* a1);
+
+public:
+ TAny* operator new(TUint aSize, TAny* aBase) __NO_THROW;
+ void operator delete(TAny*, TAny*);
+
+private:
+ TInt DoCountAllocFree(TInt& aFree);
+ TInt DoCheckHeap(SCheckInfo* aInfo);
+ void DoMarkStart();
+ TUint32 DoMarkEnd(TInt aExpected);
+ void DoSetAllocFail(TAllocFail aType, TInt aRate);
+ TBool CheckForSimulatedAllocFail();
+ void DoSetAllocFail(TAllocFail aType, TInt aRate, TUint aBurst);
+
+ void Lock() const;
+ void Unlock() const;
+ TInt ChunkHandle() const;
+
+ RHybridHeap(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread, TBool aDlOnly, TBool aUseAdjust);
+ RHybridHeap(TInt aMaxLength, TInt aAlign=0, TBool aSingleThread=ETrue);
+ RHybridHeap();
+
+ void Init(TInt aBitmapSlab, TInt aPagePower);
+ inline void InitBins(mstate m);
+ inline void InitTop(mstate m, mchunkptr p, TUint psize);
+ void* SysAlloc(mstate m, TUint nb);
+ int SysTrim(mstate m, TUint pad);
+ void* TmallocLarge(mstate m, TUint nb);
+ void* TmallocSmall(mstate m, TUint nb);
+ /*MACROS converted functions*/
+ static inline void UnlinkFirstSmallChunk(mstate M,mchunkptr B,mchunkptr P,bindex_t& I);
+ static inline void InsertSmallChunk(mstate M,mchunkptr P, TUint S);
+ static inline void InsertChunk(mstate M,mchunkptr P,TUint S);
+ static inline void UnlinkLargeChunk(mstate M,tchunkptr X);
+ static inline void UnlinkSmallChunk(mstate M, mchunkptr P,TUint S);
+ static inline void UnlinkChunk(mstate M, mchunkptr P, TUint S);
+ static inline void ComputeTreeIndex(TUint S, bindex_t& I);
+ static inline void InsertLargeChunk(mstate M,tchunkptr X,TUint S);
+ static inline void ReplaceDv(mstate M, mchunkptr P, TUint S);
+ static inline void ComputeBit2idx(binmap_t X,bindex_t& I);
+
+ void DoComputeTreeIndex(TUint S, bindex_t& I);
+ void DoCheckAnyChunk(mstate m, mchunkptr p);
+ void DoCheckTopChunk(mstate m, mchunkptr p);
+ void DoCheckInuseChunk(mstate m, mchunkptr p);
+ void DoCheckFreeChunk(mstate m, mchunkptr p);
+ void DoCheckMallocedChunk(mstate m, void* mem, TUint s);
+ void DoCheckTree(mstate m, tchunkptr t);
+ void DoCheckTreebin(mstate m, bindex_t i);
+ void DoCheckSmallbin(mstate m, bindex_t i);
+ TInt BinFind(mstate m, mchunkptr x);
+ TUint TraverseAndCheck(mstate m);
+ void DoCheckMallocState(mstate m);
+
+ TInt GetInfo(struct HeapInfo* i, SWalkInfo* wi=NULL) const;
+ void InitDlMalloc(TUint capacity, int locked);
+ void* DlMalloc(TUint);
+ void DlFree(void*);
+ void* DlRealloc(void*, TUint, TInt);
+ TUint DlInfo(struct HeapInfo* i, SWalkInfo* wi) const;
+ void DoCheckCommittedSize(TInt aNPages, mstate aM);
+
+ TAny* ReAllocImpl(TAny* aPtr, TInt aSize, TInt aMode);
+ void Construct(TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust, TInt aAlign);
+#ifndef __KERNEL_MODE__
+ TInt ConstructLock(TUint32 aMode);
+#endif
+ static void Walk(SWalkInfo* aInfo, TAny* aBfr, TInt aLth, TCellType aBfrType, TAllocatorType aAlloctorType);
+ static void WalkCheckCell(TAny* aPtr, TCellType aType, TAny* aCell, TInt aLen);
+ void* Map(void* p, TInt sz);
+ void Unmap(void* p,TInt sz);
+
+private:
+ TInt iMinLength;
+ TInt iOffset; // offset of RHeap object from chunk base
+ TInt iGrowBy;
+ TInt iMinCell;
+ TInt iPageSize;
+
+ // Temporarily commented out and exported from RHeap to prevent source breaks from req417-52840.
+ // This will be moved with another REQ after submission and subsequent fixing of bad code
+ //TInt iNestingLevel;
+ TInt iAllocCount;
+ // Temporarily commented out. See comment above regarding req417-52840 source breaks
+ //TAllocFail iFailType;
+ TInt iFailRate;
+ TBool iFailed;
+ TInt iFailAllocCount;
+ TInt iRand;
+ // Temporarily commented out. See comment above regarding req417-52840 source breaks
+ //TAny* iTestData;
+
+ TInt iChunkSize;
+ TInt iHighWaterMark;
+ TBool iUseAdjust;
+ TBool iDLOnly;
+
+ malloc_state iGlobalMallocState;
+
+#ifdef __KERNEL_MODE__
+
+ friend class RHeapK;
+
+#else
+
+ friend class UserHeap;
+ friend class HybridHeap;
+ friend class TestHybridHeap;
+
+private:
+
+ static void TreeRemove(slab* s);
+ static void TreeInsert(slab* s,slab** r);
+
+ enum {EOkBits = (1<<(MAXSLABSIZE>>2))-1};
+
+ void SlabInit();
+ void SlabConfig(unsigned slabbitmap);
+ void* SlabAllocate(slabset& allocator);
+ void SlabFree(void* p);
+ void* AllocNewSlab(slabset& allocator);
+ void* AllocNewPage(slabset& allocator);
+ void* InitNewSlab(slabset& allocator, slab* s);
+ void FreeSlab(slab* s);
+ void FreePage(page* p);
+ void SlabInfo(struct HeapInfo* i, SWalkInfo* wi) const;
+ static void SlabFullInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi);
+ static void SlabPartialInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi);
+ static void SlabEmptyInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi);
+ static void TreeWalk(slab* const* root, void (*f)(slab*, struct HeapInfo*, SWalkInfo*), struct HeapInfo* i, SWalkInfo* wi);
+
+ static void WalkPartialFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType aBfrType, TInt aLth);
+ static void WalkFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType aBfrType, TInt aLth);
+ void DoCheckSlab(slab* aSlab, TAllocatorType aSlabType, TAny* aBfr=NULL);
+ void DoCheckSlabTrees();
+ void DoCheckSlabTree(slab** aS, TBool aPartialPage);
+ void BuildPartialSlabBitmap(TUint32* aBitmap, slab* aSlab, TAny* aBfr=NULL);
+
+ static inline unsigned SlabHeaderFree(unsigned h)
+ {return (h&0x000000ff);}
+ static inline unsigned SlabHeaderPagemap(unsigned h)
+ {return (h&0x00000f00)>>8;}
+ static inline unsigned SlabHeaderSize(unsigned h)
+ {return (h&0x0003f000)>>12;}
+ static inline unsigned SlabHeaderUsedm4(unsigned h)
+ {return (h&0x0ffc0000)>>18;}
+ /***paged allocator code***/
+ void PagedInit(TInt aPagePower);
+ void* PagedAllocate(unsigned size);
+ void PagedFree(void* p);
+ void* PagedReallocate(void* p, unsigned size, TInt mode);
+
+ bool PagedEncode(unsigned pos, unsigned npage);
+ unsigned PagedDecode(unsigned pos) const;
+ inline unsigned PagedSize(void* p) const;
+ inline bool PagedSetSize(void* p, unsigned size);
+ inline void PagedZapSize(void* p, unsigned size);
+ inline void* Bitmap2addr(unsigned pos) const;
+ void PagedInfo(struct HeapInfo* i, SWalkInfo* wi) const;
+ void ResetBitmap();
+ TBool CheckBitmap(void* aBfr, TInt aSize, TUint32& aDummy, TInt& aNPages);
+
+private:
+ paged_bitmap iPageMap; // bitmap representing page allocator's pages
+ TUint8* iMemBase; // bottom of paged/slab memory (chunk base)
+ TUint8 iBitMapBuffer[MAXSMALLPAGEBITS>>3]; // buffer for initial page bitmap
+    TInt iSlabThreshold;                // allocations smaller than this are done by the slab allocator
+ TInt iPageThreshold; // 2^n is smallest cell size allocated in paged allocator
+ TInt iSlabInitThreshold; // slab allocator will be used after chunk reaches this size
+ TUint32 iSlabConfigBits; // set of bits that specify which slab sizes to use
+ slab* iPartialPage; // partial-use page tree
+ slab* iFullSlab; // full slabs list (so we can find them when walking)
+ page* iSparePage; // cached, to avoid kernel exec calls for unmapping/remapping
+    TUint8 iSizeMap[(MAXSLABSIZE>>2)+1];    // maps allocation size class to slabset index
+    slabset iSlabAlloc[MAXSLABSIZE>>2];     // slabsets, one per size class
+
+#endif // __KERNEL_MODE__
+};
+
+#define HEAP_ASSERT(x) __ASSERT_DEBUG(x, HEAP_PANIC(ETHeapBadCellAddress))
+
+template <class T> inline T Floor(const T addr, unsigned aln)
+{return T((unsigned(addr))&~(aln-1));}
+template <class T> inline T Ceiling(T addr, unsigned aln)
+{return T((unsigned(addr)+(aln-1))&~(aln-1));}
+template <class T> inline unsigned LowBits(T addr, unsigned aln)
+{return unsigned(addr)&(aln-1);}
+template <class T1, class T2> inline int PtrDiff(const T1* a1, const T2* a2)
+{return reinterpret_cast<const unsigned char*>(a1) - reinterpret_cast<const unsigned char*>(a2);}
+template <class T> inline T Offset(T addr, unsigned ofs)
+{return T(unsigned(addr)+ofs);}
+
+#endif //__HEAP_HYBRID_H__
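A small self-contained illustration (not part of the patch) of what the Floor, Ceiling and LowBits templates above compute for a power-of-two alignment; the sample address and alignment are arbitrary.

// Sketch only: mirrors the alignment arithmetic of Floor/Ceiling/LowBits above.
#include <cstdio>

int main()
{
    unsigned addr = 0x1234567Au;
    unsigned aln  = 8u;                                    // must be a power of two
    unsigned down = addr & ~(aln - 1);                     // Floor(addr, 8)   -> 0x12345678
    unsigned up   = (addr + (aln - 1)) & ~(aln - 1);       // Ceiling(addr, 8) -> 0x12345680
    unsigned low  = addr & (aln - 1);                      // LowBits(addr, 8) -> 0x2
    std::printf("floor=0x%x ceiling=0x%x lowbits=0x%x\n", down, up, low);
    return 0;
}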
diff --git a/src/corelib/arch/symbian/page_alloc_p.h b/src/corelib/arch/symbian/page_alloc_p.h
new file mode 100644
index 0000000..5241d78
--- /dev/null
+++ b/src/corelib/arch/symbian/page_alloc_p.h
@@ -0,0 +1,68 @@
+/****************************************************************************
+**
+** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef __KERNEL_MODE__
+
+const int MAXSMALLPAGEBITS = 68<<3;
+#define MINPAGEPOWER PAGESHIFT+2
+
+struct paged_bitmap
+{
+ public:
+ inline paged_bitmap() : iBase(0), iNbits(0) {}
+ void Init(unsigned char* p, unsigned size, unsigned bit);
+//
+ inline unsigned char* Addr() const;
+ inline unsigned Size() const;
+//
+ inline void Set(unsigned ix, unsigned bit);
+ inline unsigned operator[](unsigned ix) const;
+ bool Is(unsigned ix, unsigned len, unsigned bit) const;
+ void Set(unsigned ix, unsigned len, unsigned val);
+ void Setn(unsigned ix, unsigned len, unsigned bit);
+ unsigned Bits(unsigned ix, unsigned len) const; // little endian
+ int Find(unsigned start, unsigned bit) const;
+ private:
+ unsigned char* iBase;
+ unsigned iNbits;
+};
+
+#endif // __KERNEL_MODE__
diff --git a/src/corelib/arch/symbian/qt_heapsetup_symbian.cpp b/src/corelib/arch/symbian/qt_heapsetup_symbian.cpp
new file mode 100644
index 0000000..d6b7351
--- /dev/null
+++ b/src/corelib/arch/symbian/qt_heapsetup_symbian.cpp
@@ -0,0 +1,105 @@
+/****************************************************************************
+**
+** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qt_hybridheap_symbian_p.h"
+
+#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
+
+extern const TInt KHeapShrinkHysRatio = 0x800;
+
+/*
+ * \internal
+ * Called from the qtmain.lib application wrapper.
+ * Create a new heap as requested, but use the new allocator
+ */
+Q_CORE_EXPORT TInt qt_symbian_SetupThreadHeap(TBool aNotFirst, SStdEpocThreadCreateInfo& aInfo)
+{
+ TInt r = KErrNone;
+ if (!aInfo.iAllocator && aInfo.iHeapInitialSize>0)
+ {
+ // new heap required
+ RHeap* pH = NULL;
+ r = UserHeap::CreateThreadHeap(aInfo, pH);
+ }
+ else if (aInfo.iAllocator)
+ {
+ // sharing a heap
+ RAllocator* pA = aInfo.iAllocator;
+ pA->Open();
+ User::SwitchAllocator(pA);
+ }
+ return r;
+}
+
+#ifndef NO_NAMED_LOCAL_CHUNKS
+void TChunkCreateInfo::SetThreadHeap(TInt aInitialSize, TInt aMaxSize, const TDesC& aName)
+{
+ iType = TChunkCreate::ENormal | TChunkCreate::EData;
+ iMaxSize = aMaxSize;
+ iInitialBottom = 0;
+ iInitialTop = aInitialSize;
+ iAttributes |= TChunkCreate::ELocalNamed;
+ iName = &aName;
+ iOwnerType = EOwnerThread;
+}
+#endif // NO_NAMED_LOCAL_CHUNKS
+
+void Panic(TCdtPanic reason)
+{
+ _LIT(KCat, "QtHybridHeap");
+ User::Panic(KCat, reason);
+}
+
+#else /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
+
+#include <e32std.h>
+
+/*
+ * \internal
+ * Called from the qtmain.lib application wrapper.
+ * Create a new heap as requested, using the default system allocator
+ */
+Q_CORE_EXPORT TInt qt_symbian_SetupThreadHeap(TBool aNotFirst, SStdEpocThreadCreateInfo& aInfo)
+{
+ return UserHeap::SetupThreadHeap(aNotFirst, aInfo);
+}
+
+#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
diff --git a/src/corelib/arch/symbian/qt_hybridheap_symbian_p.h b/src/corelib/arch/symbian/qt_hybridheap_symbian_p.h
new file mode 100644
index 0000000..5827aca
--- /dev/null
+++ b/src/corelib/arch/symbian/qt_hybridheap_symbian_p.h
@@ -0,0 +1,174 @@
+/****************************************************************************
+**
+** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QT_HYBRIDHEAP_SYMBIAN_H
+#define QT_HYBRIDHEAP_SYMBIAN_H
+
+#include <qglobal.h>
+
+#if !defined(SYMBIAN_GRAPHICS_WSERV_QT_EFFECTS) && !defined(__WINS__)
+//Enable the (backported) new allocator. When it is available in the OS,
+//this flag should be disabled from that OS version onwards
+#define QT_USE_NEW_SYMBIAN_ALLOCATOR
+#endif
+
+#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
+
+#ifdef Q_CC_RVCT
+#pragma push
+#pragma arm
+#pragma Otime
+#pragma O2
+#endif
+
+#include "common_p.h"
+#ifdef QT_SYMBIAN_HAVE_U32STD_H
+#include <u32std.h>
+#endif
+#ifdef QT_SYMBIAN_HAVE_E32BTRACE_H
+#include <e32btrace.h>
+// enables the BTrace code to be compiled in
+#define ENABLE_BTRACE
+#endif
+#ifdef __KERNEL_MODE__
+#include <kernel/kern_priv.h>
+#endif
+#include "dla_p.h"
+#ifndef __KERNEL_MODE__
+#include "slab_p.h"
+#include "page_alloc_p.h"
+#endif
+#include "heap_hybrid_p.h"
+
+// disabling Symbian import/export macros to prevent heap_hybrid.cpp, copied from Symbian^4, from exporting symbols in arm builds
+// this minimises the code changes to heap_hybrid.cpp to ease future integration
+#undef UEXPORT_C
+#define UEXPORT_C
+#undef EXPORT_C
+#define EXPORT_C
+#undef IMPORT_D
+#define IMPORT_D
+
+// disabling code ported from Symbian^4 that we don't want/can't have in earlier platforms
+#define QT_SYMBIAN4_ALLOCATOR_UNWANTED_CODE
+
+#if defined(SYMBIAN_VERSION_9_2) || defined(SYMBIAN_VERSION_9_1)
+#define NO_NAMED_LOCAL_CHUNKS
+#endif
+
+// disabling the BTrace components of heap checking macros
+#ifndef ENABLE_BTRACE
+inline int noBTrace() {return 0;}
+#define BTraceContext12(a,b,c,d,e) noBTrace()
+#endif
+
+#ifndef QT_SYMBIAN_HAVE_U32STD_H
+struct SThreadCreateInfo
+ {
+ TAny* iHandle;
+ TInt iType;
+ TThreadFunction iFunction;
+ TAny* iPtr;
+ TAny* iSupervisorStack;
+ TInt iSupervisorStackSize;
+ TAny* iUserStack;
+ TInt iUserStackSize;
+ TInt iInitialThreadPriority;
+ TPtrC iName;
+ TInt iTotalSize; // Size including any extras (must be a multiple of 8 bytes)
+ };
+
+struct SStdEpocThreadCreateInfo : public SThreadCreateInfo
+ {
+ RAllocator* iAllocator;
+ TInt iHeapInitialSize;
+ TInt iHeapMaxSize;
+ TInt iPadding; // Make structure size a multiple of 8 bytes
+ };
+
+class TChunkCreate
+ {
+public:
+ // Attributes for chunk creation that are used by both euser and the kernel
+ // by classes TChunkCreateInfo and SChunkCreateInfo, respectively.
+ enum TChunkCreateAtt
+ {
+ ENormal = 0x00000000,
+ EDoubleEnded = 0x00000001,
+ EDisconnected = 0x00000002,
+ ECache = 0x00000003,
+ EMappingMask = 0x0000000f,
+ ELocal = 0x00000000,
+ EGlobal = 0x00000010,
+ EData = 0x00000000,
+ ECode = 0x00000020,
+ EMemoryNotOwned = 0x00000040,
+
+ // Force local chunk to be named. Only required for thread heap
+        // chunks; all other local chunks should be nameless.
+ ELocalNamed = 0x000000080,
+
+ // Make global chunk read only to all processes but the controlling owner
+ EReadOnly = 0x000000100,
+
+ // Paging attributes for chunks.
+ EPagingUnspec = 0x00000000,
+ EPaged = 0x80000000,
+ EUnpaged = 0x40000000,
+ EPagingMask = EPaged | EUnpaged,
+
+ EChunkCreateAttMask = EMappingMask | EGlobal | ECode |
+ ELocalNamed | EReadOnly | EPagingMask,
+ };
+public:
+ TUint iAtt;
+ TBool iForceFixed;
+ TInt iInitialBottom;
+ TInt iInitialTop;
+ TInt iMaxSize;
+ TUint8 iClearByte;
+ };
+
+#endif // QT_SYMBIAN_HAVE_U32STD_H
+
+#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
+
+#endif /* QT_HYBRIDHEAP_SYMBIAN_H */
diff --git a/src/corelib/arch/symbian/slab_p.h b/src/corelib/arch/symbian/slab_p.h
new file mode 100644
index 0000000..234a310
--- /dev/null
+++ b/src/corelib/arch/symbian/slab_p.h
@@ -0,0 +1,125 @@
+/****************************************************************************
+**
+** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef __KERNEL_MODE__
+
+class slab;
+class slabhdr;
+#define MAXSLABSIZE 56
+#define PAGESHIFT 12
+#define PAGESIZE (1<<PAGESHIFT)
+#define SLABSHIFT 10
+#define SLABSIZE (1 << SLABSHIFT)
+#define CELLALIGN 8
+
+
+const unsigned slabfull = 0;
+const TInt slabsperpage = (int)(PAGESIZE/SLABSIZE);
+#define HIBIT(bits) (((unsigned)bits & 0xc) ? 2 + ((unsigned)bits>>3) : ((unsigned) bits>>1))
+
+#define LOWBIT(bits) (((unsigned) bits&3) ? 1 - ((unsigned)bits&1) : 3 - (((unsigned)bits>>2)&1))
+
+#define ZEROBITS(header) (((unsigned)header & 0x70000000) ? 0 : 1)
+
+class slabhdr
+{
+ public:
+ unsigned iHeader;
+ // made up of
+ // bits | 31 | 30..28 | 27..18 | 17..12 | 11..8 | 7..0 |
+ // +----------+--------+--------+--------+---------+----------+
+ // field | floating | zero | used-4 | size | pagemap | free pos |
+ //
+    slab** iParent;    // address of the parent's pointer to this slab in the tree
+ slab* iChild1; // 1st iChild in tree
+ slab* iChild2; // 2nd iChild in tree
+};
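As a rough aid to reading the header layout documented above (not part of the patch), the field extraction looks like this; the masks mirror the SlabHeaderFree/Pagemap/Size/Usedm4 accessors declared in heap_hybrid_p.h, and the sample header value is made up.

// Sketch only: extracts the documented fields from a slab header word.
#include <cstdio>

int main()
{
    unsigned h = 0x801C5024u;                       // arbitrary sample header
    unsigned freePos  =  h & 0x000000ffu;           // bits  7..0  : free pos
    unsigned pagemap  = (h & 0x00000f00u) >> 8;     // bits 11..8  : pagemap
    unsigned size     = (h & 0x0003f000u) >> 12;    // bits 17..12 : size
    unsigned usedM4   = (h & 0x0ffc0000u) >> 18;    // bits 27..18 : used-4
    unsigned floating = (h & 0x80000000u) >> 31;    // bit  31     : floating
    std::printf("free pos=%u pagemap=%u size=%u used-4=%u floating=%u\n",
                freePos, pagemap, size, usedM4, floating);
    return 0;
}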
+
+const TInt KMaxSlabPayload = SLABSIZE - sizeof(slabhdr);
+#define MAXUSEDM4BITS 0x0fc00000
+#define FLOATING_BIT 0x80000000
+
+inline unsigned HeaderFloating(unsigned h)
+{return (h&0x80000000);}
+const unsigned maxuse = (SLABSIZE - sizeof(slabhdr))>>2;
+const unsigned firstpos = sizeof(slabhdr)>>2;
+
+#ifdef _DEBUG
+#define CHECKTREE(x) DoCheckSlabTree(x,EFalse)
+#define CHECKSLAB(s,t,p) DoCheckSlab(s,t,p)
+#define CHECKSLABBFR(s,p) {TUint32 b[4]; BuildPartialSlabBitmap(b,s,p);}
+#else
+#define CHECKTREE(x) (void)0
+#define CHECKSLAB(s,t,p) (void)0
+#define CHECKSLABBFR(s,p) (void)0
+#endif
+
+class slabset
+{
+ public:
+ slab* iPartial;
+};
+
+class slab : public slabhdr
+{
+ public:
+ void Init(unsigned clz);
+ //static slab* SlabFor( void* p);
+ static slab* SlabFor(const void* p) ;
+ unsigned char iPayload[SLABSIZE-sizeof(slabhdr)];
+};
+
+class page
+{
+ public:
+ inline static page* PageFor(slab* s);
+ //slab iSlabs;
+ slab iSlabs[slabsperpage];
+};
+
+
+inline page* page::PageFor(slab* s)
+{
+ return reinterpret_cast<page*>((unsigned(s))&~(PAGESIZE-1));
+}
+
+
+#endif // __KERNEL_MODE__
diff --git a/src/corelib/global/qglobal.h b/src/corelib/global/qglobal.h
index 6ef15d4..35607d5 100644
--- a/src/corelib/global/qglobal.h
+++ b/src/corelib/global/qglobal.h
@@ -2451,7 +2451,6 @@ QT3_SUPPORT Q_CORE_EXPORT const char *qInstallPathSysconf();
#endif
#endif
-
//Symbian does not support data imports from a DLL
#define Q_NO_DATA_RELOCATION
diff --git a/src/declarative/debugger/qdeclarativedebug.cpp b/src/declarative/debugger/qdeclarativedebug.cpp
index 1ffe441..7a5e5f6 100644
--- a/src/declarative/debugger/qdeclarativedebug.cpp
+++ b/src/declarative/debugger/qdeclarativedebug.cpp
@@ -55,10 +55,12 @@ public:
QDeclarativeEngineDebugClient(QDeclarativeDebugConnection *client, QDeclarativeEngineDebugPrivate *p);
protected:
+ virtual void statusChanged(Status status);
virtual void messageReceived(const QByteArray &);
private:
QDeclarativeEngineDebugPrivate *priv;
+ friend class QDeclarativeEngineDebugPrivate;
};
class QDeclarativeEngineDebugPrivate : public QObjectPrivate
@@ -66,7 +68,9 @@ class QDeclarativeEngineDebugPrivate : public QObjectPrivate
Q_DECLARE_PUBLIC(QDeclarativeEngineDebug)
public:
QDeclarativeEngineDebugPrivate(QDeclarativeDebugConnection *);
+ ~QDeclarativeEngineDebugPrivate();
+ void statusChanged(QDeclarativeEngineDebug::Status status);
void message(const QByteArray &);
QDeclarativeEngineDebugClient *client;
@@ -93,12 +97,18 @@ QDeclarativeEngineDebugClient::QDeclarativeEngineDebugClient(QDeclarativeDebugCo
QDeclarativeEngineDebugPrivate *p)
: QDeclarativeDebugClient(QLatin1String("QDeclarativeEngine"), client), priv(p)
{
- setEnabled(true);
+}
+
+void QDeclarativeEngineDebugClient::statusChanged(Status status)
+{
+ if (priv)
+ priv->statusChanged(static_cast<QDeclarativeEngineDebug::Status>(status));
}
void QDeclarativeEngineDebugClient::messageReceived(const QByteArray &data)
{
- priv->message(data);
+ if (priv)
+ priv->message(data);
}
QDeclarativeEngineDebugPrivate::QDeclarativeEngineDebugPrivate(QDeclarativeDebugConnection *c)
@@ -106,6 +116,12 @@ QDeclarativeEngineDebugPrivate::QDeclarativeEngineDebugPrivate(QDeclarativeDebug
{
}
+QDeclarativeEngineDebugPrivate::~QDeclarativeEngineDebugPrivate()
+{
+ if (client)
+ client->priv = 0;
+}
+
int QDeclarativeEngineDebugPrivate::getId()
{
return nextId++;
@@ -228,6 +244,11 @@ void QDeclarativeEngineDebugPrivate::decode(QDataStream &ds, QDeclarativeDebugCo
}
}
+void QDeclarativeEngineDebugPrivate::statusChanged(QDeclarativeEngineDebug::Status status)
+{
+ emit q_func()->statusChanged(status);
+}
+
void QDeclarativeEngineDebugPrivate::message(const QByteArray &data)
{
QDataStream ds(data);
@@ -350,12 +371,19 @@ QDeclarativeEngineDebug::QDeclarativeEngineDebug(QDeclarativeDebugConnection *cl
{
}
+QDeclarativeEngineDebug::Status QDeclarativeEngineDebug::status() const
+{
+ Q_D(const QDeclarativeEngineDebug);
+
+ return static_cast<QDeclarativeEngineDebug::Status>(d->client->status());
+}
+
QDeclarativeDebugPropertyWatch *QDeclarativeEngineDebug::addWatch(const QDeclarativeDebugPropertyReference &property, QObject *parent)
{
Q_D(QDeclarativeEngineDebug);
QDeclarativeDebugPropertyWatch *watch = new QDeclarativeDebugPropertyWatch(parent);
- if (d->client->isConnected()) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled) {
int queryId = d->getId();
watch->m_queryId = queryId;
watch->m_client = this;
@@ -384,7 +412,7 @@ QDeclarativeDebugObjectExpressionWatch *QDeclarativeEngineDebug::addWatch(const
{
Q_D(QDeclarativeEngineDebug);
QDeclarativeDebugObjectExpressionWatch *watch = new QDeclarativeDebugObjectExpressionWatch(parent);
- if (d->client->isConnected()) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled) {
int queryId = d->getId();
watch->m_queryId = queryId;
watch->m_client = this;
@@ -407,7 +435,7 @@ QDeclarativeDebugWatch *QDeclarativeEngineDebug::addWatch(const QDeclarativeDebu
Q_D(QDeclarativeEngineDebug);
QDeclarativeDebugWatch *watch = new QDeclarativeDebugWatch(parent);
- if (d->client->isConnected()) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled) {
int queryId = d->getId();
watch->m_queryId = queryId;
watch->m_client = this;
@@ -443,7 +471,7 @@ void QDeclarativeEngineDebug::removeWatch(QDeclarativeDebugWatch *watch)
d->watched.remove(watch->queryId());
- if (d->client && d->client->isConnected()) {
+ if (d->client && d->client->status() == QDeclarativeDebugClient::Enabled) {
QByteArray message;
QDataStream ds(&message, QIODevice::WriteOnly);
ds << QByteArray("NO_WATCH") << watch->queryId();
@@ -456,7 +484,7 @@ QDeclarativeDebugEnginesQuery *QDeclarativeEngineDebug::queryAvailableEngines(QO
Q_D(QDeclarativeEngineDebug);
QDeclarativeDebugEnginesQuery *query = new QDeclarativeDebugEnginesQuery(parent);
- if (d->client->isConnected()) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled) {
query->m_client = this;
int queryId = d->getId();
query->m_queryId = queryId;
@@ -478,7 +506,7 @@ QDeclarativeDebugRootContextQuery *QDeclarativeEngineDebug::queryRootContexts(co
Q_D(QDeclarativeEngineDebug);
QDeclarativeDebugRootContextQuery *query = new QDeclarativeDebugRootContextQuery(parent);
- if (d->client->isConnected() && engine.debugId() != -1) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled && engine.debugId() != -1) {
query->m_client = this;
int queryId = d->getId();
query->m_queryId = queryId;
@@ -500,7 +528,7 @@ QDeclarativeDebugObjectQuery *QDeclarativeEngineDebug::queryObject(const QDeclar
Q_D(QDeclarativeEngineDebug);
QDeclarativeDebugObjectQuery *query = new QDeclarativeDebugObjectQuery(parent);
- if (d->client->isConnected() && object.debugId() != -1) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled && object.debugId() != -1) {
query->m_client = this;
int queryId = d->getId();
query->m_queryId = queryId;
@@ -523,7 +551,7 @@ QDeclarativeDebugObjectQuery *QDeclarativeEngineDebug::queryObjectRecursive(cons
Q_D(QDeclarativeEngineDebug);
QDeclarativeDebugObjectQuery *query = new QDeclarativeDebugObjectQuery(parent);
- if (d->client->isConnected() && object.debugId() != -1) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled && object.debugId() != -1) {
query->m_client = this;
int queryId = d->getId();
query->m_queryId = queryId;
@@ -546,7 +574,7 @@ QDeclarativeDebugExpressionQuery *QDeclarativeEngineDebug::queryExpressionResult
Q_D(QDeclarativeEngineDebug);
QDeclarativeDebugExpressionQuery *query = new QDeclarativeDebugExpressionQuery(parent);
- if (d->client->isConnected() && objectDebugId != -1) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled && objectDebugId != -1) {
query->m_client = this;
query->m_expr = expr;
int queryId = d->getId();
@@ -570,7 +598,7 @@ bool QDeclarativeEngineDebug::setBindingForObject(int objectDebugId, const QStri
{
Q_D(QDeclarativeEngineDebug);
- if (d->client->isConnected() && objectDebugId != -1) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled && objectDebugId != -1) {
QByteArray message;
QDataStream ds(&message, QIODevice::WriteOnly);
ds << QByteArray("SET_BINDING") << objectDebugId << propertyName << bindingExpression << isLiteralValue;
@@ -585,7 +613,7 @@ bool QDeclarativeEngineDebug::resetBindingForObject(int objectDebugId, const QSt
{
Q_D(QDeclarativeEngineDebug);
- if (d->client->isConnected() && objectDebugId != -1) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled && objectDebugId != -1) {
QByteArray message;
QDataStream ds(&message, QIODevice::WriteOnly);
ds << QByteArray("RESET_BINDING") << objectDebugId << propertyName;
@@ -601,7 +629,7 @@ bool QDeclarativeEngineDebug::setMethodBody(int objectDebugId, const QString &me
{
Q_D(QDeclarativeEngineDebug);
- if (d->client->isConnected() && objectDebugId != -1) {
+ if (d->client->status() == QDeclarativeDebugClient::Enabled && objectDebugId != -1) {
QByteArray message;
QDataStream ds(&message, QIODevice::WriteOnly);
ds << QByteArray("SET_METHOD_BODY") << objectDebugId << methodName << methodBody;
diff --git a/src/declarative/debugger/qdeclarativedebug_p.h b/src/declarative/debugger/qdeclarativedebug_p.h
index 2b1a115..3d83e8a 100644
--- a/src/declarative/debugger/qdeclarativedebug_p.h
+++ b/src/declarative/debugger/qdeclarativedebug_p.h
@@ -69,7 +69,11 @@ class Q_DECLARATIVE_EXPORT QDeclarativeEngineDebug : public QObject
{
Q_OBJECT
public:
- QDeclarativeEngineDebug(QDeclarativeDebugConnection *, QObject * = 0);
+ enum Status { NotConnected, Unavailable, Enabled };
+
+ explicit QDeclarativeEngineDebug(QDeclarativeDebugConnection *, QObject * = 0);
+
+ Status status() const;
QDeclarativeDebugPropertyWatch *addWatch(const QDeclarativeDebugPropertyReference &,
QObject *parent = 0);
@@ -101,6 +105,7 @@ public:
Q_SIGNALS:
void newObjects();
+ void statusChanged(Status status);
private:
Q_DECLARE_PRIVATE(QDeclarativeEngineDebug)
diff --git a/src/declarative/debugger/qdeclarativedebugclient.cpp b/src/declarative/debugger/qdeclarativedebugclient.cpp
index 2e52b40..977e58e 100644
--- a/src/declarative/debugger/qdeclarativedebugclient.cpp
+++ b/src/declarative/debugger/qdeclarativedebugclient.cpp
@@ -50,6 +50,20 @@
QT_BEGIN_NAMESPACE
+const int protocolVersion = 1;
+const QString serverId = QLatin1String("QDeclarativeDebugServer");
+const QString clientId = QLatin1String("QDeclarativeDebugClient");
+
+class QDeclarativeDebugClientPrivate : public QObjectPrivate
+{
+ Q_DECLARE_PUBLIC(QDeclarativeDebugClient)
+public:
+ QDeclarativeDebugClientPrivate();
+
+ QString name;
+ QDeclarativeDebugConnection *client;
+};
+
class QDeclarativeDebugConnectionPrivate : public QObject
{
Q_OBJECT
@@ -58,40 +72,124 @@ public:
QDeclarativeDebugConnection *q;
QPacketProtocol *protocol;
- QStringList enabled;
+ bool gotHello;
+ QStringList serverPlugins;
QHash<QString, QDeclarativeDebugClient *> plugins;
+
+ void advertisePlugins();
+
public Q_SLOTS:
void connected();
void readyRead();
};
QDeclarativeDebugConnectionPrivate::QDeclarativeDebugConnectionPrivate(QDeclarativeDebugConnection *c)
-: QObject(c), q(c), protocol(0)
+: QObject(c), q(c), protocol(0), gotHello(false)
{
protocol = new QPacketProtocol(q, this);
QObject::connect(c, SIGNAL(connected()), this, SLOT(connected()));
QObject::connect(protocol, SIGNAL(readyRead()), this, SLOT(readyRead()));
}
+void QDeclarativeDebugConnectionPrivate::advertisePlugins()
+{
+ if (!q->isConnected() || !gotHello)
+ return;
+
+ QPacket pack;
+ pack << serverId << 1 << plugins.keys();
+ protocol->send(pack);
+ q->flush();
+}
+
void QDeclarativeDebugConnectionPrivate::connected()
{
QPacket pack;
- pack << QString(QLatin1String("QDeclarativeDebugServer")) << enabled;
+ pack << serverId << 0 << protocolVersion << plugins.keys();
protocol->send(pack);
+ q->flush();
}
void QDeclarativeDebugConnectionPrivate::readyRead()
{
- QPacket pack = protocol->read();
- QString name; QByteArray message;
- pack >> name >> message;
+ if (!gotHello) {
+ QPacket pack = protocol->read();
+ QString name;
+
+ pack >> name;
+
+ bool validHello = false;
+ if (name == clientId) {
+ int op = -1;
+ pack >> op;
+ if (op == 0) {
+ int version = -1;
+ pack >> version;
+ if (version == protocolVersion) {
+ pack >> serverPlugins;
+ validHello = true;
+ }
+ }
+ }
- QHash<QString, QDeclarativeDebugClient *>::Iterator iter =
- plugins.find(name);
- if (iter == plugins.end()) {
- qWarning() << "QDeclarativeDebugConnection: Message received for missing plugin" << name;
- } else {
- (*iter)->messageReceived(message);
+ if (!validHello) {
+ qWarning("QDeclarativeDebugConnection: Invalid hello message");
+ QObject::disconnect(protocol, SIGNAL(readyRead()), this, SLOT(readyRead()));
+ return;
+ }
+
+ gotHello = true;
+
+ QHash<QString, QDeclarativeDebugClient *>::Iterator iter = plugins.begin();
+ for (; iter != plugins.end(); ++iter) {
+ QDeclarativeDebugClient::Status newStatus = QDeclarativeDebugClient::Unavailable;
+ if (serverPlugins.contains(iter.key()))
+ newStatus = QDeclarativeDebugClient::Enabled;
+ iter.value()->statusChanged(newStatus);
+ }
+ }
+
+ while (protocol->packetsAvailable()) {
+ QPacket pack = protocol->read();
+ QString name;
+ pack >> name;
+
+ if (name == clientId) {
+ int op = -1;
+ pack >> op;
+
+ if (op == 1) {
+ // Service Discovery
+ QStringList oldServerPlugins = serverPlugins;
+ pack >> serverPlugins;
+
+ QHash<QString, QDeclarativeDebugClient *>::Iterator iter = plugins.begin();
+ for (; iter != plugins.end(); ++iter) {
+ const QString pluginName = iter.key();
+ QDeclarativeDebugClient::Status newStatus = QDeclarativeDebugClient::Unavailable;
+ if (serverPlugins.contains(pluginName))
+ newStatus = QDeclarativeDebugClient::Enabled;
+
+ if (oldServerPlugins.contains(pluginName)
+ != serverPlugins.contains(pluginName)) {
+ iter.value()->statusChanged(newStatus);
+ }
+ }
+ } else {
+ qWarning() << "QDeclarativeDebugConnection: Unknown control message id" << op;
+ }
+ } else {
+ QByteArray message;
+ pack >> message;
+
+ QHash<QString, QDeclarativeDebugClient *>::Iterator iter =
+ plugins.find(name);
+ if (iter == plugins.end()) {
+ qWarning() << "QDeclarativeDebugConnection: Message received for missing plugin" << name;
+ } else {
+ (*iter)->messageReceived(message);
+ }
+ }
}
}
@@ -100,24 +198,22 @@ QDeclarativeDebugConnection::QDeclarativeDebugConnection(QObject *parent)
{
}
-bool QDeclarativeDebugConnection::isConnected() const
+QDeclarativeDebugConnection::~QDeclarativeDebugConnection()
{
- return state() == ConnectedState;
+ QHash<QString, QDeclarativeDebugClient*>::iterator iter = d->plugins.begin();
+ for (; iter != d->plugins.end(); ++iter) {
+ iter.value()->d_func()->client = 0;
+ iter.value()->statusChanged(QDeclarativeDebugClient::NotConnected);
+ }
}
-class QDeclarativeDebugClientPrivate : public QObjectPrivate
+bool QDeclarativeDebugConnection::isConnected() const
{
- Q_DECLARE_PUBLIC(QDeclarativeDebugClient)
-public:
- QDeclarativeDebugClientPrivate();
-
- QString name;
- QDeclarativeDebugConnection *client;
- bool enabled;
-};
+ return state() == ConnectedState;
+}
QDeclarativeDebugClientPrivate::QDeclarativeDebugClientPrivate()
-: client(0), enabled(false)
+: client(0)
{
}
@@ -137,65 +233,54 @@ QDeclarativeDebugClient::QDeclarativeDebugClient(const QString &name,
d->client = 0;
} else {
d->client->d->plugins.insert(name, this);
+ d->client->d->advertisePlugins();
}
}
-QString QDeclarativeDebugClient::name() const
+QDeclarativeDebugClient::~QDeclarativeDebugClient()
{
Q_D(const QDeclarativeDebugClient);
- return d->name;
+ if (d->client && d->client->d) {
+ d->client->d->plugins.remove(d->name);
+ d->client->d->advertisePlugins();
+ }
}
-bool QDeclarativeDebugClient::isEnabled() const
+QString QDeclarativeDebugClient::name() const
{
Q_D(const QDeclarativeDebugClient);
- return d->enabled;
-}
-
-void QDeclarativeDebugClient::setEnabled(bool e)
-{
- Q_D(QDeclarativeDebugClient);
- if (e == d->enabled)
- return;
-
- d->enabled = e;
-
- if (d->client) {
- if (e)
- d->client->d->enabled.append(d->name);
- else
- d->client->d->enabled.removeAll(d->name);
-
- if (d->client->state() == QTcpSocket::ConnectedState) {
- QPacket pack;
- pack << QString(QLatin1String("QDeclarativeDebugServer"));
- if (e) pack << (int)1;
- else pack << (int)2;
- pack << d->name;
- d->client->d->protocol->send(pack);
- }
- }
+ return d->name;
}
-bool QDeclarativeDebugClient::isConnected() const
+QDeclarativeDebugClient::Status QDeclarativeDebugClient::status() const
{
Q_D(const QDeclarativeDebugClient);
+ if (!d->client
+ || !d->client->isConnected()
+ || !d->client->d->gotHello)
+ return NotConnected;
- if (!d->client)
- return false;
- return d->client->isConnected();
+ if (d->client->d->serverPlugins.contains(d->name))
+ return Enabled;
+
+ return Unavailable;
}
void QDeclarativeDebugClient::sendMessage(const QByteArray &message)
{
Q_D(QDeclarativeDebugClient);
- if (!d->client || !d->client->isConnected())
+ if (status() != Enabled)
return;
QPacket pack;
pack << d->name << message;
d->client->d->protocol->send(pack);
+ d->client->d->q->flush();
+}
+
+void QDeclarativeDebugClient::statusChanged(Status)
+{
}
void QDeclarativeDebugClient::messageReceived(const QByteArray &)
diff --git a/src/declarative/debugger/qdeclarativedebugclient_p.h b/src/declarative/debugger/qdeclarativedebugclient_p.h
index 4144a66..8d1706d 100644
--- a/src/declarative/debugger/qdeclarativedebugclient_p.h
+++ b/src/declarative/debugger/qdeclarativedebugclient_p.h
@@ -57,6 +57,7 @@ class Q_DECLARATIVE_EXPORT QDeclarativeDebugConnection : public QTcpSocket
Q_DISABLE_COPY(QDeclarativeDebugConnection)
public:
QDeclarativeDebugConnection(QObject * = 0);
+ ~QDeclarativeDebugConnection();
bool isConnected() const;
private:
@@ -73,18 +74,19 @@ class Q_DECLARATIVE_EXPORT QDeclarativeDebugClient : public QObject
Q_DISABLE_COPY(QDeclarativeDebugClient)
public:
+ enum Status { NotConnected, Unavailable, Enabled };
+
QDeclarativeDebugClient(const QString &, QDeclarativeDebugConnection *parent);
+ ~QDeclarativeDebugClient();
QString name() const;
- bool isEnabled() const;
- void setEnabled(bool);
-
- bool isConnected() const;
+ Status status() const;
void sendMessage(const QByteArray &);
protected:
+ virtual void statusChanged(Status);
virtual void messageReceived(const QByteArray &);
private:
diff --git a/src/declarative/debugger/qdeclarativedebugservice.cpp b/src/declarative/debugger/qdeclarativedebugservice.cpp
index 1f2bf4f..d2ef00d 100644
--- a/src/declarative/debugger/qdeclarativedebugservice.cpp
+++ b/src/declarative/debugger/qdeclarativedebugservice.cpp
@@ -54,6 +54,30 @@
QT_BEGIN_NAMESPACE
+/*
+ QDeclarativeDebug Protocol (Version 1):
+
+ handshake:
+ 1. Client sends
+ "QDeclarativeDebugServer" 0 version pluginNames
+ version: an int representing the highest protocol version the client knows
+ pluginNames: plugins available on client side
+ 2. Server sends
+ "QDeclarativeDebugClient" 0 version pluginNames
+ version: an int representing the highest protocol version the client & server know
+        pluginNames: plugins available on server side. Plugins present in both the client and the server message are enabled.
+ client plugin advertisement
+ 1. Client sends
+ "QDeclarativeDebugServer" 1 pluginNames
+ server plugin advertisement
+ 1. Server sends
+ "QDeclarativeDebugClient" 1 pluginNames
+ plugin communication:
+    Everything sent with a header other than "QDeclarativeDebugServer" is forwarded to the appropriate plugin.
+ */
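A self-contained approximation (not part of the patch) of the version-1 client hello described above. The real code sends it through QPacket/QPacketProtocol (see qdeclarativedebugclient.cpp), so the bare QDataStream framing and the helper name here are assumptions made only to show the field order.

// Sketch only: serializes the fields of a protocol version 1 client hello in
// the order documented above. The actual transport framing (QPacketProtocol)
// is omitted.
#include <QByteArray>
#include <QDataStream>
#include <QIODevice>
#include <QString>
#include <QStringList>

QByteArray makeClientHello(const QStringList &clientPlugins)   // hypothetical helper
{
    QByteArray payload;
    QDataStream out(&payload, QIODevice::WriteOnly);
    out << QString(QLatin1String("QDeclarativeDebugServer"))   // header
        << 0                                                   // op 0: hello
        << 1                                                   // highest protocol version known
        << clientPlugins;                                      // plugins available on the client
    return payload;
}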
+
+const int protocolVersion = 1;
+
class QDeclarativeDebugServerPrivate;
class QDeclarativeDebugServer : public QObject
{
@@ -82,11 +106,13 @@ class QDeclarativeDebugServerPrivate : public QObjectPrivate
public:
QDeclarativeDebugServerPrivate();
+ void advertisePlugins();
+
int port;
QTcpSocket *connection;
QPacketProtocol *protocol;
QHash<QString, QDeclarativeDebugService *> plugins;
- QStringList enabledPlugins;
+ QStringList clientPlugins;
QTcpServer *tcpServer;
bool gotHello;
};
@@ -106,6 +132,19 @@ QDeclarativeDebugServerPrivate::QDeclarativeDebugServerPrivate()
{
}
+void QDeclarativeDebugServerPrivate::advertisePlugins()
+{
+ if (!connection
+ || connection->state() != QTcpSocket::ConnectedState
+ || !gotHello)
+ return;
+
+ QPacket pack;
+ pack << QString(QLatin1String("QDeclarativeDebugClient")) << 1 << plugins.keys();
+ protocol->send(pack);
+ connection->flush();
+}
+
void QDeclarativeDebugServer::listen()
{
Q_D(QDeclarativeDebugServer);
@@ -144,7 +183,9 @@ void QDeclarativeDebugServer::newConnection()
bool QDeclarativeDebugServer::hasDebuggingClient() const
{
Q_D(const QDeclarativeDebugServer);
- return d->gotHello;
+ return d->connection
+ && (d->connection->state() == QTcpSocket::ConnectedState)
+ && d->gotHello;
}
QDeclarativeDebugServer *QDeclarativeDebugServer::instance()
@@ -202,9 +243,13 @@ void QDeclarativeDebugServer::readyRead()
if (!d->gotHello) {
QPacket hello = d->protocol->read();
- QString name;
- hello >> name >> d->enabledPlugins;
- if (name != QLatin1String("QDeclarativeDebugServer")) {
+
+ QString name;
+ int op;
+ hello >> name >> op;
+
+ if (name != QLatin1String("QDeclarativeDebugServer")
+ || op != 0) {
qWarning("QDeclarativeDebugServer: Invalid hello message");
QObject::disconnect(d->protocol, SIGNAL(readyRead()), this, SLOT(readyRead()));
d->protocol->deleteLater();
@@ -213,6 +258,23 @@ void QDeclarativeDebugServer::readyRead()
d->connection = 0;
return;
}
+
+ int version;
+ hello >> version >> d->clientPlugins;
+
+ QHash<QString, QDeclarativeDebugService*>::Iterator iter = d->plugins.begin();
+ for (; iter != d->plugins.end(); ++iter) {
+ QDeclarativeDebugService::Status newStatus = QDeclarativeDebugService::Unavailable;
+ if (d->clientPlugins.contains(iter.key()))
+ newStatus = QDeclarativeDebugService::Enabled;
+ iter.value()->statusChanged(newStatus);
+ }
+
+ QPacket helloAnswer;
+ helloAnswer << QString(QLatin1String("QDeclarativeDebugClient")) << 0 << protocolVersion << d->plugins.keys();
+ d->protocol->send(helloAnswer);
+ d->connection->flush();
+
d->gotHello = true;
qWarning("QDeclarativeDebugServer: Connection established");
}
@@ -226,32 +288,29 @@ void QDeclarativeDebugServer::readyRead()
pack >> name;
if (name == debugServer) {
- int op = -1; QString plugin;
- pack >> op >> plugin;
+ int op = -1;
+ pack >> op;
if (op == 1) {
- // Enable
- if (!d->enabledPlugins.contains(plugin)) {
- d->enabledPlugins.append(plugin);
- QHash<QString, QDeclarativeDebugService *>::Iterator iter =
- d->plugins.find(plugin);
- if (iter != d->plugins.end())
- (*iter)->enabledChanged(true);
- }
-
- } else if (op == 2) {
- // Disable
- if (d->enabledPlugins.contains(plugin)) {
- d->enabledPlugins.removeAll(plugin);
- QHash<QString, QDeclarativeDebugService *>::Iterator iter =
- d->plugins.find(plugin);
- if (iter != d->plugins.end())
- (*iter)->enabledChanged(false);
+ // Service Discovery
+ QStringList oldClientPlugins = d->clientPlugins;
+ pack >> d->clientPlugins;
+
+ QHash<QString, QDeclarativeDebugService*>::Iterator iter = d->plugins.begin();
+ for (; iter != d->plugins.end(); ++iter) {
+ const QString pluginName = iter.key();
+ QDeclarativeDebugService::Status newStatus = QDeclarativeDebugService::Unavailable;
+ if (d->clientPlugins.contains(pluginName))
+ newStatus = QDeclarativeDebugService::Enabled;
+
+ if (oldClientPlugins.contains(pluginName)
+ != d->clientPlugins.contains(pluginName)) {
+ iter.value()->statusChanged(newStatus);
+ }
}
} else {
qWarning("QDeclarativeDebugServer: Invalid control message %d", op);
}
-
} else {
QByteArray message;
pack >> message;
@@ -287,6 +346,16 @@ QDeclarativeDebugService::QDeclarativeDebugService(const QString &name, QObject
d->server = 0;
} else {
d->server->d_func()->plugins.insert(name, this);
+ d->server->d_func()->advertisePlugins();
+ }
+}
+
+QDeclarativeDebugService::~QDeclarativeDebugService()
+{
+ Q_D(const QDeclarativeDebugService);
+ if (d->server) {
+ d->server->d_func()->plugins.remove(d->name);
+ d->server->d_func()->advertisePlugins();
}
}
@@ -296,10 +365,16 @@ QString QDeclarativeDebugService::name() const
return d->name;
}
-bool QDeclarativeDebugService::isEnabled() const
+QDeclarativeDebugService::Status QDeclarativeDebugService::status() const
{
Q_D(const QDeclarativeDebugService);
- return (d->server && d->server->d_func()->enabledPlugins.contains(d->name));
+ if (!d->server
+ || !d->server->hasDebuggingClient())
+ return NotConnected;
+ if (d->server->d_func()->clientPlugins.contains(d->name))
+ return Enabled;
+
+ return Unavailable;
}
namespace {
@@ -413,7 +488,7 @@ void QDeclarativeDebugService::sendMessage(const QByteArray &message)
{
Q_D(QDeclarativeDebugService);
- if (!d->server || !d->server->d_func()->connection)
+ if (status() != Enabled)
return;
QPacket pack;
@@ -422,7 +497,7 @@ void QDeclarativeDebugService::sendMessage(const QByteArray &message)
d->server->d_func()->connection->flush();
}
-void QDeclarativeDebugService::enabledChanged(bool)
+void QDeclarativeDebugService::statusChanged(Status)
{
}
diff --git a/src/declarative/debugger/qdeclarativedebugservice_p.h b/src/declarative/debugger/qdeclarativedebugservice_p.h
index c461ddf..0cadbe5 100644
--- a/src/declarative/debugger/qdeclarativedebugservice_p.h
+++ b/src/declarative/debugger/qdeclarativedebugservice_p.h
@@ -56,12 +56,15 @@ class Q_DECLARATIVE_EXPORT QDeclarativeDebugService : public QObject
Q_OBJECT
Q_DECLARE_PRIVATE(QDeclarativeDebugService)
Q_DISABLE_COPY(QDeclarativeDebugService)
+
public:
- QDeclarativeDebugService(const QString &, QObject *parent = 0);
+ explicit QDeclarativeDebugService(const QString &, QObject *parent = 0);
+ ~QDeclarativeDebugService();
QString name() const;
- bool isEnabled() const;
+ enum Status { NotConnected, Unavailable, Enabled };
+ Status status() const;
void sendMessage(const QByteArray &);
@@ -74,7 +77,7 @@ public:
static bool hasDebuggingClient();
protected:
- virtual void enabledChanged(bool);
+ virtual void statusChanged(Status);
virtual void messageReceived(const QByteArray &);
private:
diff --git a/src/declarative/debugger/qdeclarativedebugtrace.cpp b/src/declarative/debugger/qdeclarativedebugtrace.cpp
index b2b0c8a..03e2d56 100644
--- a/src/declarative/debugger/qdeclarativedebugtrace.cpp
+++ b/src/declarative/debugger/qdeclarativedebugtrace.cpp
@@ -78,7 +78,7 @@ void QDeclarativeDebugTrace::endRange(RangeType t)
void QDeclarativeDebugTrace::addEventImpl(EventType event)
{
- if (!isEnabled())
+ if (status() != Enabled)
return;
QByteArray data;
@@ -89,7 +89,7 @@ void QDeclarativeDebugTrace::addEventImpl(EventType event)
void QDeclarativeDebugTrace::startRangeImpl(RangeType range)
{
- if (!isEnabled())
+ if (status() != Enabled)
return;
QByteArray data;
@@ -100,7 +100,7 @@ void QDeclarativeDebugTrace::startRangeImpl(RangeType range)
void QDeclarativeDebugTrace::rangeDataImpl(RangeType range, const QUrl &u)
{
- if (!isEnabled())
+ if (status() != Enabled)
return;
QByteArray data;
@@ -111,7 +111,7 @@ void QDeclarativeDebugTrace::rangeDataImpl(RangeType range, const QUrl &u)
void QDeclarativeDebugTrace::endRangeImpl(RangeType range)
{
- if (!isEnabled())
+ if (status() != Enabled)
return;
QByteArray data;
diff --git a/src/declarative/graphicsitems/qdeclarativeflickable.cpp b/src/declarative/graphicsitems/qdeclarativeflickable.cpp
index 84b0ccf..33c21b1 100644
--- a/src/declarative/graphicsitems/qdeclarativeflickable.cpp
+++ b/src/declarative/graphicsitems/qdeclarativeflickable.cpp
@@ -456,8 +456,8 @@ QDeclarativeFlickable::~QDeclarativeFlickable()
}
/*!
- \qmlproperty int Flickable::contentX
- \qmlproperty int Flickable::contentY
+ \qmlproperty real Flickable::contentX
+ \qmlproperty real Flickable::contentY
These properties hold the surface coordinate currently at the top-left
corner of the Flickable. For example, if you flick an image up 100 pixels,
@@ -1046,10 +1046,16 @@ void QDeclarativeFlickable::cancelFlick()
void QDeclarativeFlickablePrivate::data_append(QDeclarativeListProperty<QObject> *prop, QObject *o)
{
QGraphicsObject *i = qobject_cast<QGraphicsObject *>(o);
- if (i)
- i->setParentItem(static_cast<QDeclarativeFlickablePrivate*>(prop->data)->contentItem);
- else
+ if (i) {
+ QGraphicsItemPrivate *d = QGraphicsItemPrivate::get(i);
+ if (static_cast<QDeclarativeItemPrivate*>(d)->componentComplete) {
+ i->setParentItem(static_cast<QDeclarativeFlickablePrivate*>(prop->data)->contentItem);
+ } else {
+ d->setParentItemHelper(static_cast<QDeclarativeFlickablePrivate*>(prop->data)->contentItem, 0, 0);
+ }
+ } else {
o->setParent(prop->object);
+ }
}
static inline int children_count_helper(QGraphicsObject *object)
@@ -1071,8 +1077,16 @@ static inline void children_clear_helper(QGraphicsObject *object)
{
QGraphicsItemPrivate *d = QGraphicsItemPrivate::get(object);
int childCount = d->children.count();
- for (int index = 0 ;index < childCount; index++)
- QGraphicsItemPrivate::get(d->children.at(0))->setParentItemHelper(0, /*newParentVariant=*/0, /*thisPointerVariant=*/0);
+ if (static_cast<QDeclarativeItemPrivate*>(d)->componentComplete) {
+ for (int index = 0 ;index < childCount; index++) {
+ d->children.at(0)->setParentItem(0);
+ }
+ } else {
+ for (int index = 0 ;index < childCount; index++) {
+ QGraphicsItemPrivate::get(d->children.at(0))->setParentItemHelper(0, /*newParentVariant=*/0, /*thisPointerVariant=*/0);
+ }
+ }
+
}
int QDeclarativeFlickablePrivate::data_count(QDeclarativeListProperty<QObject> *prop)
@@ -1154,8 +1168,8 @@ void QDeclarativeFlickable::setBoundsBehavior(BoundsBehavior b)
}
/*!
- \qmlproperty int Flickable::contentWidth
- \qmlproperty int Flickable::contentHeight
+ \qmlproperty real Flickable::contentWidth
+ \qmlproperty real Flickable::contentHeight
The dimensions of the content (the surface controlled by Flickable). Typically this
should be set to the combined size of the items placed in the Flickable. Note this
diff --git a/src/declarative/graphicsitems/qdeclarativeitem.cpp b/src/declarative/graphicsitems/qdeclarativeitem.cpp
index e9da4f7..250a43b 100644
--- a/src/declarative/graphicsitems/qdeclarativeitem.cpp
+++ b/src/declarative/graphicsitems/qdeclarativeitem.cpp
@@ -101,7 +101,7 @@ QT_BEGIN_NAMESPACE
The following example moves the Y axis of the \l Rectangle elements while still allowing the \l Row element
to lay the items out as if they had not been transformed:
\qml
- import Qt 4.7
+ import QtQuick 1.0
Row {
Rectangle {
@@ -1614,7 +1614,13 @@ void QDeclarativeItemPrivate::data_append(QDeclarativeListProperty<QObject> *pro
while (mo && mo != &QGraphicsObject::staticMetaObject) mo = mo->d.superdata;
if (mo) {
- QGraphicsItemPrivate::get(static_cast<QGraphicsObject *>(o))->setParentItemHelper(that, 0, 0);
+ QGraphicsObject *graphicsObject = static_cast<QGraphicsObject *>(o);
+ QDeclarativeItemPrivate *contentItemPrivate = static_cast<QDeclarativeItemPrivate *>(QGraphicsItemPrivate::get(graphicsObject));
+ if (contentItemPrivate->componentComplete) {
+ graphicsObject->setParentItem(that);
+ } else {
+ contentItemPrivate->setParentItemHelper(that, /*newParentVariant=*/0, /*thisPointerVariant=*/0);
+ }
} else {
o->setParent(that);
}
@@ -1637,10 +1643,15 @@ static inline QObject *children_at_helper(QDeclarativeListProperty<QObject> *pro
static inline void children_clear_helper(QDeclarativeListProperty<QObject> *prop)
{
- QGraphicsItemPrivate *d = QGraphicsItemPrivate::get(static_cast<QGraphicsObject *>(prop->object));
+ QDeclarativeItemPrivate *d = static_cast<QDeclarativeItemPrivate*>(QGraphicsItemPrivate::get(static_cast<QGraphicsObject *>(prop->object)));
int childCount = d->children.count();
- for (int index = 0 ;index < childCount; index++)
- QGraphicsItemPrivate::get(d->children.at(0))->setParentItemHelper(0, /*newParentVariant=*/0, /*thisPointerVariant=*/0);
+ if (d->componentComplete) {
+ for (int index = 0 ;index < childCount; index++)
+ d->children.at(0)->setParentItem(0);
+ } else {
+ for (int index = 0 ;index < childCount; index++)
+ QGraphicsItemPrivate::get(d->children.at(0))->setParentItemHelper(0, /*newParentVariant=*/0, /*thisPointerVariant=*/0);
+ }
}
int QDeclarativeItemPrivate::data_count(QDeclarativeListProperty<QObject> *prop)
@@ -1705,7 +1716,7 @@ int QDeclarativeItemPrivate::transform_count(QDeclarativeListProperty<QGraphicsT
void QDeclarativeItemPrivate::transform_append(QDeclarativeListProperty<QGraphicsTransform> *list, QGraphicsTransform *item)
{
QGraphicsObject *object = qobject_cast<QGraphicsObject *>(list->object);
- if (object) // QGraphicsItem applies the list in the wrong order, so we prepend.
+ if (object && item) // QGraphicsItem applies the list in the wrong order, so we prepend.
QGraphicsItemPrivate::get(object)->prependGraphicsTransform(item);
}
@@ -1745,8 +1756,8 @@ void QDeclarativeItemPrivate::parentProperty(QObject *o, void *rv, QDeclarativeN
\qmlproperty list<Object> Item::data
\default
- The data property is allows you to freely mix visual children and resources
- of an item. If you assign a visual item to the data list it becomes
+ The data property allows you to freely mix visual children and resources
+ in an item. If you assign a visual item to the data list it becomes
a child and if you assign any other object type, it is added as a resource.
So you can write:
diff --git a/src/declarative/graphicsitems/qdeclarativeitemsmodule.cpp b/src/declarative/graphicsitems/qdeclarativeitemsmodule.cpp
index 783eff1..52703c2 100644
--- a/src/declarative/graphicsitems/qdeclarativeitemsmodule.cpp
+++ b/src/declarative/graphicsitems/qdeclarativeitemsmodule.cpp
@@ -97,7 +97,84 @@ void QDeclarativeItemModule::defineModule()
{
QDeclarativePrivate::RegisterAutoParent autoparent = { 0, &qgraphicsobject_autoParent };
QDeclarativePrivate::qmlregister(QDeclarativePrivate::AutoParentRegistration, &autoparent);
+#ifdef QT_NO_MOVIE
+ qmlRegisterTypeNotAvailable("QtQuick",1,0,"AnimatedImage",
+ qApp->translate("QDeclarativeAnimatedImage","Qt was built without support for QMovie"));
+#else
+ qmlRegisterType<QDeclarativeAnimatedImage>("QtQuick",1,0,"AnimatedImage");
+#endif
+ qmlRegisterType<QDeclarativeBorderImage>("QtQuick",1,0,"BorderImage");
+ qmlRegisterType<QDeclarativeColumn>("QtQuick",1,0,"Column");
+ qmlRegisterType<QDeclarativeDrag>("QtQuick",1,0,"Drag");
+ qmlRegisterType<QDeclarativeFlickable>("QtQuick",1,0,"Flickable");
+ qmlRegisterType<QDeclarativeFlipable>("QtQuick",1,0,"Flipable");
+ qmlRegisterType<QDeclarativeFlow>("QtQuick",1,0,"Flow");
+ qmlRegisterType<QDeclarativeFocusPanel>("QtQuick",1,0,"FocusPanel");
+ qmlRegisterType<QDeclarativeFocusScope>("QtQuick",1,0,"FocusScope");
+ qmlRegisterType<QDeclarativeGradient>("QtQuick",1,0,"Gradient");
+ qmlRegisterType<QDeclarativeGradientStop>("QtQuick",1,0,"GradientStop");
+ qmlRegisterType<QDeclarativeGrid>("QtQuick",1,0,"Grid");
+ qmlRegisterType<QDeclarativeGridView>("QtQuick",1,0,"GridView");
+ qmlRegisterType<QDeclarativeImage>("QtQuick",1,0,"Image");
+ qmlRegisterType<QDeclarativeItem>("QtQuick",1,0,"Item");
+ qmlRegisterType<QDeclarativeLayoutItem>("QtQuick",1,0,"LayoutItem");
+ qmlRegisterType<QDeclarativeListView>("QtQuick",1,0,"ListView");
+ qmlRegisterType<QDeclarativeLoader>("QtQuick",1,0,"Loader");
+ qmlRegisterType<QDeclarativeMouseArea>("QtQuick",1,0,"MouseArea");
+ qmlRegisterType<QDeclarativePath>("QtQuick",1,0,"Path");
+ qmlRegisterType<QDeclarativePathAttribute>("QtQuick",1,0,"PathAttribute");
+ qmlRegisterType<QDeclarativePathCubic>("QtQuick",1,0,"PathCubic");
+ qmlRegisterType<QDeclarativePathLine>("QtQuick",1,0,"PathLine");
+ qmlRegisterType<QDeclarativePathPercent>("QtQuick",1,0,"PathPercent");
+ qmlRegisterType<QDeclarativePathQuad>("QtQuick",1,0,"PathQuad");
+ qmlRegisterType<QDeclarativePathView>("QtQuick",1,0,"PathView");
+#ifndef QT_NO_VALIDATOR
+ qmlRegisterType<QIntValidator>("QtQuick",1,0,"IntValidator");
+ qmlRegisterType<QDoubleValidator>("QtQuick",1,0,"DoubleValidator");
+ qmlRegisterType<QRegExpValidator>("QtQuick",1,0,"RegExpValidator");
+#endif
+ qmlRegisterType<QDeclarativeRectangle>("QtQuick",1,0,"Rectangle");
+ qmlRegisterType<QDeclarativeRepeater>("QtQuick",1,0,"Repeater");
+ qmlRegisterType<QGraphicsRotation>("QtQuick",1,0,"Rotation");
+ qmlRegisterType<QDeclarativeRow>("QtQuick",1,0,"Row");
+ qmlRegisterType<QDeclarativeTranslate>("QtQuick",1,0,"Translate");
+ qmlRegisterType<QGraphicsScale>("QtQuick",1,0,"Scale");
+ qmlRegisterType<QDeclarativeText>("QtQuick",1,0,"Text");
+ qmlRegisterType<QDeclarativeTextEdit>("QtQuick",1,0,"TextEdit");
+#ifndef QT_NO_LINEEDIT
+ qmlRegisterType<QDeclarativeTextInput>("QtQuick",1,0,"TextInput");
+#endif
+ qmlRegisterType<QDeclarativeViewSection>("QtQuick",1,0,"ViewSection");
+ qmlRegisterType<QDeclarativeVisualDataModel>("QtQuick",1,0,"VisualDataModel");
+ qmlRegisterType<QDeclarativeVisualItemModel>("QtQuick",1,0,"VisualItemModel");
+ qmlRegisterType<QDeclarativeAnchors>();
+ qmlRegisterType<QDeclarativeKeyEvent>();
+ qmlRegisterType<QDeclarativeMouseEvent>();
+ qmlRegisterType<QGraphicsObject>();
+ qmlRegisterType<QGraphicsWidget>("QtQuick",1,0,"QGraphicsWidget");
+ qmlRegisterExtendedType<QGraphicsWidget,QDeclarativeGraphicsWidget>("QtQuick",1,0,"QGraphicsWidget");
+ qmlRegisterType<QGraphicsTransform>();
+ qmlRegisterType<QDeclarativePathElement>();
+ qmlRegisterType<QDeclarativeCurve>();
+ qmlRegisterType<QDeclarativeScaleGrid>();
+#ifndef QT_NO_VALIDATOR
+ qmlRegisterType<QValidator>();
+#endif
+ qmlRegisterType<QDeclarativeVisualModel>();
+#ifndef QT_NO_ACTION
+ qmlRegisterType<QAction>();
+#endif
+ qmlRegisterType<QDeclarativePen>();
+ qmlRegisterType<QDeclarativeFlickableVisibleArea>();
+#ifndef QT_NO_GRAPHICSEFFECT
+ qmlRegisterType<QGraphicsEffect>();
+#endif
+
+ qmlRegisterUncreatableType<QDeclarativeKeyNavigationAttached>("QtQuick",1,0,"KeyNavigation",QDeclarativeKeyNavigationAttached::tr("KeyNavigation is only available via attached properties"));
+ qmlRegisterUncreatableType<QDeclarativeKeysAttached>("QtQuick",1,0,"Keys",QDeclarativeKeysAttached::tr("Keys is only available via attached properties"));
+
+#ifndef QT_NO_IMPORT_QT47_QML
#ifdef QT_NO_MOVIE
qmlRegisterTypeNotAvailable("Qt",4,7,"AnimatedImage",
qApp->translate("QDeclarativeAnimatedImage","Qt was built without support for QMovie"));
@@ -149,29 +226,10 @@ void QDeclarativeItemModule::defineModule()
qmlRegisterType<QDeclarativeVisualDataModel>("Qt",4,7,"VisualDataModel");
qmlRegisterType<QDeclarativeVisualItemModel>("Qt",4,7,"VisualItemModel");
- qmlRegisterType<QDeclarativeAnchors>();
- qmlRegisterType<QDeclarativeKeyEvent>();
- qmlRegisterType<QDeclarativeMouseEvent>();
- qmlRegisterType<QGraphicsObject>();
qmlRegisterType<QGraphicsWidget>("Qt",4,7,"QGraphicsWidget");
qmlRegisterExtendedType<QGraphicsWidget,QDeclarativeGraphicsWidget>("Qt",4,7,"QGraphicsWidget");
- qmlRegisterType<QGraphicsTransform>();
- qmlRegisterType<QDeclarativePathElement>();
- qmlRegisterType<QDeclarativeCurve>();
- qmlRegisterType<QDeclarativeScaleGrid>();
-#ifndef QT_NO_VALIDATOR
- qmlRegisterType<QValidator>();
-#endif
- qmlRegisterType<QDeclarativeVisualModel>();
-#ifndef QT_NO_ACTION
- qmlRegisterType<QAction>();
-#endif
- qmlRegisterType<QDeclarativePen>();
- qmlRegisterType<QDeclarativeFlickableVisibleArea>();
-#ifndef QT_NO_GRAPHICSEFFECT
- qmlRegisterType<QGraphicsEffect>();
-#endif
qmlRegisterUncreatableType<QDeclarativeKeyNavigationAttached>("Qt",4,7,"KeyNavigation",QDeclarativeKeyNavigationAttached::tr("KeyNavigation is only available via attached properties"));
qmlRegisterUncreatableType<QDeclarativeKeysAttached>("Qt",4,7,"Keys",QDeclarativeKeysAttached::tr("Keys is only available via attached properties"));
+#endif
}
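
The pattern above, registering every element under the new "QtQuick" 1.0 URI while keeping the "Qt" 4.7 registrations behind the QT_NO_IMPORT_QT47_QML guard, can be reused by any module that must stay importable under an old name. A minimal sketch with a hypothetical MyItem type and module URIs (none of these names are part of this change; assume the class is seen by moc):

    #include <QtCore/QObject>
    #include <QtDeclarative/qdeclarative.h>

    // Hypothetical element, used only to illustrate the registration pattern.
    class MyItem : public QObject
    {
        Q_OBJECT
    public:
        explicit MyItem(QObject *parent = 0) : QObject(parent) {}
    };

    void defineMyModule()
    {
        // New canonical import: "import MyModule 1.0"
        qmlRegisterType<MyItem>("MyModule", 1, 0, "MyItem");
    #ifndef MYMODULE_NO_LEGACY_IMPORT   // made-up guard, mirroring QT_NO_IMPORT_QT47_QML
        // Keep the old import compiling unless compatibility is switched off.
        qmlRegisterType<MyItem>("MyModuleOld", 0, 1, "MyItem");
    #endif
    }
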
diff --git a/src/declarative/graphicsitems/qdeclarativepath.cpp b/src/declarative/graphicsitems/qdeclarativepath.cpp
index f93357d..d526688 100644
--- a/src/declarative/graphicsitems/qdeclarativepath.cpp
+++ b/src/declarative/graphicsitems/qdeclarativepath.cpp
@@ -491,17 +491,17 @@ void QDeclarativeCurve::setY(qreal y)
\since 4.7
\brief The PathAttribute allows setting an attribute at a given position in a Path.
- The PathAttribute object allows attibutes consisting of a name and
- a value to be specified for the endpoints of path segments. The
+ The PathAttribute object allows attributes consisting of a name and
+ a value to be specified for various points along a path. The
attributes are exposed to the delegate as
\l{qdeclarativeintroduction.html#attached-properties} {Attached Properties}.
- The value of an attribute at any particular point is interpolated
- from the PathAttributes bounding the point.
+ The value of an attribute at any particular point along the path is interpolated
+ from the PathAttributes bounding that point.
The example below shows a path with the items scaled to 30% with
opacity 50% at the top of the path and scaled 100% with opacity
- 100% at the bottom. Note the use of the PathView.scale and
- PathView.opacity attached properties to set the scale and opacity
+ 100% at the bottom. Note the use of the PathView.iconScale and
+ PathView.iconOpacity attached properties to set the scale and opacity
of the delegate.
\table
@@ -509,14 +509,17 @@ void QDeclarativeCurve::setY(qreal y)
\o \image declarative-pathattribute.png
\o
\snippet doc/src/snippets/declarative/pathview/pathattributes.qml 0
+ (See the PathView documentation for the specification of ContactModel.qml,
+ the model used in the example above.)
\endtable
- \sa Path
+
+ \sa Path
*/
/*!
\qmlproperty string PathAttribute::name
- the name of the attribute to change.
+ This property holds the name of the attribute to change.
This attribute will be available to the delegate as PathView.<name>
@@ -544,8 +547,39 @@ void QDeclarativePathAttribute::setName(const QString &name)
}
/*!
- \qmlproperty string PathAttribute::value
- the new value of the attribute.
+ \qmlproperty real PathAttribute::value
+ This property holds the value for the attribute.
+
+ The value specified can be used to influence the visual appearance
+ of an item along the path. For example, the following Path specifies
+ an attribute named \e itemRotation, which has the value \e 0 at the
+ beginning of the path, and the value 90 at the end of the path.
+
+ \qml
+ Path {
+ startX: 0
+ startY: 0
+ PathAttribute { name: "itemRotation"; value: 0 }
+ PathLine { x: 100; y: 100 }
+ PathAttribute { name: "itemRotation"; value: 90 }
+ }
+ \endqml
+
+ In our delegate, we can then bind the \e rotation property to the
+ \l{qdeclarativeintroduction.html#attached-properties} {Attached Property}
+ \e PathView.itemRotation created for this attribute.
+
+ \qml
+ Rectangle {
+ width: 10; height: 10
+ rotation: PathView.itemRotation
+ }
+ \endqml
+
+ As each item is positioned along the path, it will be rotated accordingly:
+ an item at the beginning of the path with be not be rotated, an item at
+ the end of the path will be rotated 90 degrees, and an item mid-way along
+ the path will be rotated 45 degrees.
*/
/*!
@@ -792,6 +826,10 @@ void QDeclarativePathCubic::addToPath(QPainterPath &path)
\since 4.7
\brief The PathPercent manipulates the way a path is interpreted.
+ PathPercent allows you to manipulate the spacing between items on a
+ PathView's path. You can use it to bunch together items on part of
+ the path, and spread them out on other parts of the path.
+
The examples below show the normal distribution of items along a path
compared to a distribution which places 50% of the items along the
PathLine section of the path.
@@ -800,25 +838,31 @@ void QDeclarativePathCubic::addToPath(QPainterPath &path)
\o \image declarative-nopercent.png
\o
\qml
- Path {
- startX: 20; startY: 0
- PathQuad { x: 50; y: 80; controlX: 0; controlY: 80 }
- PathLine { x: 150; y: 80 }
- PathQuad { x: 180; y: 0; controlX: 200; controlY: 80 }
+ PathView {
+ ...
+ Path {
+ startX: 20; startY: 0
+ PathQuad { x: 50; y: 80; controlX: 0; controlY: 80 }
+ PathLine { x: 150; y: 80 }
+ PathQuad { x: 180; y: 0; controlX: 200; controlY: 80 }
+ }
}
\endqml
\row
\o \image declarative-percent.png
\o
\qml
- Path {
- startX: 20; startY: 0
- PathQuad { x: 50; y: 80; controlX: 0; controlY: 80 }
- PathPercent { value: 0.25 }
- PathLine { x: 150; y: 80 }
- PathPercent { value: 0.75 }
- PathQuad { x: 180; y: 0; controlX: 200; controlY: 80 }
- PathPercent { value: 1 }
+ PathView {
+ ...
+ Path {
+ startX: 20; startY: 0
+ PathQuad { x: 50; y: 80; controlX: 0; controlY: 80 }
+ PathPercent { value: 0.25 }
+ PathLine { x: 150; y: 80 }
+ PathPercent { value: 0.75 }
+ PathQuad { x: 180; y: 0; controlX: 200; controlY: 80 }
+ PathPercent { value: 1 }
+ }
}
\endqml
\endtable
@@ -826,6 +870,36 @@ void QDeclarativePathCubic::addToPath(QPainterPath &path)
\sa Path
*/
+/*!
+ \qmlproperty real PathPercent::value
+ The proportion of items that should be laid out up to this point.
+
+ This value should always be higher than the last value specified
+ by a PathPercent at a previous position in the Path.
+
+ In the following example we have a Path made up of three PathLines.
+ Normally, the items of the PathView would be laid out equally along
+ this path, with an equal number of items per line segment. PathPercent
+ allows us to specify that the first and third lines should each hold
+ 10% of the laid out items, while the second line should hold the remaining
+ 80%.
+
+ \qml
+ PathView {
+ ...
+ Path {
+ startX: 0; startY: 0
+ PathLine { x:100; y: 0; }
+ PathPercent { value: 0.1 }
+ PathLine { x: 100; y: 100 }
+ PathPercent { value: 0.9 }
+ PathLine { x: 100; y: 0 }
+ PathPercent { value: 1 }
+ }
+ }
+ \endqml
+*/
+
qreal QDeclarativePathPercent::value() const
{
return _value;
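
To make the 10%/80%/10% split above concrete: if the PathView shows 20 delegates, about 0.1 * 20 = 2 of them are laid out on each of the two short segments and 0.8 * 20 = 16 on the middle one, because PathView distributes items according to the cumulative value set by each PathPercent.
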
diff --git a/src/declarative/graphicsitems/qdeclarativetextedit.cpp b/src/declarative/graphicsitems/qdeclarativetextedit.cpp
index 3ac095c..6f5608a 100644
--- a/src/declarative/graphicsitems/qdeclarativetextedit.cpp
+++ b/src/declarative/graphicsitems/qdeclarativetextedit.cpp
@@ -1422,7 +1422,7 @@ void QDeclarativeTextEditPrivate::updateDefaultTextOption()
Only relevant on platforms, which provide virtual keyboards.
\code
- import Qt 4.7
+ import QtQuick 1.0
TextEdit {
id: textEdit
text: "Hello world!"
@@ -1473,7 +1473,7 @@ void QDeclarativeTextEdit::openSoftwareInputPanel()
Only relevant on platforms, which provide virtual keyboards.
\code
- import Qt 4.7
+ import QtQuick 1.0
TextEdit {
id: textEdit
text: "Hello world!"
diff --git a/src/declarative/graphicsitems/qdeclarativetextinput.cpp b/src/declarative/graphicsitems/qdeclarativetextinput.cpp
index 5604b82..4817999 100644
--- a/src/declarative/graphicsitems/qdeclarativetextinput.cpp
+++ b/src/declarative/graphicsitems/qdeclarativetextinput.cpp
@@ -651,7 +651,7 @@ void QDeclarativeTextInput::setAutoScroll(bool b)
input of integers between 11 and 31 into the text input:
\code
- import Qt 4.7
+ import QtQuick 1.0
TextInput{
validator: IntValidator{bottom: 11; top: 31;}
focus: true
@@ -1334,7 +1334,7 @@ void QDeclarativeTextInput::moveCursorSelection(int position)
Only relevant on platforms, which provide virtual keyboards.
\qml
- import Qt 4.7
+ import QtQuick 1.0
TextInput {
id: textInput
text: "Hello world!"
@@ -1385,7 +1385,7 @@ void QDeclarativeTextInput::openSoftwareInputPanel()
Only relevant on platforms, which provide virtual keyboards.
\qml
- import Qt 4.7
+ import QtQuick 1.0
TextInput {
id: textInput
text: "Hello world!"
diff --git a/src/declarative/graphicsitems/qdeclarativevisualitemmodel.cpp b/src/declarative/graphicsitems/qdeclarativevisualitemmodel.cpp
index 21d1ea7..439f500 100644
--- a/src/declarative/graphicsitems/qdeclarativevisualitemmodel.cpp
+++ b/src/declarative/graphicsitems/qdeclarativevisualitemmodel.cpp
@@ -142,7 +142,7 @@ public:
The example below places three colored rectangles in a ListView.
\code
- import Qt 4.7
+ import QtQuick 1.0
Rectangle {
VisualItemModel {
diff --git a/src/declarative/qml/parser/qdeclarativejsast_p.h b/src/declarative/qml/parser/qdeclarativejsast_p.h
index 0623a2a..541ea7f 100644
--- a/src/declarative/qml/parser/qdeclarativejsast_p.h
+++ b/src/declarative/qml/parser/qdeclarativejsast_p.h
@@ -224,6 +224,9 @@ public:
inline Node()
: kind(Kind_Undefined) {}
+ // NOTE: node destructors are never called,
+ // instead we block free the memory
+ // (see the NodePool class)
virtual ~Node() {}
virtual ExpressionNode *expressionCast();
@@ -247,7 +250,6 @@ class QML_PARSER_EXPORT ExpressionNode: public Node
{
public:
ExpressionNode() {}
- virtual ~ExpressionNode() {}
virtual ExpressionNode *expressionCast();
@@ -259,7 +261,6 @@ class QML_PARSER_EXPORT Statement: public Node
{
public:
Statement() {}
- virtual ~Statement() {}
virtual Statement *statementCast();
@@ -379,7 +380,6 @@ public:
QDECLARATIVEJS_DECLARE_AST_NODE(ThisExpression)
ThisExpression() { kind = K; }
- virtual ~ThisExpression() {}
virtual void accept0(Visitor *visitor);
@@ -401,8 +401,6 @@ public:
IdentifierExpression(NameId *n):
name (n) { kind = K; }
- virtual ~IdentifierExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -422,7 +420,6 @@ public:
QDECLARATIVEJS_DECLARE_AST_NODE(NullExpression)
NullExpression() { kind = K; }
- virtual ~NullExpression() {}
virtual void accept0(Visitor *visitor);
@@ -442,7 +439,6 @@ public:
QDECLARATIVEJS_DECLARE_AST_NODE(TrueLiteral)
TrueLiteral() { kind = K; }
- virtual ~TrueLiteral() {}
virtual void accept0(Visitor *visitor);
@@ -462,7 +458,6 @@ public:
QDECLARATIVEJS_DECLARE_AST_NODE(FalseLiteral)
FalseLiteral() { kind = K; }
- virtual ~FalseLiteral() {}
virtual void accept0(Visitor *visitor);
@@ -483,7 +478,6 @@ public:
NumericLiteral(double v):
value(v) { kind = K; }
- virtual ~NumericLiteral() {}
virtual void accept0(Visitor *visitor);
@@ -506,8 +500,6 @@ public:
StringLiteral(NameId *v):
value (v) { kind = K; }
- virtual ~StringLiteral() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -529,8 +521,6 @@ public:
RegExpLiteral(NameId *p, int f):
pattern (p), flags (f) { kind = K; }
- virtual ~RegExpLiteral() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -562,8 +552,6 @@ public:
elements (elts), elision (e)
{ kind = K; }
- virtual ~ArrayLiteral() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -591,8 +579,6 @@ public:
ObjectLiteral(PropertyNameAndValueList *plist):
properties (plist) { kind = K; }
- virtual ~ObjectLiteral() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -624,8 +610,6 @@ public:
previous->next = this;
}
- virtual ~ElementList() {}
-
inline ElementList *finish ()
{
ElementList *front = next;
@@ -657,8 +641,6 @@ public:
previous->next = this;
}
- virtual ~Elision() {}
-
virtual void accept0(Visitor *visitor);
inline Elision *finish ()
@@ -690,8 +672,6 @@ public:
previous->next = this;
}
- virtual ~PropertyNameAndValueList() {}
-
virtual void accept0(Visitor *visitor);
inline PropertyNameAndValueList *finish ()
@@ -715,7 +695,6 @@ public:
QDECLARATIVEJS_DECLARE_AST_NODE(PropertyName)
PropertyName() { kind = K; }
- virtual ~PropertyName() {}
// attributes
SourceLocation propertyNameToken;
@@ -729,8 +708,6 @@ public:
IdentifierPropertyName(NameId *n):
id (n) { kind = K; }
- virtual ~IdentifierPropertyName() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -744,7 +721,6 @@ public:
StringLiteralPropertyName(NameId *n):
id (n) { kind = K; }
- virtual ~StringLiteralPropertyName() {}
virtual void accept0(Visitor *visitor);
@@ -759,7 +735,6 @@ public:
NumericLiteralPropertyName(double n):
id (n) { kind = K; }
- virtual ~NumericLiteralPropertyName() {}
virtual void accept0(Visitor *visitor);
@@ -776,8 +751,6 @@ public:
base (b), expression (e)
{ kind = K; }
- virtual ~ArrayMemberExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -802,8 +775,6 @@ public:
base (b), name (n)
{ kind = K; }
- virtual ~FieldMemberExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -828,8 +799,6 @@ public:
base (b), arguments (a)
{ kind = K; }
- virtual ~NewMemberExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -854,8 +823,6 @@ public:
NewExpression(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~NewExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -878,8 +845,6 @@ public:
base (b), arguments (a)
{ kind = K; }
- virtual ~CallExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -912,8 +877,6 @@ public:
previous->next = this;
}
- virtual ~ArgumentList() {}
-
virtual void accept0(Visitor *visitor);
inline ArgumentList *finish ()
@@ -937,8 +900,6 @@ public:
PostIncrementExpression(ExpressionNode *b):
base (b) { kind = K; }
- virtual ~PostIncrementExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -960,8 +921,6 @@ public:
PostDecrementExpression(ExpressionNode *b):
base (b) { kind = K; }
- virtual ~PostDecrementExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -982,7 +941,6 @@ public:
DeleteExpression(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~DeleteExpression() {}
virtual void accept0(Visitor *visitor);
@@ -1005,8 +963,6 @@ public:
VoidExpression(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~VoidExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1028,8 +984,6 @@ public:
TypeOfExpression(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~TypeOfExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1051,8 +1005,6 @@ public:
PreIncrementExpression(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~PreIncrementExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1074,8 +1026,6 @@ public:
PreDecrementExpression(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~PreDecrementExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1097,8 +1047,6 @@ public:
UnaryPlusExpression(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~UnaryPlusExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1120,8 +1068,6 @@ public:
UnaryMinusExpression(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~UnaryMinusExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1143,8 +1089,6 @@ public:
TildeExpression(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~TildeExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1166,8 +1110,6 @@ public:
NotExpression(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~NotExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1190,8 +1132,6 @@ public:
left (l), op (o), right (r)
{ kind = K; }
- virtual ~BinaryExpression() {}
-
virtual BinaryExpression *binaryExpressionCast();
virtual void accept0(Visitor *visitor);
@@ -1218,8 +1158,6 @@ public:
expression (e), ok (t), ko (f)
{ kind = K; }
- virtual ~ConditionalExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1244,8 +1182,6 @@ public:
Expression(ExpressionNode *l, ExpressionNode *r):
left (l), right (r) { kind = K; }
- virtual ~Expression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1268,8 +1204,6 @@ public:
Block(StatementList *slist):
statements (slist) { kind = K; }
- virtual ~Block() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1301,8 +1235,6 @@ public:
previous->next = this;
}
- virtual ~StatementList() {}
-
virtual void accept0(Visitor *visitor);
inline StatementList *finish ()
@@ -1326,8 +1258,6 @@ public:
declarations (vlist)
{ kind = K; }
- virtual ~VariableStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1351,8 +1281,6 @@ public:
name (n), expression (e), readOnly(false)
{ kind = K; }
- virtual ~VariableDeclaration() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -1379,8 +1307,6 @@ public:
previous->next = this;
}
- virtual ~VariableDeclarationList() {}
-
virtual void accept0(Visitor *visitor);
inline VariableDeclarationList *finish (bool readOnly)
@@ -1407,7 +1333,6 @@ public:
QDECLARATIVEJS_DECLARE_AST_NODE(EmptyStatement)
EmptyStatement() { kind = K; }
- virtual ~EmptyStatement() {}
virtual void accept0(Visitor *visitor);
@@ -1429,8 +1354,6 @@ public:
ExpressionStatement(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~ExpressionStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1453,8 +1376,6 @@ public:
expression (e), ok (t), ko (f)
{ kind = K; }
- virtual ~IfStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1487,8 +1408,6 @@ public:
statement (stmt), expression (e)
{ kind = K; }
- virtual ~DoWhileStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1516,8 +1435,6 @@ public:
expression (e), statement (stmt)
{ kind = K; }
- virtual ~WhileStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1543,8 +1460,6 @@ public:
initialiser (i), condition (c), expression (e), statement (stmt)
{ kind = K; }
- virtual ~ForStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1574,8 +1489,6 @@ public:
declarations (vlist), condition (c), expression (e), statement (stmt)
{ kind = K; }
- virtual ~LocalForStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1606,8 +1519,6 @@ public:
initialiser (i), expression (e), statement (stmt)
{ kind = K; }
- virtual ~ForEachStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1635,8 +1546,6 @@ public:
declaration (v), expression (e), statement (stmt)
{ kind = K; }
- virtual ~LocalForEachStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1664,8 +1573,6 @@ public:
ContinueStatement(NameId *l = 0):
label (l) { kind = K; }
- virtual ~ContinueStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1689,8 +1596,6 @@ public:
BreakStatement(NameId *l = 0):
label (l) { kind = K; }
- virtual ~BreakStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1714,8 +1619,6 @@ public:
ReturnStatement(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~ReturnStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1739,8 +1642,6 @@ public:
expression (e), statement (stmt)
{ kind = K; }
- virtual ~WithStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1766,8 +1667,6 @@ public:
clauses (c), defaultClause (d), moreClauses (r)
{ kind = K; }
- virtual ~CaseBlock() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -1787,8 +1686,6 @@ public:
expression (e), block (b)
{ kind = K; }
- virtual ~SwitchStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1822,8 +1719,6 @@ public:
previous->next = this;
}
- virtual ~CaseClauses() {}
-
virtual void accept0(Visitor *visitor);
inline CaseClauses *finish ()
@@ -1847,8 +1742,6 @@ public:
expression (e), statements (slist)
{ kind = K; }
- virtual ~CaseClause() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -1867,8 +1760,6 @@ public:
statements (slist)
{ kind = K; }
- virtual ~DefaultClause() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -1886,8 +1777,6 @@ public:
label (l), statement (stmt)
{ kind = K; }
- virtual ~LabelledStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1911,8 +1800,6 @@ public:
ThrowStatement(ExpressionNode *e):
expression (e) { kind = K; }
- virtual ~ThrowStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -1936,8 +1823,6 @@ public:
name (n), statement (stmt)
{ kind = K; }
- virtual ~Catch() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -1958,8 +1843,6 @@ public:
statement (stmt)
{ kind = K; }
- virtual ~Finally() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -1984,8 +1867,6 @@ public:
statement (stmt), catchExpression (c), finallyExpression (0)
{ kind = K; }
- virtual ~TryStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -2017,8 +1898,6 @@ public:
name (n), formals (f), body (b)
{ kind = K; }
- virtual ~FunctionExpression() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -2048,8 +1927,6 @@ public:
FunctionExpression(n, f, b)
{ kind = K; }
- virtual ~FunctionDeclaration() {}
-
virtual void accept0(Visitor *visitor);
};
@@ -2070,8 +1947,6 @@ public:
previous->next = this;
}
- virtual ~FormalParameterList() {}
-
virtual void accept0(Visitor *visitor);
inline FormalParameterList *finish ()
@@ -2097,8 +1972,6 @@ public:
elements (elts)
{ kind = K; }
- virtual ~FunctionBody() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -2114,8 +1987,6 @@ public:
elements (elts)
{ kind = K; }
- virtual ~Program() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -2139,8 +2010,6 @@ public:
previous->next = this;
}
- virtual ~SourceElements() {}
-
virtual void accept0(Visitor *visitor);
inline SourceElements *finish ()
@@ -2162,8 +2031,6 @@ public:
inline SourceElement()
{ kind = K; }
-
- virtual ~SourceElement() {}
};
class QML_PARSER_EXPORT FunctionSourceElement: public SourceElement
@@ -2175,8 +2042,6 @@ public:
declaration (f)
{ kind = K; }
- virtual ~FunctionSourceElement() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -2192,8 +2057,6 @@ public:
statement (stmt)
{ kind = K; }
- virtual ~StatementSourceElement() {}
-
virtual void accept0(Visitor *visitor);
// attributes
@@ -2208,8 +2071,6 @@ public:
DebuggerStatement()
{ kind = K; }
- virtual ~DebuggerStatement() {}
-
virtual void accept0(Visitor *visitor);
virtual SourceLocation firstSourceLocation() const
@@ -2256,8 +2117,6 @@ public:
previous->next = this;
}
- virtual ~UiQualifiedId() {}
-
UiQualifiedId *finish()
{
UiQualifiedId *head = next;
@@ -2459,8 +2318,6 @@ public:
previous->next = this;
}
- virtual ~UiParameterList() {}
-
virtual void accept0(Visitor *) {}
inline UiParameterList *finish ()
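
The NOTE added to Node above is the reason all of these empty destructors can be dropped: AST nodes are carved out of pool blocks and the blocks are released in one go, so individual destructors never run. A rough sketch of that allocation scheme (this is not the parser's actual NodePool, only an illustration of the idea):

    #include <cstddef>
    #include <new>
    #include <vector>

    // Toy block allocator: objects are placement-new'd into large blocks and
    // their destructors are never run, so pooled types must not own resources
    // that need destruction (the constraint the NOTE describes).
    class ToyNodePool
    {
    public:
        ~ToyNodePool()
        {
            for (std::size_t i = 0; i < blocks.size(); ++i)
                ::operator delete(blocks[i]);        // frees memory, runs no destructors
        }

        void *allocate(std::size_t size)             // assumes size <= BlockSize
        {
            size = (size + 7) & ~std::size_t(7);     // keep allocations 8-byte aligned
            if (used + size > BlockSize) {
                blocks.push_back(::operator new(BlockSize));
                used = 0;
            }
            void *p = static_cast<char *>(blocks.back()) + used;
            used += size;
            return p;
        }

    private:
        static const std::size_t BlockSize = 8 * 1024;
        std::vector<void *> blocks;
        std::size_t used = BlockSize;                // forces a block on first use
    };

    struct ToyNode { int kind; };

    // Usage: ToyNode *n = new (pool.allocate(sizeof(ToyNode))) ToyNode();
    // Nodes are never deleted individually; destroying the pool frees everything.
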
diff --git a/src/declarative/qml/qdeclarativecompiledbindings.cpp b/src/declarative/qml/qdeclarativecompiledbindings.cpp
index 9402596..5f0fd56 100644
--- a/src/declarative/qml/qdeclarativecompiledbindings.cpp
+++ b/src/declarative/qml/qdeclarativecompiledbindings.cpp
@@ -438,7 +438,7 @@ struct Instr {
qint8 output;
qint8 reg;
quint8 exceptionId;
- quint32 index;
+ quint32 id;
} attached;
struct {
QML_INSTR_HEADER
@@ -988,7 +988,7 @@ static void dumpInstruction(const Instr *instr)
qWarning().nospace() << "\t" << "LoadRoot" << "\t\t" << instr->load.index << "\t" << instr->load.reg;
break;
case Instr::LoadAttached:
- qWarning().nospace() << "\t" << "LoadAttached" << "\t\t" << instr->attached.output << "\t" << instr->attached.reg << "\t" << instr->attached.index;
+ qWarning().nospace() << "\t" << "LoadAttached" << "\t\t" << instr->attached.output << "\t" << instr->attached.reg << "\t" << instr->attached.id;
break;
case Instr::ConvertIntToReal:
qWarning().nospace() << "\t" << "ConvertIntToReal" << "\t" << instr->unaryop.output << "\t" << instr->unaryop.src;
@@ -1225,7 +1225,7 @@ void QDeclarativeCompiledBindingsPrivate::run(int instrIndex,
output.setUndefined();
} else {
QObject *attached =
- qmlAttachedPropertiesObjectById(instr->attached.index,
+ qmlAttachedPropertiesObjectById(instr->attached.id,
registers[instr->attached.reg].getQObject(),
true);
Q_ASSERT(attached);
@@ -1895,7 +1895,7 @@ bool QDeclarativeBindingCompilerPrivate::parseName(AST::Node *node, Result &type
attach.common.type = Instr::LoadAttached;
attach.attached.output = reg;
attach.attached.reg = reg;
- attach.attached.index = attachType->index();
+ attach.attached.id = attachType->attachedPropertiesId();
attach.attached.exceptionId = exceptionId(nameNodes.at(ii));
bytecode << attach;
@@ -2011,7 +2011,7 @@ bool QDeclarativeBindingCompilerPrivate::parseName(AST::Node *node, Result &type
attach.common.type = Instr::LoadAttached;
attach.attached.output = reg;
attach.attached.reg = reg;
- attach.attached.index = attachType->index();
+ attach.attached.id = attachType->attachedPropertiesId();
bytecode << attach;
absType = 0;
diff --git a/src/declarative/qml/qdeclarativecompiler.cpp b/src/declarative/qml/qdeclarativecompiler.cpp
index e55dc92..90d38b3 100644
--- a/src/declarative/qml/qdeclarativecompiler.cpp
+++ b/src/declarative/qml/qdeclarativecompiler.cpp
@@ -1286,7 +1286,7 @@ bool QDeclarativeCompiler::buildSubObject(Object *obj, const BindingContext &ctx
int QDeclarativeCompiler::componentTypeRef()
{
- QDeclarativeType *t = QDeclarativeMetaType::qmlType("Qt/Component",4,7);
+ QDeclarativeType *t = QDeclarativeMetaType::qmlType("QtQuick/Component",1,0);
for (int ii = output->types.count() - 1; ii >= 0; --ii) {
if (output->types.at(ii).type == t)
return ii;
@@ -1407,7 +1407,7 @@ bool QDeclarativeCompiler::buildProperty(QDeclarativeParser::Property *prop,
COMPILE_EXCEPTION(prop, tr("Invalid attached object assignment"));
Q_ASSERT(type->attachedPropertiesFunction());
- prop->index = type->index();
+ prop->index = type->attachedPropertiesId();
prop->value->metatype = type->attachedPropertiesType();
} else {
// Setup regular property data
diff --git a/src/declarative/qml/qdeclarativecomponent.cpp b/src/declarative/qml/qdeclarativecomponent.cpp
index 7f58166..b532b0c 100644
--- a/src/declarative/qml/qdeclarativecomponent.cpp
+++ b/src/declarative/qml/qdeclarativecomponent.cpp
@@ -78,7 +78,7 @@ class QByteArray;
For example, if there is a \c main.qml file like this:
\qml
- import Qt 4.7
+ import QtQuick 1.0
Item {
width: 200
diff --git a/src/declarative/qml/qdeclarativecontext.cpp b/src/declarative/qml/qdeclarativecontext.cpp
index de45a95..59d5cfa 100644
--- a/src/declarative/qml/qdeclarativecontext.cpp
+++ b/src/declarative/qml/qdeclarativecontext.cpp
@@ -85,7 +85,7 @@ QDeclarativeContextPrivate::QDeclarativeContextPrivate()
context->setContextProperty("myModel", &modelData);
QDeclarativeComponent component(&engine);
- component.setData("import Qt 4.7\nListView { model: myModel }", QUrl());
+ component.setData("import QtQuick 1.0\nListView { model: myModel }", QUrl());
component.create(context);
\endcode
@@ -112,7 +112,7 @@ QDeclarativeContextPrivate::QDeclarativeContextPrivate()
context->setContextObject(&myDataSet);
QDeclarativeComponent component(&engine);
- component.setData("import Qt 4.7\nListView { model: myModel }", QUrl());
+ component.setData("import QtQuick 1.0\nListView { model: myModel }", QUrl());
component.create(context);
\endcode
diff --git a/src/declarative/qml/qdeclarativedom.cpp b/src/declarative/qml/qdeclarativedom.cpp
index 1a9b501..fa79425 100644
--- a/src/declarative/qml/qdeclarativedom.cpp
+++ b/src/declarative/qml/qdeclarativedom.cpp
@@ -896,7 +896,7 @@ QByteArray QDeclarativeDomObject::customTypeData() const
*/
bool QDeclarativeDomObject::isComponent() const
{
- return (d->object && d->object->typeName == "Qt/Component");
+ return (d->object && (d->object->typeName == "Qt/Component" || d->object->typeName == "QtQuick/Component"));
}
/*!
diff --git a/src/declarative/qml/qdeclarativeengine.cpp b/src/declarative/qml/qdeclarativeengine.cpp
index 26b3629..724553f 100644
--- a/src/declarative/qml/qdeclarativeengine.cpp
+++ b/src/declarative/qml/qdeclarativeengine.cpp
@@ -141,7 +141,7 @@ QT_BEGIN_NAMESPACE
\qml
// MyRect.qml
- import Qt 4.7
+ import QtQuick 1.0
Item {
width: 200; height: 200
@@ -177,9 +177,15 @@ static bool qt_QmlQtModule_registered = false;
void QDeclarativeEnginePrivate::defineModule()
{
+ qmlRegisterType<QDeclarativeComponent>("QtQuick",1,0,"Component");
+ qmlRegisterType<QObject>("QtQuick",1,0,"QtObject");
+ qmlRegisterType<QDeclarativeWorkerScript>("QtQuick",1,0,"WorkerScript");
+
+#ifndef QT_NO_IMPORT_QT47_QML
qmlRegisterType<QDeclarativeComponent>("Qt",4,7,"Component");
qmlRegisterType<QObject>("Qt",4,7,"QtObject");
qmlRegisterType<QDeclarativeWorkerScript>("Qt",4,7,"WorkerScript");
+#endif
qmlRegisterType<QDeclarativeBinding>();
}
@@ -198,7 +204,7 @@ with enums and functions. To use it, call the members of the global \c Qt objec
For example:
\qml
-import Qt 4.7
+import QtQuick 1.0
Text {
color: Qt.rgba(255, 0, 0, 1)
@@ -510,7 +516,7 @@ QDeclarativeWorkerScriptEngine *QDeclarativeEnginePrivate::getWorkerScriptEngine
\code
QDeclarativeEngine engine;
QDeclarativeComponent component(&engine);
- component.setData("import Qt 4.7\nText { text: \"Hello world!\" }", QUrl());
+ component.setData("import QtQuick 1.0\nText { text: \"Hello world!\" }", QUrl());
QDeclarativeItem *item = qobject_cast<QDeclarativeItem *>(component.create());
//add item to view, etc
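
With the compatibility registrations above in place (that is, when QT_NO_IMPORT_QT47_QML is not defined), both spellings of the import resolve to the same registered types. A small check, purely for illustration:

    #include <QtGui/QApplication>
    #include <QtCore/QUrl>
    #include <QtDeclarative/QDeclarativeEngine>
    #include <QtDeclarative/QDeclarativeComponent>

    int main(int argc, char **argv)
    {
        QApplication app(argc, argv);
        QDeclarativeEngine engine;

        QDeclarativeComponent newStyle(&engine);
        newStyle.setData("import QtQuick 1.0\nQtObject {}", QUrl());

        QDeclarativeComponent oldStyle(&engine);
        oldStyle.setData("import Qt 4.7\nQtObject {}", QUrl());

        // Both create successfully as long as the Qt 4.7 names remain registered.
        return (newStyle.create() && oldStyle.create()) ? 0 : 1;
    }
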
diff --git a/src/declarative/qml/qdeclarativeexpression.cpp b/src/declarative/qml/qdeclarativeexpression.cpp
index 6fc4df0..77a1ba4 100644
--- a/src/declarative/qml/qdeclarativeexpression.cpp
+++ b/src/declarative/qml/qdeclarativeexpression.cpp
@@ -202,7 +202,7 @@ QScriptValue QDeclarativeExpressionPrivate::evalInObjectScope(QDeclarativeContex
For example, given a file \c main.qml like this:
\qml
- import Qt 4.7
+ import QtQuick 1.0
Item {
width: 200; height: 200
diff --git a/src/declarative/qml/qdeclarativemetatype.cpp b/src/declarative/qml/qdeclarativemetatype.cpp
index a5c878f..7a78a1f 100644
--- a/src/declarative/qml/qdeclarativemetatype.cpp
+++ b/src/declarative/qml/qdeclarativemetatype.cpp
@@ -146,6 +146,7 @@ public:
const QMetaObject *m_baseMetaObject;
QDeclarativeAttachedPropertiesFunc m_attachedPropertiesFunc;
const QMetaObject *m_attachedPropertiesType;
+ int m_attachedPropertiesId;
int m_parserStatusCast;
int m_propertyValueSourceCast;
int m_propertyValueInterceptorCast;
@@ -155,8 +156,12 @@ public:
QDeclarativeCustomParser *m_customParser;
mutable volatile bool m_isSetup:1;
mutable QList<QDeclarativeProxyMetaObject::ProxyData> m_metaObjects;
+
+ static QHash<const QMetaObject *, int> m_attachedPropertyIds;
};
+QHash<const QMetaObject *, int> QDeclarativeTypePrivate::m_attachedPropertyIds;
+
QDeclarativeTypePrivate::QDeclarativeTypePrivate()
: m_isInterface(false), m_iid(0), m_typeId(0), m_listId(0),
m_allocationSize(0), m_newFunc(0), m_baseMetaObject(0), m_attachedPropertiesFunc(0), m_attachedPropertiesType(0),
@@ -198,6 +203,14 @@ QDeclarativeType::QDeclarativeType(int index, const QDeclarativePrivate::Registe
d->m_baseMetaObject = type.metaObject;
d->m_attachedPropertiesFunc = type.attachedPropertiesFunction;
d->m_attachedPropertiesType = type.attachedPropertiesMetaObject;
+ if (d->m_attachedPropertiesType) {
+ QHash<const QMetaObject *, int>::Iterator iter = d->m_attachedPropertyIds.find(d->m_baseMetaObject);
+ if (iter == d->m_attachedPropertyIds.end())
+ iter = d->m_attachedPropertyIds.insert(d->m_baseMetaObject, index);
+ d->m_attachedPropertiesId = *iter;
+ } else {
+ d->m_attachedPropertiesId = -1;
+ }
d->m_parserStatusCast = type.parserStatusCast;
d->m_propertyValueSourceCast = type.valueSourceCast;
d->m_propertyValueInterceptorCast = type.valueInterceptorCast;
@@ -461,6 +474,16 @@ const QMetaObject *QDeclarativeType::attachedPropertiesType() const
return d->m_attachedPropertiesType;
}
+/*
+This is the id passed to qmlAttachedPropertiesById(). This is different from the index
+for the case where a single class is registered under two or more names (e.g. Item in
+Qt 4.7 and QtQuick 1.0).
+*/
+int QDeclarativeType::attachedPropertiesId() const
+{
+ return d->m_attachedPropertiesId;
+}
+
int QDeclarativeType::parserStatusCast() const
{
return d->m_parserStatusCast;
@@ -662,7 +685,7 @@ int QDeclarativeMetaType::attachedPropertiesFuncId(const QMetaObject *mo)
QDeclarativeType *type = data->metaObjectToType.value(mo);
if (type && type->attachedPropertiesFunction())
- return type->index();
+ return type->attachedPropertiesId();
else
return -1;
}
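
The new attachedPropertiesId() exists because one C++ class can now be registered under two names (Item in both Qt 4.7 and QtQuick 1.0), and attached property lookups must agree no matter which name the QML file used. A sketch of the invariant, using the private metatype API purely for illustration and assuming both imports are compiled in:

    #include <QtCore/QtGlobal>
    #include <private/qdeclarativemetatype_p.h>   // private API, illustration only

    void checkSharedAttachedId()
    {
        QDeclarativeType *oldName = QDeclarativeMetaType::qmlType("Qt/Item", 4, 7);
        QDeclarativeType *newName = QDeclarativeMetaType::qmlType("QtQuick/Item", 1, 0);

        // Two registrations, two indexes...
        Q_ASSERT(oldName && newName && oldName->index() != newName->index());

        // ...but a single attached-properties id, so qmlAttachedPropertiesObjectById()
        // reaches the same attachment object whichever name was imported.
        Q_ASSERT(oldName->attachedPropertiesId() == newName->attachedPropertiesId());
    }
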
diff --git a/src/declarative/qml/qdeclarativemetatype_p.h b/src/declarative/qml/qdeclarativemetatype_p.h
index f410547..382abd2 100644
--- a/src/declarative/qml/qdeclarativemetatype_p.h
+++ b/src/declarative/qml/qdeclarativemetatype_p.h
@@ -137,6 +137,7 @@ public:
QDeclarativeAttachedPropertiesFunc attachedPropertiesFunction() const;
const QMetaObject *attachedPropertiesType() const;
+ int attachedPropertiesId() const;
int parserStatusCast() const;
QVariant fromObject(QObject *) const;
diff --git a/src/declarative/qml/qdeclarativeproperty.cpp b/src/declarative/qml/qdeclarativeproperty.cpp
index d0dd2e8..bc20bff 100644
--- a/src/declarative/qml/qdeclarativeproperty.cpp
+++ b/src/declarative/qml/qdeclarativeproperty.cpp
@@ -220,7 +220,7 @@ void QDeclarativePropertyPrivate::initProperty(QObject *obj, const QString &name
QDeclarativeAttachedPropertiesFunc func = data->type->attachedPropertiesFunction();
if (!func) return; // Not an attachable type
- currentObject = qmlAttachedPropertiesObjectById(data->type->index(), currentObject);
+ currentObject = qmlAttachedPropertiesObjectById(data->type->attachedPropertiesId(), currentObject);
if (!currentObject) return; // Something is broken with the attachable type
} else {
Q_ASSERT(data->typeNamespace);
@@ -232,7 +232,7 @@ void QDeclarativePropertyPrivate::initProperty(QObject *obj, const QString &name
QDeclarativeAttachedPropertiesFunc func = data->type->attachedPropertiesFunction();
if (!func) return; // Not an attachable type
- currentObject = qmlAttachedPropertiesObjectById(data->type->index(), currentObject);
+ currentObject = qmlAttachedPropertiesObjectById(data->type->attachedPropertiesId(), currentObject);
if (!currentObject) return; // Something is broken with the attachable type
}
} else {
diff --git a/src/declarative/qml/qdeclarativetypenamescriptclass.cpp b/src/declarative/qml/qdeclarativetypenamescriptclass.cpp
index 764a8db..cba7b4a 100644
--- a/src/declarative/qml/qdeclarativetypenamescriptclass.cpp
+++ b/src/declarative/qml/qdeclarativetypenamescriptclass.cpp
@@ -129,7 +129,7 @@ QDeclarativeTypeNameScriptClass::queryProperty(Object *obj, const Identifier &na
return 0;
} else if (data->object) {
// Must be an attached property
- object = qmlAttachedPropertiesObjectById(data->type->index(), data->object);
+ object = qmlAttachedPropertiesObjectById(data->type->attachedPropertiesId(), data->object);
if (!object) return 0;
return ep->objectClass->queryProperty(object, name, flags, 0);
}
diff --git a/src/declarative/qml/qdeclarativevaluetype.cpp b/src/declarative/qml/qdeclarativevaluetype.cpp
index fda62ee..b9e9cec 100644
--- a/src/declarative/qml/qdeclarativevaluetype.cpp
+++ b/src/declarative/qml/qdeclarativevaluetype.cpp
@@ -50,7 +50,7 @@ QT_BEGIN_NAMESPACE
Q_GUI_EXPORT int qt_defaultDpi();
template<typename T>
-int qmlRegisterValueTypeEnums(const char *qmlName)
+int qmlRegisterValueTypeEnums(const char *uri, int versionMajor, int versionMinor, const char *qmlName)
{
QByteArray name(T::staticMetaObject.className());
@@ -63,7 +63,7 @@ int qmlRegisterValueTypeEnums(const char *qmlName)
QString(),
- "Qt", 4, 7, qmlName, &T::staticMetaObject,
+ uri, versionMajor, versionMinor, qmlName, &T::staticMetaObject,
0, 0,
@@ -99,8 +99,12 @@ bool QDeclarativeValueTypeFactory::isValueType(int idx)
void QDeclarativeValueTypeFactory::registerValueTypes()
{
- qmlRegisterValueTypeEnums<QDeclarativeEasingValueType>("Easing");
- qmlRegisterValueTypeEnums<QDeclarativeFontValueType>("Font");
+ qmlRegisterValueTypeEnums<QDeclarativeEasingValueType>("QtQuick",1,0,"Easing");
+ qmlRegisterValueTypeEnums<QDeclarativeFontValueType>("QtQuick",1,0,"Font");
+#ifndef QT_NO_IMPORT_QT47_QML
+ qmlRegisterValueTypeEnums<QDeclarativeEasingValueType>("Qt",4,7,"Easing");
+ qmlRegisterValueTypeEnums<QDeclarativeFontValueType>("Qt",4,7,"Font");
+#endif
}
QDeclarativeValueType *QDeclarativeValueTypeFactory::valueType(int t)
diff --git a/src/declarative/util/qdeclarativeanimation.cpp b/src/declarative/util/qdeclarativeanimation.cpp
index ba6f1e7..f2e6217 100644
--- a/src/declarative/util/qdeclarativeanimation.cpp
+++ b/src/declarative/util/qdeclarativeanimation.cpp
@@ -305,6 +305,8 @@ void QDeclarativeAbstractAnimation::componentFinalized()
animation will finish playing normally but not restart.
By default, the alwaysRunToEnd property is not set.
+
+ \note alwaysRunToEnd has no effect on animations in a Transition.
*/
bool QDeclarativeAbstractAnimation::alwaysRunToEnd() const
{
diff --git a/src/declarative/util/qdeclarativefontloader.cpp b/src/declarative/util/qdeclarativefontloader.cpp
index 6879494..3e4a81a 100644
--- a/src/declarative/util/qdeclarativefontloader.cpp
+++ b/src/declarative/util/qdeclarativefontloader.cpp
@@ -157,7 +157,7 @@ QHash<QUrl, QDeclarativeFontObject*> QDeclarativeFontLoaderPrivate::fonts;
For example:
\qml
- import Qt 4.7
+ import QtQuick 1.0
Column {
FontLoader { id: fixedFont; name: "Courier" }
diff --git a/src/declarative/util/qdeclarativetimer.cpp b/src/declarative/util/qdeclarativetimer.cpp
index 56320e6..c240f22 100644
--- a/src/declarative/util/qdeclarativetimer.cpp
+++ b/src/declarative/util/qdeclarativetimer.cpp
@@ -82,7 +82,7 @@ public:
object to access the current time.
\qml
- import Qt 4.7
+ import QtQuick 1.0
Item {
Timer {
diff --git a/src/declarative/util/qdeclarativeutilmodule.cpp b/src/declarative/util/qdeclarativeutilmodule.cpp
index c5bfebd..0544f22 100644
--- a/src/declarative/util/qdeclarativeutilmodule.cpp
+++ b/src/declarative/util/qdeclarativeutilmodule.cpp
@@ -75,6 +75,55 @@
void QDeclarativeUtilModule::defineModule()
{
+ qmlRegisterType<QDeclarativeAnchorAnimation>("QtQuick",1,0,"AnchorAnimation");
+ qmlRegisterType<QDeclarativeAnchorChanges>("QtQuick",1,0,"AnchorChanges");
+ qmlRegisterType<QDeclarativeBehavior>("QtQuick",1,0,"Behavior");
+ qmlRegisterType<QDeclarativeBind>("QtQuick",1,0,"Binding");
+ qmlRegisterType<QDeclarativeColorAnimation>("QtQuick",1,0,"ColorAnimation");
+ qmlRegisterType<QDeclarativeConnections>("QtQuick",1,0,"Connections");
+ qmlRegisterType<QDeclarativeSmoothedAnimation>("QtQuick",1,0,"SmoothedAnimation");
+ qmlRegisterType<QDeclarativeFontLoader>("QtQuick",1,0,"FontLoader");
+ qmlRegisterType<QDeclarativeListElement>("QtQuick",1,0,"ListElement");
+ qmlRegisterType<QDeclarativeNumberAnimation>("QtQuick",1,0,"NumberAnimation");
+ qmlRegisterType<QDeclarativePackage>("QtQuick",1,0,"Package");
+ qmlRegisterType<QDeclarativeParallelAnimation>("QtQuick",1,0,"ParallelAnimation");
+ qmlRegisterType<QDeclarativeParentAnimation>("QtQuick",1,0,"ParentAnimation");
+ qmlRegisterType<QDeclarativeParentChange>("QtQuick",1,0,"ParentChange");
+ qmlRegisterType<QDeclarativePauseAnimation>("QtQuick",1,0,"PauseAnimation");
+ qmlRegisterType<QDeclarativePropertyAction>("QtQuick",1,0,"PropertyAction");
+ qmlRegisterType<QDeclarativePropertyAnimation>("QtQuick",1,0,"PropertyAnimation");
+ qmlRegisterType<QDeclarativeRotationAnimation>("QtQuick",1,0,"RotationAnimation");
+ qmlRegisterType<QDeclarativeScriptAction>("QtQuick",1,0,"ScriptAction");
+ qmlRegisterType<QDeclarativeSequentialAnimation>("QtQuick",1,0,"SequentialAnimation");
+ qmlRegisterType<QDeclarativeSpringAnimation>("QtQuick",1,0,"SpringAnimation");
+ qmlRegisterType<QDeclarativeStateChangeScript>("QtQuick",1,0,"StateChangeScript");
+ qmlRegisterType<QDeclarativeStateGroup>("QtQuick",1,0,"StateGroup");
+ qmlRegisterType<QDeclarativeState>("QtQuick",1,0,"State");
+ qmlRegisterType<QDeclarativeSystemPalette>("QtQuick",1,0,"SystemPalette");
+ qmlRegisterType<QDeclarativeTimer>("QtQuick",1,0,"Timer");
+ qmlRegisterType<QDeclarativeTransition>("QtQuick",1,0,"Transition");
+ qmlRegisterType<QDeclarativeVector3dAnimation>("QtQuick",1,0,"Vector3dAnimation");
+#ifdef QT_NO_XMLPATTERNS
+ qmlRegisterTypeNotAvailable("QtQuick",1,0,"XmlListModel",
+ qApp->translate("QDeclarativeXmlListModel","Qt was built without support for xmlpatterns"));
+ qmlRegisterTypeNotAvailable("QtQuick",1,0,"XmlRole",
+ qApp->translate("QDeclarativeXmlListModel","Qt was built without support for xmlpatterns"));
+#else
+ qmlRegisterType<QDeclarativeXmlListModel>("QtQuick",1,0,"XmlListModel");
+ qmlRegisterType<QDeclarativeXmlListModelRole>("QtQuick",1,0,"XmlRole");
+#endif
+
+ qmlRegisterType<QDeclarativeAnchors>();
+ qmlRegisterType<QDeclarativeStateOperation>();
+ qmlRegisterType<QDeclarativeAnchorSet>();
+
+ qmlRegisterUncreatableType<QDeclarativeAbstractAnimation>("QtQuick",1,0,"Animation",QDeclarativeAbstractAnimation::tr("Animation is an abstract class"));
+
+ qmlRegisterCustomType<QDeclarativeListModel>("QtQuick",1,0,"ListModel", new QDeclarativeListModelParser);
+ qmlRegisterCustomType<QDeclarativePropertyChanges>("QtQuick",1,0,"PropertyChanges", new QDeclarativePropertyChangesParser);
+ qmlRegisterCustomType<QDeclarativeConnections>("QtQuick",1,0,"Connections", new QDeclarativeConnectionsParser);
+
+#ifndef QT_NO_IMPORT_QT47_QML
qmlRegisterType<QDeclarativeAnchorAnimation>("Qt",4,7,"AnchorAnimation");
qmlRegisterType<QDeclarativeAnchorChanges>("Qt",4,7,"AnchorChanges");
qmlRegisterType<QDeclarativeBehavior>("Qt",4,7,"Behavior");
@@ -113,13 +162,10 @@ void QDeclarativeUtilModule::defineModule()
qmlRegisterType<QDeclarativeXmlListModelRole>("Qt",4,7,"XmlRole");
#endif
- qmlRegisterType<QDeclarativeAnchors>();
- qmlRegisterType<QDeclarativeStateOperation>();
- qmlRegisterType<QDeclarativeAnchorSet>();
-
qmlRegisterUncreatableType<QDeclarativeAbstractAnimation>("Qt",4,7,"Animation",QDeclarativeAbstractAnimation::tr("Animation is an abstract class"));
qmlRegisterCustomType<QDeclarativeListModel>("Qt", 4,7, "ListModel", new QDeclarativeListModelParser);
qmlRegisterCustomType<QDeclarativePropertyChanges>("Qt", 4, 7, "PropertyChanges", new QDeclarativePropertyChangesParser);
qmlRegisterCustomType<QDeclarativeConnections>("Qt", 4, 7, "Connections", new QDeclarativeConnectionsParser);
+#endif
}
diff --git a/src/declarative/util/qdeclarativexmllistmodel.cpp b/src/declarative/util/qdeclarativexmllistmodel.cpp
index f0ed80b..24edc89 100644
--- a/src/declarative/util/qdeclarativexmllistmodel.cpp
+++ b/src/declarative/util/qdeclarativexmllistmodel.cpp
@@ -531,7 +531,7 @@ void QDeclarativeXmlListModelPrivate::clear_role(QDeclarativeListProperty<QDecla
A XmlListModel could create a model from this data, like this:
\qml
- import Qt 4.7
+ import QtQuick 1.0
XmlListModel {
id: xmlModel
diff --git a/src/gui/graphicsview/qgraphicsitem.cpp b/src/gui/graphicsview/qgraphicsitem.cpp
index b404692..3b18c31 100644
--- a/src/gui/graphicsview/qgraphicsitem.cpp
+++ b/src/gui/graphicsview/qgraphicsitem.cpp
@@ -7669,7 +7669,12 @@ void QGraphicsObject::updateMicroFocus()
void QGraphicsItemPrivate::children_append(QDeclarativeListProperty<QGraphicsObject> *list, QGraphicsObject *item)
{
- QGraphicsItemPrivate::get(item)->setParentItemHelper(static_cast<QGraphicsObject *>(list->object), /*newParentVariant=*/0, /*thisPointerVariant=*/0);
+ QGraphicsObject *graphicsObject = static_cast<QGraphicsObject *>(list->object);
+ if (QGraphicsItemPrivate::get(graphicsObject)->sendParentChangeNotification) {
+ item->setParentItem(graphicsObject);
+ } else {
+ QGraphicsItemPrivate::get(item)->setParentItemHelper(graphicsObject, 0, 0);
+ }
}
int QGraphicsItemPrivate::children_count(QDeclarativeListProperty<QGraphicsObject> *list)
@@ -7691,8 +7696,13 @@ void QGraphicsItemPrivate::children_clear(QDeclarativeListProperty<QGraphicsObje
{
QGraphicsItemPrivate *d = QGraphicsItemPrivate::get(static_cast<QGraphicsObject *>(list->object));
int childCount = d->children.count();
- for (int index = 0; index < childCount; index++)
- QGraphicsItemPrivate::get(d->children.at(0))->setParentItemHelper(0, /*newParentVariant=*/0, /*thisPointerVariant=*/0);
+ if (d->sendParentChangeNotification) {
+ for (int index = 0; index < childCount; index++)
+ d->children.at(0)->setParentItem(0);
+ } else {
+ for (int index = 0; index < childCount; index++)
+ QGraphicsItemPrivate::get(d->children.at(0))->setParentItemHelper(0, 0, 0);
+ }
}
/*!
diff --git a/src/gui/graphicsview/qgraphicsitem_p.h b/src/gui/graphicsview/qgraphicsitem_p.h
index 77e4054..c8a7699 100644
--- a/src/gui/graphicsview/qgraphicsitem_p.h
+++ b/src/gui/graphicsview/qgraphicsitem_p.h
@@ -238,6 +238,7 @@ public:
pendingPolish(0),
mayHaveChildWithGraphicsEffect(0),
isDeclarativeItem(0),
+ sendParentChangeNotification(0),
globalStackingOrder(-1),
q_ptr(0)
{
@@ -584,7 +585,8 @@ public:
quint32 pendingPolish : 1;
quint32 mayHaveChildWithGraphicsEffect : 1;
quint32 isDeclarativeItem : 1;
- quint32 padding : 23;
+ quint32 sendParentChangeNotification : 1;
+ quint32 padding : 22;
// Optional stacking order
int globalStackingOrder;
diff --git a/src/gui/kernel/qwidget_x11.cpp b/src/gui/kernel/qwidget_x11.cpp
index e01489f..8d80e10 100644
--- a/src/gui/kernel/qwidget_x11.cpp
+++ b/src/gui/kernel/qwidget_x11.cpp
@@ -889,8 +889,10 @@ void QWidgetPrivate::create_sys(WId window, bool initializeWindow, bool destroyO
q->setWindowOpacity(maybeTopData()->opacity/255.);
}
- } else if (q->testAttribute(Qt::WA_SetCursor) && q->internalWinId()) {
+ } else if (q->internalWinId()) {
qt_x11_enforce_cursor(q);
+ if (QWidget *p = q->parentWidget()) // reset the cursor on the native parent
+ qt_x11_enforce_cursor(p);
}
if (extra && !extra->mask.isEmpty() && q->internalWinId())
diff --git a/src/imports/particles/qdeclarativeparticles.cpp b/src/imports/particles/qdeclarativeparticles.cpp
index edb69bc..3bd4b43 100644
--- a/src/imports/particles/qdeclarativeparticles.cpp
+++ b/src/imports/particles/qdeclarativeparticles.cpp
@@ -637,7 +637,7 @@ void QDeclarativeParticlesPrivate::updateOpacity(QDeclarativeParticle &p, int ag
snow, the lower one has particles expelled up like a fountain.
\qml
-import Qt 4.7
+import QtQuick 1.0
import Qt.labs.particles 1.0
Rectangle {
diff --git a/src/opengl/gl2paintengineex/qglengineshadermanager.cpp b/src/opengl/gl2paintengineex/qglengineshadermanager.cpp
index 40b3641..ac87784 100644
--- a/src/opengl/gl2paintengineex/qglengineshadermanager.cpp
+++ b/src/opengl/gl2paintengineex/qglengineshadermanager.cpp
@@ -41,6 +41,7 @@
#include "qglengineshadermanager_p.h"
#include "qglengineshadersource_p.h"
+#include "qpaintengineex_opengl2_p.h"
#if defined(QT_DEBUG)
#include <QMetaEnum>
@@ -248,6 +249,7 @@ QByteArray QGLEngineSharedShaders::snippetNameStr(SnippetName name)
#endif
// The address returned here will only be valid until next time this function is called.
+// The returned program is bound.
QGLEngineShaderProg *QGLEngineSharedShaders::findProgramInCache(const QGLEngineShaderProg &prog)
{
for (int i = 0; i < cachedPrograms.size(); ++i) {
@@ -255,6 +257,7 @@ QGLEngineShaderProg *QGLEngineSharedShaders::findProgramInCache(const QGLEngineS
if (*cachedProg == prog) {
// Move the program to the top of the list as a poor-man's cache algo
cachedPrograms.move(i, 0);
+ cachedProg->program->bind();
return cachedProg;
}
}
@@ -355,6 +358,14 @@ QGLEngineShaderProg *QGLEngineSharedShaders::findProgramInCache(const QGLEngineS
qWarning() << error;
break;
}
+
+ newProg->program->bind();
+
+ if (newProg->maskFragShader != QGLEngineSharedShaders::NoMaskFragmentShader) {
+ GLuint location = newProg->program->uniformLocation("maskTexture");
+ newProg->program->setUniformValue(location, QT_MASK_TEXTURE_UNIT);
+ }
+
if (cachedPrograms.count() > 30) {
// The cache is full, so delete the last 5 programs in the list.
// These programs will be least used, as a program is bumped to
@@ -769,10 +780,8 @@ bool QGLEngineShaderManager::useCorrectShaderProg()
// At this point, requiredProgram is fully populated so try to find the program in the cache
currentShaderProg = sharedShaders->findProgramInCache(requiredProgram);
- if (currentShaderProg) {
- currentShaderProg->program->bind();
- if (useCustomSrc)
- customSrcStage->setUniforms(currentShaderProg->program);
+ if (currentShaderProg && useCustomSrc) {
+ customSrcStage->setUniforms(currentShaderProg->program);
}
// Make sure all the vertex attribute arrays the program uses are enabled (and the ones it
diff --git a/src/opengl/gl2paintengineex/qpaintengineex_opengl2.cpp b/src/opengl/gl2paintengineex/qpaintengineex_opengl2.cpp
index ee59830..a81ed8e 100644
--- a/src/opengl/gl2paintengineex/qpaintengineex_opengl2.cpp
+++ b/src/opengl/gl2paintengineex/qpaintengineex_opengl2.cpp
@@ -98,6 +98,10 @@ QT_BEGIN_NAMESPACE
extern Q_GUI_EXPORT bool qt_cleartype_enabled;
#endif
+#ifdef Q_WS_MAC
+extern bool qt_applefontsmoothing_enabled;
+#endif
+
extern QImage qt_imageForBrush(int brushStyle, bool invert);
////////////////////////////////// Private Methods //////////////////////////////////////////
@@ -612,8 +616,6 @@ void QGL2PaintEngineExPrivate::transferMode(EngineMode newMode)
}
if (newMode == TextDrawingMode) {
- setVertexAttributePointer(QT_VERTEX_COORDS_ATTR, (GLfloat*)vertexCoordinateArray.data());
- setVertexAttributePointer(QT_TEXTURE_COORDS_ATTR, (GLfloat*)textureCoordinateArray.data());
shaderManager->setHasComplexGeometry(true);
} else {
shaderManager->setHasComplexGeometry(false);
@@ -1444,7 +1446,7 @@ namespace {
{
public:
QOpenGLStaticTextUserData()
- : QStaticTextUserData(OpenGLUserData)
+ : QStaticTextUserData(OpenGLUserData), cacheSize(0, 0)
{
}
@@ -1452,6 +1454,7 @@ namespace {
{
}
+ QSize cacheSize;
QGL2PEXVertexArray vertexCoordinateArray;
QGL2PEXVertexArray textureCoordinateArray;
};
@@ -1474,9 +1477,22 @@ void QGL2PaintEngineExPrivate::drawCachedGlyphs(QFontEngineGlyphCache::Type glyp
staticTextItem->fontEngine->setGlyphCache(ctx, cache);
}
- cache->setPaintEnginePrivate(this);
- cache->populate(staticTextItem->fontEngine, staticTextItem->numGlyphs, staticTextItem->glyphs,
- staticTextItem->glyphPositions);
+ bool recreateVertexArrays = false;
+ if (staticTextItem->userDataNeedsUpdate)
+ recreateVertexArrays = true;
+ else if (staticTextItem->userData == 0)
+ recreateVertexArrays = true;
+ else if (staticTextItem->userData->type != QStaticTextUserData::OpenGLUserData)
+ recreateVertexArrays = true;
+
+ // We only need to update the cache with new glyphs if we are actually going to recreate the vertex arrays.
+ // If the cache size has changed, we do need to regenerate the vertices, but we don't need to repopulate the
+ // cache, so this check is performed before we test if the cache size has changed.
+ if (recreateVertexArrays) {
+ cache->setPaintEnginePrivate(this);
+ cache->populate(staticTextItem->fontEngine, staticTextItem->numGlyphs, staticTextItem->glyphs,
+ staticTextItem->glyphPositions);
+ }
if (cache->width() == 0 || cache->height() == 0)
return;
@@ -1488,14 +1504,6 @@ void QGL2PaintEngineExPrivate::drawCachedGlyphs(QFontEngineGlyphCache::Type glyp
GLfloat dx = 1.0 / cache->width();
GLfloat dy = 1.0 / cache->height();
- bool recreateVertexArrays = false;
- if (staticTextItem->userDataNeedsUpdate)
- recreateVertexArrays = true;
- else if (staticTextItem->userData == 0)
- recreateVertexArrays = true;
- else if (staticTextItem->userData->type != QStaticTextUserData::OpenGLUserData)
- recreateVertexArrays = true;
-
// Use global arrays by default
QGL2PEXVertexArray *vertexCoordinates = &vertexCoordinateArray;
QGL2PEXVertexArray *textureCoordinates = &textureCoordinateArray;
@@ -1516,6 +1524,12 @@ void QGL2PaintEngineExPrivate::drawCachedGlyphs(QFontEngineGlyphCache::Type glyp
// Use cache if backend optimizations are turned on
vertexCoordinates = &userData->vertexCoordinateArray;
textureCoordinates = &userData->textureCoordinateArray;
+
+ QSize size(cache->width(), cache->height());
+ if (userData->cacheSize != size) {
+ recreateVertexArrays = true;
+ userData->cacheSize = size;
+ }
}
@@ -1630,7 +1644,6 @@ void QGL2PaintEngineExPrivate::drawCachedGlyphs(QFontEngineGlyphCache::Type glyp
glBindTexture(GL_TEXTURE_2D, cache->texture());
updateTextureFilter(GL_TEXTURE_2D, GL_REPEAT, false);
- shaderManager->currentProgram()->setUniformValue(location(QGLEngineShaderManager::MaskTexture), QT_MASK_TEXTURE_UNIT);
#if defined(QT_OPENGL_DRAWCACHEDGLYPHS_INDEX_ARRAY_VBO)
glDrawElements(GL_TRIANGLE_STRIP, 6 * staticTextItem->numGlyphs, GL_UNSIGNED_SHORT, 0);
#else
@@ -1660,13 +1673,26 @@ void QGL2PaintEngineExPrivate::drawCachedGlyphs(QFontEngineGlyphCache::Type glyp
}
//### TODO: Gamma correction
- glActiveTexture(GL_TEXTURE0 + QT_MASK_TEXTURE_UNIT);
- if (lastMaskTextureUsed != cache->texture()) {
- glBindTexture(GL_TEXTURE_2D, cache->texture());
- lastMaskTextureUsed = cache->texture();
+ QGLTextureGlyphCache::FilterMode filterMode = (s->matrix.type() > QTransform::TxTranslate)?QGLTextureGlyphCache::Linear:QGLTextureGlyphCache::Nearest;
+ if (lastMaskTextureUsed != cache->texture() || cache->filterMode() != filterMode) {
+
+ glActiveTexture(GL_TEXTURE0 + QT_MASK_TEXTURE_UNIT);
+ if (lastMaskTextureUsed != cache->texture()) {
+ glBindTexture(GL_TEXTURE_2D, cache->texture());
+ lastMaskTextureUsed = cache->texture();
+ }
+
+ if (cache->filterMode() != filterMode) {
+ if (filterMode == QGLTextureGlyphCache::Linear) {
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ } else {
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ }
+ cache->setFilterMode(filterMode);
+ }
}
- updateTextureFilter(GL_TEXTURE_2D, GL_REPEAT, s->matrix.type() > QTransform::TxTranslate);
- shaderManager->currentProgram()->setUniformValue(location(QGLEngineShaderManager::MaskTexture), QT_MASK_TEXTURE_UNIT);
#if defined(QT_OPENGL_DRAWCACHEDGLYPHS_INDEX_ARRAY_VBO)
glDrawElements(GL_TRIANGLE_STRIP, 6 * staticTextItem->numGlyphs, GL_UNSIGNED_SHORT, 0);
@@ -1846,6 +1872,9 @@ bool QGL2PaintEngineEx::begin(QPaintDevice *pdev)
#if defined(Q_WS_WIN)
if (qt_cleartype_enabled)
#endif
+#if defined(Q_WS_MAC)
+ if (qt_applefontsmoothing_enabled)
+#endif
d->glyphCacheType = QFontEngineGlyphCache::Raster_RGBMask;
#endif
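The qpaintengineex_opengl2.cpp hunks above do two related things: the vertex and texture coordinate arrays for a static text item are only rebuilt when its user data is missing, stale, or the glyph cache size has changed, and the glyph cache texture now remembers its filter mode so that GL_TEXTURE_MIN_FILTER/GL_TEXTURE_MAG_FILTER are only changed when the wanted mode actually differs. A small sketch of that filter-mode caching pattern, with the GL calls stubbed out and illustrative names (GlyphCacheTexture, applyFilter) that are not Qt API:

// Sketch only: remember the last filter applied and skip redundant state
// changes, as the glyph-cache code above does for its mask texture.
#include <iostream>

enum FilterMode { Nearest, Linear };

struct GlyphCacheTexture
{
    FilterMode filterMode;

    GlyphCacheTexture() : filterMode(Nearest) {}

    void applyFilter(FilterMode wanted)
    {
        if (filterMode == wanted)
            return; // nothing to do: avoids redundant glTexParameterf() calls
        // The real code sets GL_TEXTURE_MIN_FILTER and GL_TEXTURE_MAG_FILTER
        // to GL_LINEAR or GL_NEAREST at this point.
        std::cout << "switching filter to "
                  << (wanted == Linear ? "GL_LINEAR" : "GL_NEAREST") << std::endl;
        filterMode = wanted;
    }
};

int main()
{
    GlyphCacheTexture cache;
    bool transformed = true; // e.g. painter matrix type > QTransform::TxTranslate
    cache.applyFilter(transformed ? Linear : Nearest); // switches to linear
    cache.applyFilter(Linear);                         // no-op, already linear
    return 0;
}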
diff --git a/src/opengl/gl2paintengineex/qtextureglyphcache_gl.cpp b/src/opengl/gl2paintengineex/qtextureglyphcache_gl.cpp
index f353995..9a5bac0 100644
--- a/src/opengl/gl2paintengineex/qtextureglyphcache_gl.cpp
+++ b/src/opengl/gl2paintengineex/qtextureglyphcache_gl.cpp
@@ -57,6 +57,7 @@ QGLTextureGlyphCache::QGLTextureGlyphCache(QGLContext *context, QFontEngineGlyph
, ctx(context)
, m_width(0)
, m_height(0)
+ , m_filterMode(Nearest)
{
// broken FBO readback is a bug in the SGX 1.3 and 1.4 drivers for the N900 where
// copying between FBO's is broken if the texture is either GL_ALPHA or POT. The
@@ -114,6 +115,9 @@ void QGLTextureGlyphCache::createTextureData(int width, int height)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ m_filterMode = Nearest;
}
void QGLTextureGlyphCache::resizeTextureData(int width, int height)
@@ -152,6 +156,7 @@ void QGLTextureGlyphCache::resizeTextureData(int width, int height)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ m_filterMode = Nearest;
glBindTexture(GL_TEXTURE_2D, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT,
GL_TEXTURE_2D, tmp_texture, 0);
diff --git a/src/opengl/gl2paintengineex/qtextureglyphcache_gl_p.h b/src/opengl/gl2paintengineex/qtextureglyphcache_gl_p.h
index eb3693c..e22146d 100644
--- a/src/opengl/gl2paintengineex/qtextureglyphcache_gl_p.h
+++ b/src/opengl/gl2paintengineex/qtextureglyphcache_gl_p.h
@@ -83,6 +83,12 @@ public:
inline void setPaintEnginePrivate(QGL2PaintEngineExPrivate *p) { pex = p; }
+ enum FilterMode {
+ Nearest,
+ Linear
+ };
+ FilterMode filterMode() const { return m_filterMode; }
+ void setFilterMode(FilterMode m) { m_filterMode = m; }
public Q_SLOTS:
void contextDestroyed(const QGLContext *context) {
@@ -117,6 +123,8 @@ private:
int m_height;
QGLShaderProgram *m_program;
+
+ FilterMode m_filterMode;
};
QT_END_NAMESPACE
diff --git a/src/s60installs/bwins/QtCoreu.def b/src/s60installs/bwins/QtCoreu.def
index 0f66d72..5eeb244 100644
--- a/src/s60installs/bwins/QtCoreu.def
+++ b/src/s60installs/bwins/QtCoreu.def
@@ -4482,4 +4482,5 @@ EXPORTS
?textDirection@QLocale@@QBE?AW4LayoutDirection@Qt@@XZ @ 4481 NONAME ; enum Qt::LayoutDirection QLocale::textDirection(void) const
?msecsSinceReference@QElapsedTimer@@QBE_JXZ @ 4482 NONAME ; long long QElapsedTimer::msecsSinceReference(void) const
?selectThread@QEventDispatcherSymbian@@AAEAAVQSelectThread@@XZ @ 4483 NONAME ; class QSelectThread & QEventDispatcherSymbian::selectThread(void)
+ ?qt_symbian_SetupThreadHeap@@YAHHAAUSStdEpocThreadCreateInfo@@@Z @ 4484 NONAME ; int qt_symbian_SetupThreadHeap(int, struct SStdEpocThreadCreateInfo &)
diff --git a/src/s60installs/bwins/QtDeclarativeu.def b/src/s60installs/bwins/QtDeclarativeu.def
index f417892..cf0398a 100644
--- a/src/s60installs/bwins/QtDeclarativeu.def
+++ b/src/s60installs/bwins/QtDeclarativeu.def
@@ -142,7 +142,7 @@ EXPORTS
?setEnumOrFlag@QMetaPropertyBuilder@@QAEX_N@Z @ 141 NONAME ; void QMetaPropertyBuilder::setEnumOrFlag(bool)
?getStaticMetaObject@QDeclarativeRectangle@@SAABUQMetaObject@@XZ @ 142 NONAME ; struct QMetaObject const & QDeclarativeRectangle::getStaticMetaObject(void)
?isValid@QDeclarativeProperty@@QBE_NXZ @ 143 NONAME ; bool QDeclarativeProperty::isValid(void) const
- ?isConnected@QDeclarativeDebugClient@@QBE_NXZ @ 144 NONAME ; bool QDeclarativeDebugClient::isConnected(void) const
+ ?isConnected@QDeclarativeDebugClient@@QBE_NXZ @ 144 NONAME ABSENT ; bool QDeclarativeDebugClient::isConnected(void) const
?enabled@QDeclarativeBinding@@QBE_NXZ @ 145 NONAME ; bool QDeclarativeBinding::enabled(void) const
?setSource@QDeclarativeView@@QAEXABVQUrl@@@Z @ 146 NONAME ; void QDeclarativeView::setSource(class QUrl const &)
??_EQDeclarativeDebugService@@UAE@I@Z @ 147 NONAME ; QDeclarativeDebugService::~QDeclarativeDebugService(unsigned int)
@@ -461,7 +461,7 @@ EXPORTS
?qt_metacall@QDeclarativeContext@@UAEHW4Call@QMetaObject@@HPAPAX@Z @ 460 NONAME ; int QDeclarativeContext::qt_metacall(enum QMetaObject::Call, int, void * *)
??_EQDeclarativeValueType@@UAE@I@Z @ 461 NONAME ; QDeclarativeValueType::~QDeclarativeValueType(unsigned int)
?qt_metacall@QDeclarativeState@@UAEHW4Call@QMetaObject@@HPAPAX@Z @ 462 NONAME ; int QDeclarativeState::qt_metacall(enum QMetaObject::Call, int, void * *)
- ?isEnabled@QDeclarativeDebugService@@QBE_NXZ @ 463 NONAME ; bool QDeclarativeDebugService::isEnabled(void) const
+ ?isEnabled@QDeclarativeDebugService@@QBE_NXZ @ 463 NONAME ABSENT ; bool QDeclarativeDebugService::isEnabled(void) const
?stateChanged@QDeclarativeDebugWatch@@IAEXW4State@1@@Z @ 464 NONAME ; void QDeclarativeDebugWatch::stateChanged(enum QDeclarativeDebugWatch::State)
??0QMetaMethodBuilder@@AAE@PBVQMetaObjectBuilder@@H@Z @ 465 NONAME ; QMetaMethodBuilder::QMetaMethodBuilder(class QMetaObjectBuilder const *, int)
??4QDeclarativeListReference@@QAEAAV0@ABV0@@Z @ 466 NONAME ; class QDeclarativeListReference & QDeclarativeListReference::operator=(class QDeclarativeListReference const &)
@@ -1028,7 +1028,7 @@ EXPORTS
?qmlInfo@@YA?AVQDeclarativeInfo@@PBVQObject@@ABVQDeclarativeError@@@Z @ 1027 NONAME ; class QDeclarativeInfo qmlInfo(class QObject const *, class QDeclarativeError const &)
?staticMetaObject@QDeclarativeText@@2UQMetaObject@@B @ 1028 NONAME ; struct QMetaObject const QDeclarativeText::staticMetaObject
?color@QDeclarativeRectangle@@QBE?AVQColor@@XZ @ 1029 NONAME ; class QColor QDeclarativeRectangle::color(void) const
- ?isEnabled@QDeclarativeDebugClient@@QBE_NXZ @ 1030 NONAME ; bool QDeclarativeDebugClient::isEnabled(void) const
+ ?isEnabled@QDeclarativeDebugClient@@QBE_NXZ @ 1030 NONAME ABSENT ; bool QDeclarativeDebugClient::isEnabled(void) const
?send@QPacketProtocol@@QAEXABVQPacket@@@Z @ 1031 NONAME ; void QPacketProtocol::send(class QPacket const &)
?width@QDeclarativePixmap@@QBEHXZ @ 1032 NONAME ; int QDeclarativePixmap::width(void) const
?error@QDeclarativeCustomParser@@IAEXABVQDeclarativeCustomParserNode@@ABVQString@@@Z @ 1033 NONAME ; void QDeclarativeCustomParser::error(class QDeclarativeCustomParserNode const &, class QString const &)
@@ -1147,7 +1147,7 @@ EXPORTS
?removeNotifySignal@QMetaPropertyBuilder@@QAEXXZ @ 1146 NONAME ; void QMetaPropertyBuilder::removeNotifySignal(void)
?trUtf8@QDeclarativeDebugService@@SA?AVQString@@PBD0@Z @ 1147 NONAME ; class QString QDeclarativeDebugService::trUtf8(char const *, char const *)
?setImportPathList@QDeclarativeEngine@@QAEXABVQStringList@@@Z @ 1148 NONAME ; void QDeclarativeEngine::setImportPathList(class QStringList const &)
- ?enabledChanged@QDeclarativeDebugService@@MAEX_N@Z @ 1149 NONAME ; void QDeclarativeDebugService::enabledChanged(bool)
+ ?enabledChanged@QDeclarativeDebugService@@MAEX_N@Z @ 1149 NONAME ABSENT ; void QDeclarativeDebugService::enabledChanged(bool)
?addWatch@QDeclarativeEngineDebug@@QAEPAVQDeclarativeDebugWatch@@ABVQDeclarativeDebugObjectReference@@PAVQObject@@@Z @ 1150 NONAME ; class QDeclarativeDebugWatch * QDeclarativeEngineDebug::addWatch(class QDeclarativeDebugObjectReference const &, class QObject *)
?asAST@Variant@QDeclarativeParser@@QBEPAVNode@AST@QDeclarativeJS@@XZ @ 1151 NONAME ; class QDeclarativeJS::AST::Node * QDeclarativeParser::Variant::asAST(void) const
?indexOfClassInfo@QMetaObjectBuilder@@QAEHABVQByteArray@@@Z @ 1152 NONAME ; int QMetaObjectBuilder::indexOfClassInfo(class QByteArray const &)
@@ -1386,7 +1386,7 @@ EXPORTS
?qmlTypes@QDeclarativeMetaType@@SA?AV?$QList@PAVQDeclarativeType@@@@XZ @ 1385 NONAME ; class QList<class QDeclarativeType *> QDeclarativeMetaType::qmlTypes(void)
?valueTypeCoreIndex@QDeclarativePropertyPrivate@@SAHABVQDeclarativeProperty@@@Z @ 1386 NONAME ; int QDeclarativePropertyPrivate::valueTypeCoreIndex(class QDeclarativeProperty const &)
?writeEnumProperty@QDeclarativePropertyPrivate@@SA_NABVQMetaProperty@@HPAVQObject@@ABVQVariant@@H@Z @ 1387 NONAME ; bool QDeclarativePropertyPrivate::writeEnumProperty(class QMetaProperty const &, int, class QObject *, class QVariant const &, int)
- ?setEnabled@QDeclarativeDebugClient@@QAEX_N@Z @ 1388 NONAME ; void QDeclarativeDebugClient::setEnabled(bool)
+ ?setEnabled@QDeclarativeDebugClient@@QAEX_N@Z @ 1388 NONAME ABSENT ; void QDeclarativeDebugClient::setEnabled(bool)
??1QMetaObjectBuilder@@UAE@XZ @ 1389 NONAME ; QMetaObjectBuilder::~QMetaObjectBuilder(void)
?tr@QDeclarativeStateOperation@@SA?AVQString@@PBD0@Z @ 1390 NONAME ; class QString QDeclarativeStateOperation::tr(char const *, char const *)
?clear@QPacket@@QAEXXZ @ 1391 NONAME ; void QPacket::clear(void)
@@ -1715,89 +1715,127 @@ EXPORTS
??0QDeclarativeListModel@@AAE@PBV0@PAVQDeclarativeListModelWorkerAgent@@@Z @ 1714 NONAME ; QDeclarativeListModel::QDeclarativeListModel(class QDeclarativeListModel const *, class QDeclarativeListModelWorkerAgent *)
?inWorkerThread@QDeclarativeListModel@@ABE_NXZ @ 1715 NONAME ; bool QDeclarativeListModel::inWorkerThread(void) const
?canMove@QDeclarativeListModel@@ABE_NHHH@Z @ 1716 NONAME ; bool QDeclarativeListModel::canMove(int, int, int) const
- ?setLoops@QDeclarativeAbstractAnimation@@QAEXH@Z @ 1717 NONAME ; void QDeclarativeAbstractAnimation::setLoops(int)
- ?trUtf8@QDeclarativeAbstractAnimation@@SA?AVQString@@PBD0@Z @ 1718 NONAME ; class QString QDeclarativeAbstractAnimation::trUtf8(char const *, char const *)
- ?staticMetaObject@QDeclarativeAbstractAnimation@@2UQMetaObject@@B @ 1719 NONAME ; struct QMetaObject const QDeclarativeAbstractAnimation::staticMetaObject
- ?setRunning@QDeclarativeTimer@@QAEX_N@Z @ 1720 NONAME ; void QDeclarativeTimer::setRunning(bool)
- ?tr@QDeclarativeTimer@@SA?AVQString@@PBD0@Z @ 1721 NONAME ; class QString QDeclarativeTimer::tr(char const *, char const *)
- ?qt_metacall@QDeclarativeTimer@@UAEHW4Call@QMetaObject@@HPAPAX@Z @ 1722 NONAME ; int QDeclarativeTimer::qt_metacall(enum QMetaObject::Call, int, void * *)
- ?setPaused@QDeclarativeAbstractAnimation@@QAEX_N@Z @ 1723 NONAME ; void QDeclarativeAbstractAnimation::setPaused(bool)
- ?setRepeating@QDeclarativeTimer@@QAEX_N@Z @ 1724 NONAME ; void QDeclarativeTimer::setRepeating(bool)
- ?interval@QDeclarativeTimer@@QBEHXZ @ 1725 NONAME ; int QDeclarativeTimer::interval(void) const
- ?start@QDeclarativeTimer@@QAEXXZ @ 1726 NONAME ; void QDeclarativeTimer::start(void)
- ?transition@QDeclarativeAbstractAnimation@@UAEXAAV?$QList@VQDeclarativeAction@@@@AAV?$QList@VQDeclarativeProperty@@@@W4TransitionDirection@1@@Z @ 1727 NONAME ; void QDeclarativeAbstractAnimation::transition(class QList<class QDeclarativeAction> &, class QList<class QDeclarativeProperty> &, enum QDeclarativeAbstractAnimation::TransitionDirection)
- ?componentComplete@QDeclarativeAbstractAnimation@@UAEXXZ @ 1728 NONAME ; void QDeclarativeAbstractAnimation::componentComplete(void)
- ?runningChanged@QDeclarativeAbstractAnimation@@IAEX_N@Z @ 1729 NONAME ; void QDeclarativeAbstractAnimation::runningChanged(bool)
- ?trUtf8@QDeclarativeAbstractAnimation@@SA?AVQString@@PBD0H@Z @ 1730 NONAME ; class QString QDeclarativeAbstractAnimation::trUtf8(char const *, char const *, int)
- ?metaObject@QDeclarativeTimer@@UBEPBUQMetaObject@@XZ @ 1731 NONAME ; struct QMetaObject const * QDeclarativeTimer::metaObject(void) const
- ?setGroup@QDeclarativeAbstractAnimation@@QAEXPAVQDeclarativeAnimationGroup@@@Z @ 1732 NONAME ; void QDeclarativeAbstractAnimation::setGroup(class QDeclarativeAnimationGroup *)
- ?isRepeating@QDeclarativeTimer@@QBE_NXZ @ 1733 NONAME ; bool QDeclarativeTimer::isRepeating(void) const
- ?setTriggeredOnStart@QDeclarativeTimer@@QAEX_N@Z @ 1734 NONAME ; void QDeclarativeTimer::setTriggeredOnStart(bool)
- ?currentTime@QDeclarativeAbstractAnimation@@QAEHXZ @ 1735 NONAME ; int QDeclarativeAbstractAnimation::currentTime(void)
- ??1QDeclarativeAbstractAnimation@@UAE@XZ @ 1736 NONAME ; QDeclarativeAbstractAnimation::~QDeclarativeAbstractAnimation(void)
- ?triggered@QDeclarativeTimer@@IAEXXZ @ 1737 NONAME ; void QDeclarativeTimer::triggered(void)
- ?finished@QDeclarativeTimer@@AAEXXZ @ 1738 NONAME ; void QDeclarativeTimer::finished(void)
- ?pausedChanged@QDeclarativeAbstractAnimation@@IAEX_N@Z @ 1739 NONAME ; void QDeclarativeAbstractAnimation::pausedChanged(bool)
- ?complete@QDeclarativeAbstractAnimation@@QAEXXZ @ 1740 NONAME ; void QDeclarativeAbstractAnimation::complete(void)
- ?setRunning@QDeclarativeAbstractAnimation@@QAEX_N@Z @ 1741 NONAME ; void QDeclarativeAbstractAnimation::setRunning(bool)
- ?completed@QDeclarativeAbstractAnimation@@IAEXXZ @ 1742 NONAME ; void QDeclarativeAbstractAnimation::completed(void)
- ?trUtf8@QDeclarativeTimer@@SA?AVQString@@PBD0@Z @ 1743 NONAME ; class QString QDeclarativeTimer::trUtf8(char const *, char const *)
- ?loopCountChanged@QDeclarativeAbstractAnimation@@IAEXH@Z @ 1744 NONAME ; void QDeclarativeAbstractAnimation::loopCountChanged(int)
- ?repeatChanged@QDeclarativeTimer@@IAEXXZ @ 1745 NONAME ; void QDeclarativeTimer::repeatChanged(void)
- ?setDisableUserControl@QDeclarativeAbstractAnimation@@QAEXXZ @ 1746 NONAME ; void QDeclarativeAbstractAnimation::setDisableUserControl(void)
- ?setDefaultTarget@QDeclarativeAbstractAnimation@@QAEXABVQDeclarativeProperty@@@Z @ 1747 NONAME ; void QDeclarativeAbstractAnimation::setDefaultTarget(class QDeclarativeProperty const &)
- ?triggeredOnStart@QDeclarativeTimer@@QBE_NXZ @ 1748 NONAME ; bool QDeclarativeTimer::triggeredOnStart(void) const
- ?notifyRunningChanged@QDeclarativeAbstractAnimation@@AAEX_N@Z @ 1749 NONAME ; void QDeclarativeAbstractAnimation::notifyRunningChanged(bool)
- ?componentComplete@QDeclarativeTimer@@MAEXXZ @ 1750 NONAME ; void QDeclarativeTimer::componentComplete(void)
- ?tr@QDeclarativeAbstractAnimation@@SA?AVQString@@PBD0@Z @ 1751 NONAME ; class QString QDeclarativeAbstractAnimation::tr(char const *, char const *)
- ?isRunning@QDeclarativeAbstractAnimation@@QBE_NXZ @ 1752 NONAME ; bool QDeclarativeAbstractAnimation::isRunning(void) const
- ?d_func@QDeclarativeAbstractAnimation@@ABEPBVQDeclarativeAbstractAnimationPrivate@@XZ @ 1753 NONAME ; class QDeclarativeAbstractAnimationPrivate const * QDeclarativeAbstractAnimation::d_func(void) const
- ??_EQDeclarativeAbstractAnimation@@UAE@I@Z @ 1754 NONAME ; QDeclarativeAbstractAnimation::~QDeclarativeAbstractAnimation(unsigned int)
- ?d_func@QDeclarativeAbstractAnimation@@AAEPAVQDeclarativeAbstractAnimationPrivate@@XZ @ 1755 NONAME ; class QDeclarativeAbstractAnimationPrivate * QDeclarativeAbstractAnimation::d_func(void)
- ?componentFinalized@QDeclarativeAbstractAnimation@@AAEXXZ @ 1756 NONAME ; void QDeclarativeAbstractAnimation::componentFinalized(void)
- ??_EQDeclarativeTimer@@UAE@I@Z @ 1757 NONAME ; QDeclarativeTimer::~QDeclarativeTimer(unsigned int)
- ?pause@QDeclarativeAbstractAnimation@@QAEXXZ @ 1758 NONAME ; void QDeclarativeAbstractAnimation::pause(void)
- ?stop@QDeclarativeTimer@@QAEXXZ @ 1759 NONAME ; void QDeclarativeTimer::stop(void)
- ?timelineComplete@QDeclarativeAbstractAnimation@@AAEXXZ @ 1760 NONAME ; void QDeclarativeAbstractAnimation::timelineComplete(void)
- ?setAlwaysRunToEnd@QDeclarativeAbstractAnimation@@QAEX_N@Z @ 1761 NONAME ; void QDeclarativeAbstractAnimation::setAlwaysRunToEnd(bool)
- ?classBegin@QDeclarativeAbstractAnimation@@UAEXXZ @ 1762 NONAME ; void QDeclarativeAbstractAnimation::classBegin(void)
- ?d_func@QDeclarativeTimer@@AAEPAVQDeclarativeTimerPrivate@@XZ @ 1763 NONAME ; class QDeclarativeTimerPrivate * QDeclarativeTimer::d_func(void)
- ??0QDeclarativeAbstractAnimation@@QAE@PAVQObject@@@Z @ 1764 NONAME ; QDeclarativeAbstractAnimation::QDeclarativeAbstractAnimation(class QObject *)
- ?metaObject@QDeclarativeAbstractAnimation@@UBEPBUQMetaObject@@XZ @ 1765 NONAME ; struct QMetaObject const * QDeclarativeAbstractAnimation::metaObject(void) const
- ?tr@QDeclarativeAbstractAnimation@@SA?AVQString@@PBD0H@Z @ 1766 NONAME ; class QString QDeclarativeAbstractAnimation::tr(char const *, char const *, int)
- ?started@QDeclarativeAbstractAnimation@@IAEXXZ @ 1767 NONAME ; void QDeclarativeAbstractAnimation::started(void)
- ?setInterval@QDeclarativeTimer@@QAEXH@Z @ 1768 NONAME ; void QDeclarativeTimer::setInterval(int)
- ?d_func@QDeclarativeTimer@@ABEPBVQDeclarativeTimerPrivate@@XZ @ 1769 NONAME ; class QDeclarativeTimerPrivate const * QDeclarativeTimer::d_func(void) const
- ?staticMetaObject@QDeclarativeTimer@@2UQMetaObject@@B @ 1770 NONAME ; struct QMetaObject const QDeclarativeTimer::staticMetaObject
- ?qt_metacast@QDeclarativeAbstractAnimation@@UAEPAXPBD@Z @ 1771 NONAME ; void * QDeclarativeAbstractAnimation::qt_metacast(char const *)
- ?setCurrentTime@QDeclarativeAbstractAnimation@@QAEXH@Z @ 1772 NONAME ; void QDeclarativeAbstractAnimation::setCurrentTime(int)
- ?restart@QDeclarativeAbstractAnimation@@QAEXXZ @ 1773 NONAME ; void QDeclarativeAbstractAnimation::restart(void)
- ??0QDeclarativeAbstractAnimation@@IAE@AAVQDeclarativeAbstractAnimationPrivate@@PAVQObject@@@Z @ 1774 NONAME ; QDeclarativeAbstractAnimation::QDeclarativeAbstractAnimation(class QDeclarativeAbstractAnimationPrivate &, class QObject *)
- ?resume@QDeclarativeAbstractAnimation@@QAEXXZ @ 1775 NONAME ; void QDeclarativeAbstractAnimation::resume(void)
- ?runningChanged@QDeclarativeTimer@@IAEXXZ @ 1776 NONAME ; void QDeclarativeTimer::runningChanged(void)
- ?ticked@QDeclarativeTimer@@AAEXXZ @ 1777 NONAME ; void QDeclarativeTimer::ticked(void)
- ?trUtf8@QDeclarativeTimer@@SA?AVQString@@PBD0H@Z @ 1778 NONAME ; class QString QDeclarativeTimer::trUtf8(char const *, char const *, int)
- ??0QDeclarativeTimer@@QAE@PAVQObject@@@Z @ 1779 NONAME ; QDeclarativeTimer::QDeclarativeTimer(class QObject *)
- ?loops@QDeclarativeAbstractAnimation@@QBEHXZ @ 1780 NONAME ; int QDeclarativeAbstractAnimation::loops(void) const
- ?setTarget@QDeclarativeAbstractAnimation@@EAEXABVQDeclarativeProperty@@@Z @ 1781 NONAME ; void QDeclarativeAbstractAnimation::setTarget(class QDeclarativeProperty const &)
- ?alwaysRunToEnd@QDeclarativeAbstractAnimation@@QBE_NXZ @ 1782 NONAME ; bool QDeclarativeAbstractAnimation::alwaysRunToEnd(void) const
- ?tr@QDeclarativeTimer@@SA?AVQString@@PBD0H@Z @ 1783 NONAME ; class QString QDeclarativeTimer::tr(char const *, char const *, int)
- ?intervalChanged@QDeclarativeTimer@@IAEXXZ @ 1784 NONAME ; void QDeclarativeTimer::intervalChanged(void)
- ?isPaused@QDeclarativeAbstractAnimation@@QBE_NXZ @ 1785 NONAME ; bool QDeclarativeAbstractAnimation::isPaused(void) const
- ?getStaticMetaObject@QDeclarativeAbstractAnimation@@SAABUQMetaObject@@XZ @ 1786 NONAME ; struct QMetaObject const & QDeclarativeAbstractAnimation::getStaticMetaObject(void)
- ?group@QDeclarativeAbstractAnimation@@QBEPAVQDeclarativeAnimationGroup@@XZ @ 1787 NONAME ; class QDeclarativeAnimationGroup * QDeclarativeAbstractAnimation::group(void) const
- ?classBegin@QDeclarativeTimer@@MAEXXZ @ 1788 NONAME ; void QDeclarativeTimer::classBegin(void)
- ?restart@QDeclarativeTimer@@QAEXXZ @ 1789 NONAME ; void QDeclarativeTimer::restart(void)
- ??1QDeclarativeTimer@@UAE@XZ @ 1790 NONAME ; QDeclarativeTimer::~QDeclarativeTimer(void)
- ?getStaticMetaObject@QDeclarativeTimer@@SAABUQMetaObject@@XZ @ 1791 NONAME ; struct QMetaObject const & QDeclarativeTimer::getStaticMetaObject(void)
- ?qt_metacast@QDeclarativeTimer@@UAEPAXPBD@Z @ 1792 NONAME ; void * QDeclarativeTimer::qt_metacast(char const *)
- ?alwaysRunToEndChanged@QDeclarativeAbstractAnimation@@IAEX_N@Z @ 1793 NONAME ; void QDeclarativeAbstractAnimation::alwaysRunToEndChanged(bool)
- ?triggeredOnStartChanged@QDeclarativeTimer@@IAEXXZ @ 1794 NONAME ; void QDeclarativeTimer::triggeredOnStartChanged(void)
- ?isRunning@QDeclarativeTimer@@QBE_NXZ @ 1795 NONAME ; bool QDeclarativeTimer::isRunning(void) const
- ?update@QDeclarativeTimer@@AAEXXZ @ 1796 NONAME ; void QDeclarativeTimer::update(void)
- ?stop@QDeclarativeAbstractAnimation@@QAEXXZ @ 1797 NONAME ; void QDeclarativeAbstractAnimation::stop(void)
- ?start@QDeclarativeAbstractAnimation@@QAEXXZ @ 1798 NONAME ; void QDeclarativeAbstractAnimation::start(void)
- ?qt_metacall@QDeclarativeAbstractAnimation@@UAEHW4Call@QMetaObject@@HPAPAX@Z @ 1799 NONAME ; int QDeclarativeAbstractAnimation::qt_metacall(enum QMetaObject::Call, int, void * *)
- ?getScriptEngine@QDeclarativeDebugHelper@@SAPAVQScriptEngine@@PAVQDeclarativeEngine@@@Z @ 1800 NONAME ; class QScriptEngine * QDeclarativeDebugHelper::getScriptEngine(class QDeclarativeEngine *)
- ?setAnimationSlowDownFactor@QDeclarativeDebugHelper@@SAXM@Z @ 1801 NONAME ; void QDeclarativeDebugHelper::setAnimationSlowDownFactor(float)
+ ?getScriptEngine@QDeclarativeDebugHelper@@SAPAVQScriptEngine@@PAVQDeclarativeEngine@@@Z @ 1717 NONAME ; class QScriptEngine * QDeclarativeDebugHelper::getScriptEngine(class QDeclarativeEngine *)
+ ?setAnimationSlowDownFactor@QDeclarativeDebugHelper@@SAXM@Z @ 1718 NONAME ; void QDeclarativeDebugHelper::setAnimationSlowDownFactor(float)
+ ?add@QDeclarativeBasePositioner@@QBEPAVQDeclarativeTransition@@XZ @ 1719 NONAME ; class QDeclarativeTransition * QDeclarativeBasePositioner::add(void) const
+ ?setLoops@QDeclarativeAbstractAnimation@@QAEXH@Z @ 1720 NONAME ; void QDeclarativeAbstractAnimation::setLoops(int)
+ ?trUtf8@QDeclarativeAbstractAnimation@@SA?AVQString@@PBD0@Z @ 1721 NONAME ; class QString QDeclarativeAbstractAnimation::trUtf8(char const *, char const *)
+ ?tr@QDeclarativeBasePositioner@@SA?AVQString@@PBD0@Z @ 1722 NONAME ; class QString QDeclarativeBasePositioner::tr(char const *, char const *)
+ ?staticMetaObject@QDeclarativeAbstractAnimation@@2UQMetaObject@@B @ 1723 NONAME ; struct QMetaObject const QDeclarativeAbstractAnimation::staticMetaObject
+ ?setMove@QDeclarativeBasePositioner@@QAEXPAVQDeclarativeTransition@@@Z @ 1724 NONAME ; void QDeclarativeBasePositioner::setMove(class QDeclarativeTransition *)
+ ?setRunning@QDeclarativeTimer@@QAEX_N@Z @ 1725 NONAME ; void QDeclarativeTimer::setRunning(bool)
+ ?tr@QDeclarativeTimer@@SA?AVQString@@PBD0@Z @ 1726 NONAME ; class QString QDeclarativeTimer::tr(char const *, char const *)
+ ?qt_metacall@QDeclarativeTimer@@UAEHW4Call@QMetaObject@@HPAPAX@Z @ 1727 NONAME ; int QDeclarativeTimer::qt_metacall(enum QMetaObject::Call, int, void * *)
+ ?setPaused@QDeclarativeAbstractAnimation@@QAEX_N@Z @ 1728 NONAME ; void QDeclarativeAbstractAnimation::setPaused(bool)
+ ?d_func@QDeclarativeBasePositioner@@ABEPBVQDeclarativeBasePositionerPrivate@@XZ @ 1729 NONAME ; class QDeclarativeBasePositionerPrivate const * QDeclarativeBasePositioner::d_func(void) const
+ ?setRepeating@QDeclarativeTimer@@QAEX_N@Z @ 1730 NONAME ; void QDeclarativeTimer::setRepeating(bool)
+ ?interval@QDeclarativeTimer@@QBEHXZ @ 1731 NONAME ; int QDeclarativeTimer::interval(void) const
+ ?start@QDeclarativeTimer@@QAEXXZ @ 1732 NONAME ; void QDeclarativeTimer::start(void)
+ ?transition@QDeclarativeAbstractAnimation@@UAEXAAV?$QList@VQDeclarativeAction@@@@AAV?$QList@VQDeclarativeProperty@@@@W4TransitionDirection@1@@Z @ 1733 NONAME ; void QDeclarativeAbstractAnimation::transition(class QList<class QDeclarativeAction> &, class QList<class QDeclarativeProperty> &, enum QDeclarativeAbstractAnimation::TransitionDirection)
+ ?componentComplete@QDeclarativeAbstractAnimation@@UAEXXZ @ 1734 NONAME ; void QDeclarativeAbstractAnimation::componentComplete(void)
+ ?statusChanged@QDeclarativeDebugService@@MAEXW4Status@1@@Z @ 1735 NONAME ; void QDeclarativeDebugService::statusChanged(enum QDeclarativeDebugService::Status)
+ ?runningChanged@QDeclarativeAbstractAnimation@@IAEX_N@Z @ 1736 NONAME ; void QDeclarativeAbstractAnimation::runningChanged(bool)
+ ?trUtf8@QDeclarativeAbstractAnimation@@SA?AVQString@@PBD0H@Z @ 1737 NONAME ; class QString QDeclarativeAbstractAnimation::trUtf8(char const *, char const *, int)
+ ??_EQDeclarativeBasePositioner@@UAE@I@Z @ 1738 NONAME ; QDeclarativeBasePositioner::~QDeclarativeBasePositioner(unsigned int)
+ ?metaObject@QDeclarativeTimer@@UBEPBUQMetaObject@@XZ @ 1739 NONAME ; struct QMetaObject const * QDeclarativeTimer::metaObject(void) const
+ ?setGroup@QDeclarativeAbstractAnimation@@QAEXPAVQDeclarativeAnimationGroup@@@Z @ 1740 NONAME ; void QDeclarativeAbstractAnimation::setGroup(class QDeclarativeAnimationGroup *)
+ ?isRepeating@QDeclarativeTimer@@QBE_NXZ @ 1741 NONAME ; bool QDeclarativeTimer::isRepeating(void) const
+ ?setTriggeredOnStart@QDeclarativeTimer@@QAEX_N@Z @ 1742 NONAME ; void QDeclarativeTimer::setTriggeredOnStart(bool)
+ ?currentTime@QDeclarativeAbstractAnimation@@QAEHXZ @ 1743 NONAME ; int QDeclarativeAbstractAnimation::currentTime(void)
+ ?status@QDeclarativeEngineDebug@@QBE?AW4Status@1@XZ @ 1744 NONAME ; enum QDeclarativeEngineDebug::Status QDeclarativeEngineDebug::status(void) const
+ ??1QDeclarativeAbstractAnimation@@UAE@XZ @ 1745 NONAME ; QDeclarativeAbstractAnimation::~QDeclarativeAbstractAnimation(void)
+ ?triggered@QDeclarativeTimer@@IAEXXZ @ 1746 NONAME ; void QDeclarativeTimer::triggered(void)
+ ?getStaticMetaObject@QDeclarativeBasePositioner@@SAABUQMetaObject@@XZ @ 1747 NONAME ; struct QMetaObject const & QDeclarativeBasePositioner::getStaticMetaObject(void)
+ ?finished@QDeclarativeTimer@@AAEXXZ @ 1748 NONAME ; void QDeclarativeTimer::finished(void)
+ ?pausedChanged@QDeclarativeAbstractAnimation@@IAEX_N@Z @ 1749 NONAME ; void QDeclarativeAbstractAnimation::pausedChanged(bool)
+ ?complete@QDeclarativeAbstractAnimation@@QAEXXZ @ 1750 NONAME ; void QDeclarativeAbstractAnimation::complete(void)
+ ?setRunning@QDeclarativeAbstractAnimation@@QAEX_N@Z @ 1751 NONAME ; void QDeclarativeAbstractAnimation::setRunning(bool)
+ ?trUtf8@QDeclarativeBasePositioner@@SA?AVQString@@PBD0H@Z @ 1752 NONAME ; class QString QDeclarativeBasePositioner::trUtf8(char const *, char const *, int)
+ ?trUtf8@QDeclarativeBasePositioner@@SA?AVQString@@PBD0@Z @ 1753 NONAME ; class QString QDeclarativeBasePositioner::trUtf8(char const *, char const *)
+ ?completed@QDeclarativeAbstractAnimation@@IAEXXZ @ 1754 NONAME ; void QDeclarativeAbstractAnimation::completed(void)
+ ?trUtf8@QDeclarativeTimer@@SA?AVQString@@PBD0@Z @ 1755 NONAME ; class QString QDeclarativeTimer::trUtf8(char const *, char const *)
+ ?loopCountChanged@QDeclarativeAbstractAnimation@@IAEXH@Z @ 1756 NONAME ; void QDeclarativeAbstractAnimation::loopCountChanged(int)
+ ?repeatChanged@QDeclarativeTimer@@IAEXXZ @ 1757 NONAME ; void QDeclarativeTimer::repeatChanged(void)
+ ?setDisableUserControl@QDeclarativeAbstractAnimation@@QAEXXZ @ 1758 NONAME ; void QDeclarativeAbstractAnimation::setDisableUserControl(void)
+ ?componentComplete@QDeclarativeBasePositioner@@MAEXXZ @ 1759 NONAME ; void QDeclarativeBasePositioner::componentComplete(void)
+ ?setDefaultTarget@QDeclarativeAbstractAnimation@@QAEXABVQDeclarativeProperty@@@Z @ 1760 NONAME ; void QDeclarativeAbstractAnimation::setDefaultTarget(class QDeclarativeProperty const &)
+ ?staticMetaObject@QDeclarativeBasePositioner@@2UQMetaObject@@B @ 1761 NONAME ; struct QMetaObject const QDeclarativeBasePositioner::staticMetaObject
+ ?triggeredOnStart@QDeclarativeTimer@@QBE_NXZ @ 1762 NONAME ; bool QDeclarativeTimer::triggeredOnStart(void) const
+ ?notifyRunningChanged@QDeclarativeAbstractAnimation@@AAEX_N@Z @ 1763 NONAME ; void QDeclarativeAbstractAnimation::notifyRunningChanged(bool)
+ ?statusChanged@QDeclarativeDebugClient@@MAEXW4Status@1@@Z @ 1764 NONAME ; void QDeclarativeDebugClient::statusChanged(enum QDeclarativeDebugClient::Status)
+ ??0QDeclarativeBasePositioner@@IAE@AAVQDeclarativeBasePositionerPrivate@@W4PositionerType@0@PAVQDeclarativeItem@@@Z @ 1765 NONAME ; QDeclarativeBasePositioner::QDeclarativeBasePositioner(class QDeclarativeBasePositionerPrivate &, enum QDeclarativeBasePositioner::PositionerType, class QDeclarativeItem *)
+ ?componentComplete@QDeclarativeTimer@@MAEXXZ @ 1766 NONAME ; void QDeclarativeTimer::componentComplete(void)
+ ?tr@QDeclarativeAbstractAnimation@@SA?AVQString@@PBD0@Z @ 1767 NONAME ; class QString QDeclarativeAbstractAnimation::tr(char const *, char const *)
+ ?isRunning@QDeclarativeAbstractAnimation@@QBE_NXZ @ 1768 NONAME ; bool QDeclarativeAbstractAnimation::isRunning(void) const
+ ?d_func@QDeclarativeAbstractAnimation@@ABEPBVQDeclarativeAbstractAnimationPrivate@@XZ @ 1769 NONAME ; class QDeclarativeAbstractAnimationPrivate const * QDeclarativeAbstractAnimation::d_func(void) const
+ ??_EQDeclarativeAbstractAnimation@@UAE@I@Z @ 1770 NONAME ; QDeclarativeAbstractAnimation::~QDeclarativeAbstractAnimation(unsigned int)
+ ??0QDeclarativeBasePositioner@@QAE@W4PositionerType@0@PAVQDeclarativeItem@@@Z @ 1771 NONAME ; QDeclarativeBasePositioner::QDeclarativeBasePositioner(enum QDeclarativeBasePositioner::PositionerType, class QDeclarativeItem *)
+ ?qt_metacall@QDeclarativeBasePositioner@@UAEHW4Call@QMetaObject@@HPAPAX@Z @ 1772 NONAME ; int QDeclarativeBasePositioner::qt_metacall(enum QMetaObject::Call, int, void * *)
+ ?status@QDeclarativeDebugClient@@QBE?AW4Status@1@XZ @ 1773 NONAME ; enum QDeclarativeDebugClient::Status QDeclarativeDebugClient::status(void) const
+ ?prePositioning@QDeclarativeBasePositioner@@IAEXXZ @ 1774 NONAME ; void QDeclarativeBasePositioner::prePositioning(void)
+ ?finishApplyTransitions@QDeclarativeBasePositioner@@IAEXXZ @ 1775 NONAME ; void QDeclarativeBasePositioner::finishApplyTransitions(void)
+ ?d_func@QDeclarativeAbstractAnimation@@AAEPAVQDeclarativeAbstractAnimationPrivate@@XZ @ 1776 NONAME ; class QDeclarativeAbstractAnimationPrivate * QDeclarativeAbstractAnimation::d_func(void)
+ ?componentFinalized@QDeclarativeAbstractAnimation@@AAEXXZ @ 1777 NONAME ; void QDeclarativeAbstractAnimation::componentFinalized(void)
+ ??_EQDeclarativeTimer@@UAE@I@Z @ 1778 NONAME ; QDeclarativeTimer::~QDeclarativeTimer(unsigned int)
+ ?pause@QDeclarativeAbstractAnimation@@QAEXXZ @ 1779 NONAME ; void QDeclarativeAbstractAnimation::pause(void)
+ ?stop@QDeclarativeTimer@@QAEXXZ @ 1780 NONAME ; void QDeclarativeTimer::stop(void)
+ ?timelineComplete@QDeclarativeAbstractAnimation@@AAEXXZ @ 1781 NONAME ; void QDeclarativeAbstractAnimation::timelineComplete(void)
+ ?d_func@QDeclarativeBasePositioner@@AAEPAVQDeclarativeBasePositionerPrivate@@XZ @ 1782 NONAME ; class QDeclarativeBasePositionerPrivate * QDeclarativeBasePositioner::d_func(void)
+ ?setAlwaysRunToEnd@QDeclarativeAbstractAnimation@@QAEX_N@Z @ 1783 NONAME ; void QDeclarativeAbstractAnimation::setAlwaysRunToEnd(bool)
+ ?classBegin@QDeclarativeAbstractAnimation@@UAEXXZ @ 1784 NONAME ; void QDeclarativeAbstractAnimation::classBegin(void)
+ ?d_func@QDeclarativeTimer@@AAEPAVQDeclarativeTimerPrivate@@XZ @ 1785 NONAME ; class QDeclarativeTimerPrivate * QDeclarativeTimer::d_func(void)
+ ?spacing@QDeclarativeBasePositioner@@QBEHXZ @ 1786 NONAME ; int QDeclarativeBasePositioner::spacing(void) const
+ ??0QDeclarativeAbstractAnimation@@QAE@PAVQObject@@@Z @ 1787 NONAME ; QDeclarativeAbstractAnimation::QDeclarativeAbstractAnimation(class QObject *)
+ ?metaObject@QDeclarativeAbstractAnimation@@UBEPBUQMetaObject@@XZ @ 1788 NONAME ; struct QMetaObject const * QDeclarativeAbstractAnimation::metaObject(void) const
+ ?tr@QDeclarativeAbstractAnimation@@SA?AVQString@@PBD0H@Z @ 1789 NONAME ; class QString QDeclarativeAbstractAnimation::tr(char const *, char const *, int)
+ ?started@QDeclarativeAbstractAnimation@@IAEXXZ @ 1790 NONAME ; void QDeclarativeAbstractAnimation::started(void)
+ ?setInterval@QDeclarativeTimer@@QAEXH@Z @ 1791 NONAME ; void QDeclarativeTimer::setInterval(int)
+ ?statusChanged@QDeclarativeEngineDebug@@IAEXW4Status@1@@Z @ 1792 NONAME ; void QDeclarativeEngineDebug::statusChanged(enum QDeclarativeEngineDebug::Status)
+ ?d_func@QDeclarativeTimer@@ABEPBVQDeclarativeTimerPrivate@@XZ @ 1793 NONAME ; class QDeclarativeTimerPrivate const * QDeclarativeTimer::d_func(void) const
+ ?setSpacing@QDeclarativeBasePositioner@@QAEXH@Z @ 1794 NONAME ; void QDeclarativeBasePositioner::setSpacing(int)
+ ?staticMetaObject@QDeclarativeTimer@@2UQMetaObject@@B @ 1795 NONAME ; struct QMetaObject const QDeclarativeTimer::staticMetaObject
+ ?positionY@QDeclarativeBasePositioner@@IAEXHABVPositionedItem@1@@Z @ 1796 NONAME ; void QDeclarativeBasePositioner::positionY(int, class QDeclarativeBasePositioner::PositionedItem const &)
+ ?qt_metacast@QDeclarativeAbstractAnimation@@UAEPAXPBD@Z @ 1797 NONAME ; void * QDeclarativeAbstractAnimation::qt_metacast(char const *)
+ ?setAdd@QDeclarativeBasePositioner@@QAEXPAVQDeclarativeTransition@@@Z @ 1798 NONAME ; void QDeclarativeBasePositioner::setAdd(class QDeclarativeTransition *)
+ ?setCurrentTime@QDeclarativeAbstractAnimation@@QAEXH@Z @ 1799 NONAME ; void QDeclarativeAbstractAnimation::setCurrentTime(int)
+ ?attachedPropertiesId@QDeclarativeType@@QBEHXZ @ 1800 NONAME ; int QDeclarativeType::attachedPropertiesId(void) const
+ ?positionX@QDeclarativeBasePositioner@@IAEXHABVPositionedItem@1@@Z @ 1801 NONAME ; void QDeclarativeBasePositioner::positionX(int, class QDeclarativeBasePositioner::PositionedItem const &)
+ ?restart@QDeclarativeAbstractAnimation@@QAEXXZ @ 1802 NONAME ; void QDeclarativeAbstractAnimation::restart(void)
+ ?itemChange@QDeclarativeBasePositioner@@MAE?AVQVariant@@W4GraphicsItemChange@QGraphicsItem@@ABV2@@Z @ 1803 NONAME ; class QVariant QDeclarativeBasePositioner::itemChange(enum QGraphicsItem::GraphicsItemChange, class QVariant const &)
+ ??0QDeclarativeAbstractAnimation@@IAE@AAVQDeclarativeAbstractAnimationPrivate@@PAVQObject@@@Z @ 1804 NONAME ; QDeclarativeAbstractAnimation::QDeclarativeAbstractAnimation(class QDeclarativeAbstractAnimationPrivate &, class QObject *)
+ ?resume@QDeclarativeAbstractAnimation@@QAEXXZ @ 1805 NONAME ; void QDeclarativeAbstractAnimation::resume(void)
+ ?runningChanged@QDeclarativeTimer@@IAEXXZ @ 1806 NONAME ; void QDeclarativeTimer::runningChanged(void)
+ ?ticked@QDeclarativeTimer@@AAEXXZ @ 1807 NONAME ; void QDeclarativeTimer::ticked(void)
+ ?graphicsWidgetGeometryChanged@QDeclarativeBasePositioner@@IAEXXZ @ 1808 NONAME ; void QDeclarativeBasePositioner::graphicsWidgetGeometryChanged(void)
+ ?trUtf8@QDeclarativeTimer@@SA?AVQString@@PBD0H@Z @ 1809 NONAME ; class QString QDeclarativeTimer::trUtf8(char const *, char const *, int)
+ ??0QDeclarativeTimer@@QAE@PAVQObject@@@Z @ 1810 NONAME ; QDeclarativeTimer::QDeclarativeTimer(class QObject *)
+ ?loops@QDeclarativeAbstractAnimation@@QBEHXZ @ 1811 NONAME ; int QDeclarativeAbstractAnimation::loops(void) const
+ ?setTarget@QDeclarativeAbstractAnimation@@EAEXABVQDeclarativeProperty@@@Z @ 1812 NONAME ; void QDeclarativeAbstractAnimation::setTarget(class QDeclarativeProperty const &)
+ ?alwaysRunToEnd@QDeclarativeAbstractAnimation@@QBE_NXZ @ 1813 NONAME ; bool QDeclarativeAbstractAnimation::alwaysRunToEnd(void) const
+ ?tr@QDeclarativeTimer@@SA?AVQString@@PBD0H@Z @ 1814 NONAME ; class QString QDeclarativeTimer::tr(char const *, char const *, int)
+ ?status@QDeclarativeDebugService@@QBE?AW4Status@1@XZ @ 1815 NONAME ; enum QDeclarativeDebugService::Status QDeclarativeDebugService::status(void) const
+ ?intervalChanged@QDeclarativeTimer@@IAEXXZ @ 1816 NONAME ; void QDeclarativeTimer::intervalChanged(void)
+ ?isPaused@QDeclarativeAbstractAnimation@@QBE_NXZ @ 1817 NONAME ; bool QDeclarativeAbstractAnimation::isPaused(void) const
+ ?getStaticMetaObject@QDeclarativeAbstractAnimation@@SAABUQMetaObject@@XZ @ 1818 NONAME ; struct QMetaObject const & QDeclarativeAbstractAnimation::getStaticMetaObject(void)
+ ?group@QDeclarativeAbstractAnimation@@QBEPAVQDeclarativeAnimationGroup@@XZ @ 1819 NONAME ; class QDeclarativeAnimationGroup * QDeclarativeAbstractAnimation::group(void) const
+ ?classBegin@QDeclarativeTimer@@MAEXXZ @ 1820 NONAME ; void QDeclarativeTimer::classBegin(void)
+ ?restart@QDeclarativeTimer@@QAEXXZ @ 1821 NONAME ; void QDeclarativeTimer::restart(void)
+ ?move@QDeclarativeBasePositioner@@QBEPAVQDeclarativeTransition@@XZ @ 1822 NONAME ; class QDeclarativeTransition * QDeclarativeBasePositioner::move(void) const
+ ?spacingChanged@QDeclarativeBasePositioner@@IAEXXZ @ 1823 NONAME ; void QDeclarativeBasePositioner::spacingChanged(void)
+ ?qt_metacast@QDeclarativeBasePositioner@@UAEPAXPBD@Z @ 1824 NONAME ; void * QDeclarativeBasePositioner::qt_metacast(char const *)
+ ??1QDeclarativeTimer@@UAE@XZ @ 1825 NONAME ; QDeclarativeTimer::~QDeclarativeTimer(void)
+ ?getStaticMetaObject@QDeclarativeTimer@@SAABUQMetaObject@@XZ @ 1826 NONAME ; struct QMetaObject const & QDeclarativeTimer::getStaticMetaObject(void)
+ ?tr@QDeclarativeBasePositioner@@SA?AVQString@@PBD0H@Z @ 1827 NONAME ; class QString QDeclarativeBasePositioner::tr(char const *, char const *, int)
+ ??1QDeclarativeBasePositioner@@UAE@XZ @ 1828 NONAME ; QDeclarativeBasePositioner::~QDeclarativeBasePositioner(void)
+ ?moveChanged@QDeclarativeBasePositioner@@IAEXXZ @ 1829 NONAME ; void QDeclarativeBasePositioner::moveChanged(void)
+ ?qt_metacast@QDeclarativeTimer@@UAEPAXPBD@Z @ 1830 NONAME ; void * QDeclarativeTimer::qt_metacast(char const *)
+ ?metaObject@QDeclarativeBasePositioner@@UBEPBUQMetaObject@@XZ @ 1831 NONAME ; struct QMetaObject const * QDeclarativeBasePositioner::metaObject(void) const
+ ?alwaysRunToEndChanged@QDeclarativeAbstractAnimation@@IAEX_N@Z @ 1832 NONAME ; void QDeclarativeAbstractAnimation::alwaysRunToEndChanged(bool)
+ ?triggeredOnStartChanged@QDeclarativeTimer@@IAEXXZ @ 1833 NONAME ; void QDeclarativeTimer::triggeredOnStartChanged(void)
+ ?isRunning@QDeclarativeTimer@@QBE_NXZ @ 1834 NONAME ; bool QDeclarativeTimer::isRunning(void) const
+ ?update@QDeclarativeTimer@@AAEXXZ @ 1835 NONAME ; void QDeclarativeTimer::update(void)
+ ?stop@QDeclarativeAbstractAnimation@@QAEXXZ @ 1836 NONAME ; void QDeclarativeAbstractAnimation::stop(void)
+ ?addChanged@QDeclarativeBasePositioner@@IAEXXZ @ 1837 NONAME ; void QDeclarativeBasePositioner::addChanged(void)
+ ?start@QDeclarativeAbstractAnimation@@QAEXXZ @ 1838 NONAME ; void QDeclarativeAbstractAnimation::start(void)
+ ?qt_metacall@QDeclarativeAbstractAnimation@@UAEHW4Call@QMetaObject@@HPAPAX@Z @ 1839 NONAME ; int QDeclarativeAbstractAnimation::qt_metacall(enum QMetaObject::Call, int, void * *)
diff --git a/src/s60installs/eabi/QtCoreu.def b/src/s60installs/eabi/QtCoreu.def
index 01679be..f496839 100644
--- a/src/s60installs/eabi/QtCoreu.def
+++ b/src/s60installs/eabi/QtCoreu.def
@@ -3711,4 +3711,5 @@ EXPORTS
_ZlsR11QDataStreamRK12QEasingCurve @ 3710 NONAME
_ZltRK13QElapsedTimerS1_ @ 3711 NONAME
_ZrsR11QDataStreamR12QEasingCurve @ 3712 NONAME
+ _Z26qt_symbian_SetupThreadHeapiR24SStdEpocThreadCreateInfo @ 3713 NONAME
diff --git a/src/s60installs/eabi/QtDeclarativeu.def b/src/s60installs/eabi/QtDeclarativeu.def
index c4cd9b6..d4084cc 100644
--- a/src/s60installs/eabi/QtDeclarativeu.def
+++ b/src/s60installs/eabi/QtDeclarativeu.def
@@ -673,7 +673,7 @@ EXPORTS
_ZN22QDeclarativeTransitionD0Ev @ 672 NONAME
_ZN22QDeclarativeTransitionD1Ev @ 673 NONAME
_ZN22QDeclarativeTransitionD2Ev @ 674 NONAME
- _ZN23QDeclarativeDebugClient10setEnabledEb @ 675 NONAME
+ _ZN23QDeclarativeDebugClient10setEnabledEb @ 675 NONAME ABSENT
_ZN23QDeclarativeDebugClient11qt_metacallEN11QMetaObject4CallEiPPv @ 676 NONAME
_ZN23QDeclarativeDebugClient11qt_metacastEPKc @ 677 NONAME
_ZN23QDeclarativeDebugClient11sendMessageERK10QByteArray @ 678 NONAME
@@ -766,7 +766,7 @@ EXPORTS
_ZN24QDeclarativeDebugService11qt_metacallEN11QMetaObject4CallEiPPv @ 765 NONAME
_ZN24QDeclarativeDebugService11qt_metacastEPKc @ 766 NONAME
_ZN24QDeclarativeDebugService11sendMessageERK10QByteArray @ 767 NONAME
- _ZN24QDeclarativeDebugService14enabledChangedEb @ 768 NONAME
+ _ZN24QDeclarativeDebugService14enabledChangedEb @ 768 NONAME ABSENT
_ZN24QDeclarativeDebugService14objectToStringEP7QObject @ 769 NONAME
_ZN24QDeclarativeDebugService15messageReceivedERK10QByteArray @ 770 NONAME
_ZN24QDeclarativeDebugService16staticMetaObjectE @ 771 NONAME DATA 16
@@ -1390,9 +1390,9 @@ EXPORTS
_ZNK22QDeclarativeTransition7toStateEv @ 1389 NONAME
_ZNK22QDeclarativeTransition9fromStateEv @ 1390 NONAME
_ZNK23QDeclarativeDebugClient10metaObjectEv @ 1391 NONAME
- _ZNK23QDeclarativeDebugClient11isConnectedEv @ 1392 NONAME
+ _ZNK23QDeclarativeDebugClient11isConnectedEv @ 1392 NONAME ABSENT
_ZNK23QDeclarativeDebugClient4nameEv @ 1393 NONAME
- _ZNK23QDeclarativeDebugClient9isEnabledEv @ 1394 NONAME
+ _ZNK23QDeclarativeDebugClient9isEnabledEv @ 1394 NONAME ABSENT
_ZNK23QDeclarativeDomDocument10rootObjectEv @ 1395 NONAME
_ZNK23QDeclarativeDomDocument6errorsEv @ 1396 NONAME
_ZNK23QDeclarativeDomDocument7importsEv @ 1397 NONAME
@@ -1427,7 +1427,7 @@ EXPORTS
_ZNK24QDeclarativeCustomParser12evaluateEnumERK10QByteArray @ 1426 NONAME
_ZNK24QDeclarativeDebugService10metaObjectEv @ 1427 NONAME
_ZNK24QDeclarativeDebugService4nameEv @ 1428 NONAME
- _ZNK24QDeclarativeDebugService9isEnabledEv @ 1429 NONAME
+ _ZNK24QDeclarativeDebugService9isEnabledEv @ 1429 NONAME ABSENT
_ZNK24QDeclarativeDomComponent13componentRootEv @ 1430 NONAME
_ZNK24QDeclarativeScriptString11scopeObjectEv @ 1431 NONAME
_ZNK24QDeclarativeScriptString6scriptEv @ 1432 NONAME
@@ -1747,90 +1747,140 @@ EXPORTS
_ZN21QDeclarativeListModelC1EPKS_P32QDeclarativeListModelWorkerAgent @ 1746 NONAME
_ZN21QDeclarativeListModelC2EPKS_P32QDeclarativeListModelWorkerAgent @ 1747 NONAME
_ZNK21QDeclarativeListModel14inWorkerThreadEv @ 1748 NONAME
- _ZN17QDeclarativeTimer10classBeginEv @ 1749 NONAME
- _ZN17QDeclarativeTimer10setRunningEb @ 1750 NONAME
- _ZN17QDeclarativeTimer11qt_metacallEN11QMetaObject4CallEiPPv @ 1751 NONAME
- _ZN17QDeclarativeTimer11qt_metacastEPKc @ 1752 NONAME
- _ZN17QDeclarativeTimer11setIntervalEi @ 1753 NONAME
- _ZN17QDeclarativeTimer12setRepeatingEb @ 1754 NONAME
- _ZN17QDeclarativeTimer13repeatChangedEv @ 1755 NONAME
- _ZN17QDeclarativeTimer14runningChangedEv @ 1756 NONAME
- _ZN17QDeclarativeTimer15intervalChangedEv @ 1757 NONAME
- _ZN17QDeclarativeTimer16staticMetaObjectE @ 1758 NONAME DATA 16
- _ZN17QDeclarativeTimer17componentCompleteEv @ 1759 NONAME
- _ZN17QDeclarativeTimer19getStaticMetaObjectEv @ 1760 NONAME
- _ZN17QDeclarativeTimer19setTriggeredOnStartEb @ 1761 NONAME
- _ZN17QDeclarativeTimer23triggeredOnStartChangedEv @ 1762 NONAME
- _ZN17QDeclarativeTimer4stopEv @ 1763 NONAME
- _ZN17QDeclarativeTimer5startEv @ 1764 NONAME
- _ZN17QDeclarativeTimer6tickedEv @ 1765 NONAME
- _ZN17QDeclarativeTimer6updateEv @ 1766 NONAME
- _ZN17QDeclarativeTimer7restartEv @ 1767 NONAME
- _ZN17QDeclarativeTimer8finishedEv @ 1768 NONAME
- _ZN17QDeclarativeTimer9triggeredEv @ 1769 NONAME
- _ZN17QDeclarativeTimerC1EP7QObject @ 1770 NONAME
- _ZN17QDeclarativeTimerC2EP7QObject @ 1771 NONAME
- _ZN29QDeclarativeAbstractAnimation10classBeginEv @ 1772 NONAME
- _ZN29QDeclarativeAbstractAnimation10setRunningEb @ 1773 NONAME
- _ZN29QDeclarativeAbstractAnimation10transitionER5QListI18QDeclarativeActionERS0_I20QDeclarativePropertyENS_19TransitionDirectionE @ 1774 NONAME
- _ZN29QDeclarativeAbstractAnimation11currentTimeEv @ 1775 NONAME
- _ZN29QDeclarativeAbstractAnimation11qt_metacallEN11QMetaObject4CallEiPPv @ 1776 NONAME
- _ZN29QDeclarativeAbstractAnimation11qt_metacastEPKc @ 1777 NONAME
- _ZN29QDeclarativeAbstractAnimation13pausedChangedEb @ 1778 NONAME
- _ZN29QDeclarativeAbstractAnimation14runningChangedEb @ 1779 NONAME
- _ZN29QDeclarativeAbstractAnimation14setCurrentTimeEi @ 1780 NONAME
- _ZN29QDeclarativeAbstractAnimation16loopCountChangedEi @ 1781 NONAME
- _ZN29QDeclarativeAbstractAnimation16setDefaultTargetERK20QDeclarativeProperty @ 1782 NONAME
- _ZN29QDeclarativeAbstractAnimation16staticMetaObjectE @ 1783 NONAME DATA 16
- _ZN29QDeclarativeAbstractAnimation16timelineCompleteEv @ 1784 NONAME
- _ZN29QDeclarativeAbstractAnimation17componentCompleteEv @ 1785 NONAME
- _ZN29QDeclarativeAbstractAnimation17setAlwaysRunToEndEb @ 1786 NONAME
- _ZN29QDeclarativeAbstractAnimation18componentFinalizedEv @ 1787 NONAME
- _ZN29QDeclarativeAbstractAnimation19getStaticMetaObjectEv @ 1788 NONAME
- _ZN29QDeclarativeAbstractAnimation20notifyRunningChangedEb @ 1789 NONAME
- _ZN29QDeclarativeAbstractAnimation21alwaysRunToEndChangedEb @ 1790 NONAME
- _ZN29QDeclarativeAbstractAnimation21setDisableUserControlEv @ 1791 NONAME
- _ZN29QDeclarativeAbstractAnimation4stopEv @ 1792 NONAME
- _ZN29QDeclarativeAbstractAnimation5pauseEv @ 1793 NONAME
- _ZN29QDeclarativeAbstractAnimation5startEv @ 1794 NONAME
- _ZN29QDeclarativeAbstractAnimation6resumeEv @ 1795 NONAME
- _ZN29QDeclarativeAbstractAnimation7restartEv @ 1796 NONAME
- _ZN29QDeclarativeAbstractAnimation7startedEv @ 1797 NONAME
- _ZN29QDeclarativeAbstractAnimation8completeEv @ 1798 NONAME
- _ZN29QDeclarativeAbstractAnimation8setGroupEP26QDeclarativeAnimationGroup @ 1799 NONAME
- _ZN29QDeclarativeAbstractAnimation8setLoopsEi @ 1800 NONAME
- _ZN29QDeclarativeAbstractAnimation9completedEv @ 1801 NONAME
- _ZN29QDeclarativeAbstractAnimation9setPausedEb @ 1802 NONAME
- _ZN29QDeclarativeAbstractAnimation9setTargetERK20QDeclarativeProperty @ 1803 NONAME
- _ZN29QDeclarativeAbstractAnimationC2EP7QObject @ 1804 NONAME
- _ZN29QDeclarativeAbstractAnimationC2ER36QDeclarativeAbstractAnimationPrivateP7QObject @ 1805 NONAME
- _ZN29QDeclarativeAbstractAnimationD0Ev @ 1806 NONAME
- _ZN29QDeclarativeAbstractAnimationD1Ev @ 1807 NONAME
- _ZN29QDeclarativeAbstractAnimationD2Ev @ 1808 NONAME
- _ZNK17QDeclarativeTimer10metaObjectEv @ 1809 NONAME
- _ZNK17QDeclarativeTimer11isRepeatingEv @ 1810 NONAME
- _ZNK17QDeclarativeTimer16triggeredOnStartEv @ 1811 NONAME
- _ZNK17QDeclarativeTimer8intervalEv @ 1812 NONAME
- _ZNK17QDeclarativeTimer9isRunningEv @ 1813 NONAME
- _ZNK29QDeclarativeAbstractAnimation10metaObjectEv @ 1814 NONAME
- _ZNK29QDeclarativeAbstractAnimation14alwaysRunToEndEv @ 1815 NONAME
- _ZNK29QDeclarativeAbstractAnimation5groupEv @ 1816 NONAME
- _ZNK29QDeclarativeAbstractAnimation5loopsEv @ 1817 NONAME
- _ZNK29QDeclarativeAbstractAnimation8isPausedEv @ 1818 NONAME
- _ZNK29QDeclarativeAbstractAnimation9isRunningEv @ 1819 NONAME
- _ZTI17QDeclarativeTimer @ 1820 NONAME
- _ZTI29QDeclarativeAbstractAnimation @ 1821 NONAME
- _ZTV17QDeclarativeTimer @ 1822 NONAME
- _ZTV29QDeclarativeAbstractAnimation @ 1823 NONAME
- _ZThn12_N29QDeclarativeAbstractAnimation10classBeginEv @ 1824 NONAME
- _ZThn12_N29QDeclarativeAbstractAnimation17componentCompleteEv @ 1825 NONAME
- _ZThn12_N29QDeclarativeAbstractAnimationD0Ev @ 1826 NONAME
- _ZThn12_N29QDeclarativeAbstractAnimationD1Ev @ 1827 NONAME
- _ZThn8_N17QDeclarativeTimer10classBeginEv @ 1828 NONAME
- _ZThn8_N17QDeclarativeTimer17componentCompleteEv @ 1829 NONAME
- _ZThn8_N29QDeclarativeAbstractAnimation9setTargetERK20QDeclarativeProperty @ 1830 NONAME
- _ZThn8_N29QDeclarativeAbstractAnimationD0Ev @ 1831 NONAME
- _ZThn8_N29QDeclarativeAbstractAnimationD1Ev @ 1832 NONAME
- _ZN23QDeclarativeDebugHelper15getScriptEngineEP18QDeclarativeEngine @ 1833 NONAME
- _ZN23QDeclarativeDebugHelper26setAnimationSlowDownFactorEf @ 1834 NONAME
+ _ZN23QDeclarativeDebugHelper15getScriptEngineEP18QDeclarativeEngine @ 1749 NONAME
+ _ZN23QDeclarativeDebugHelper26setAnimationSlowDownFactorEf @ 1750 NONAME
+ _ZN17QDeclarativeTimer10classBeginEv @ 1751 NONAME
+ _ZN17QDeclarativeTimer10setRunningEb @ 1752 NONAME
+ _ZN17QDeclarativeTimer11qt_metacallEN11QMetaObject4CallEiPPv @ 1753 NONAME
+ _ZN17QDeclarativeTimer11qt_metacastEPKc @ 1754 NONAME
+ _ZN17QDeclarativeTimer11setIntervalEi @ 1755 NONAME
+ _ZN17QDeclarativeTimer12setRepeatingEb @ 1756 NONAME
+ _ZN17QDeclarativeTimer13repeatChangedEv @ 1757 NONAME
+ _ZN17QDeclarativeTimer14runningChangedEv @ 1758 NONAME
+ _ZN17QDeclarativeTimer15intervalChangedEv @ 1759 NONAME
+ _ZN17QDeclarativeTimer16staticMetaObjectE @ 1760 NONAME DATA 16
+ _ZN17QDeclarativeTimer17componentCompleteEv @ 1761 NONAME
+ _ZN17QDeclarativeTimer19getStaticMetaObjectEv @ 1762 NONAME
+ _ZN17QDeclarativeTimer19setTriggeredOnStartEb @ 1763 NONAME
+ _ZN17QDeclarativeTimer23triggeredOnStartChangedEv @ 1764 NONAME
+ _ZN17QDeclarativeTimer4stopEv @ 1765 NONAME
+ _ZN17QDeclarativeTimer5startEv @ 1766 NONAME
+ _ZN17QDeclarativeTimer6tickedEv @ 1767 NONAME
+ _ZN17QDeclarativeTimer6updateEv @ 1768 NONAME
+ _ZN17QDeclarativeTimer7restartEv @ 1769 NONAME
+ _ZN17QDeclarativeTimer8finishedEv @ 1770 NONAME
+ _ZN17QDeclarativeTimer9triggeredEv @ 1771 NONAME
+ _ZN17QDeclarativeTimerC1EP7QObject @ 1772 NONAME
+ _ZN17QDeclarativeTimerC2EP7QObject @ 1773 NONAME
+ _ZN23QDeclarativeDebugClient13statusChangedENS_6StatusE @ 1774 NONAME
+ _ZN23QDeclarativeDebugClientD0Ev @ 1775 NONAME
+ _ZN23QDeclarativeDebugClientD1Ev @ 1776 NONAME
+ _ZN23QDeclarativeDebugClientD2Ev @ 1777 NONAME
+ _ZN23QDeclarativeEngineDebug13statusChangedENS_6StatusE @ 1778 NONAME
+ _ZN24QDeclarativeDebugService13statusChangedENS_6StatusE @ 1779 NONAME
+ _ZN24QDeclarativeDebugServiceD0Ev @ 1780 NONAME
+ _ZN24QDeclarativeDebugServiceD1Ev @ 1781 NONAME
+ _ZN24QDeclarativeDebugServiceD2Ev @ 1782 NONAME
+ _ZN26QDeclarativeBasePositioner10addChangedEv @ 1783 NONAME
+ _ZN26QDeclarativeBasePositioner10itemChangeEN13QGraphicsItem18GraphicsItemChangeERK8QVariant @ 1784 NONAME
+ _ZN26QDeclarativeBasePositioner10setSpacingEi @ 1785 NONAME
+ _ZN26QDeclarativeBasePositioner11moveChangedEv @ 1786 NONAME
+ _ZN26QDeclarativeBasePositioner11qt_metacallEN11QMetaObject4CallEiPPv @ 1787 NONAME
+ _ZN26QDeclarativeBasePositioner11qt_metacastEPKc @ 1788 NONAME
+ _ZN26QDeclarativeBasePositioner14prePositioningEv @ 1789 NONAME
+ _ZN26QDeclarativeBasePositioner14spacingChangedEv @ 1790 NONAME
+ _ZN26QDeclarativeBasePositioner16staticMetaObjectE @ 1791 NONAME DATA 16
+ _ZN26QDeclarativeBasePositioner17componentCompleteEv @ 1792 NONAME
+ _ZN26QDeclarativeBasePositioner19getStaticMetaObjectEv @ 1793 NONAME
+ _ZN26QDeclarativeBasePositioner22finishApplyTransitionsEv @ 1794 NONAME
+ _ZN26QDeclarativeBasePositioner29graphicsWidgetGeometryChangedEv @ 1795 NONAME
+ _ZN26QDeclarativeBasePositioner6setAddEP22QDeclarativeTransition @ 1796 NONAME
+ _ZN26QDeclarativeBasePositioner7setMoveEP22QDeclarativeTransition @ 1797 NONAME
+ _ZN26QDeclarativeBasePositioner9positionXEiRKNS_14PositionedItemE @ 1798 NONAME
+ _ZN26QDeclarativeBasePositioner9positionYEiRKNS_14PositionedItemE @ 1799 NONAME
+ _ZN26QDeclarativeBasePositionerC2ENS_14PositionerTypeEP16QDeclarativeItem @ 1800 NONAME
+ _ZN26QDeclarativeBasePositionerC2ER33QDeclarativeBasePositionerPrivateNS_14PositionerTypeEP16QDeclarativeItem @ 1801 NONAME
+ _ZN26QDeclarativeBasePositionerD0Ev @ 1802 NONAME
+ _ZN26QDeclarativeBasePositionerD1Ev @ 1803 NONAME
+ _ZN26QDeclarativeBasePositionerD2Ev @ 1804 NONAME
+ _ZN27QDeclarativeDebugConnectionD0Ev @ 1805 NONAME
+ _ZN27QDeclarativeDebugConnectionD1Ev @ 1806 NONAME
+ _ZN27QDeclarativeDebugConnectionD2Ev @ 1807 NONAME
+ _ZN29QDeclarativeAbstractAnimation10classBeginEv @ 1808 NONAME
+ _ZN29QDeclarativeAbstractAnimation10setRunningEb @ 1809 NONAME
+ _ZN29QDeclarativeAbstractAnimation10transitionER5QListI18QDeclarativeActionERS0_I20QDeclarativePropertyENS_19TransitionDirectionE @ 1810 NONAME
+ _ZN29QDeclarativeAbstractAnimation11currentTimeEv @ 1811 NONAME
+ _ZN29QDeclarativeAbstractAnimation11qt_metacallEN11QMetaObject4CallEiPPv @ 1812 NONAME
+ _ZN29QDeclarativeAbstractAnimation11qt_metacastEPKc @ 1813 NONAME
+ _ZN29QDeclarativeAbstractAnimation13pausedChangedEb @ 1814 NONAME
+ _ZN29QDeclarativeAbstractAnimation14runningChangedEb @ 1815 NONAME
+ _ZN29QDeclarativeAbstractAnimation14setCurrentTimeEi @ 1816 NONAME
+ _ZN29QDeclarativeAbstractAnimation16loopCountChangedEi @ 1817 NONAME
+ _ZN29QDeclarativeAbstractAnimation16setDefaultTargetERK20QDeclarativeProperty @ 1818 NONAME
+ _ZN29QDeclarativeAbstractAnimation16staticMetaObjectE @ 1819 NONAME DATA 16
+ _ZN29QDeclarativeAbstractAnimation16timelineCompleteEv @ 1820 NONAME
+ _ZN29QDeclarativeAbstractAnimation17componentCompleteEv @ 1821 NONAME
+ _ZN29QDeclarativeAbstractAnimation17setAlwaysRunToEndEb @ 1822 NONAME
+ _ZN29QDeclarativeAbstractAnimation18componentFinalizedEv @ 1823 NONAME
+ _ZN29QDeclarativeAbstractAnimation19getStaticMetaObjectEv @ 1824 NONAME
+ _ZN29QDeclarativeAbstractAnimation20notifyRunningChangedEb @ 1825 NONAME
+ _ZN29QDeclarativeAbstractAnimation21alwaysRunToEndChangedEb @ 1826 NONAME
+ _ZN29QDeclarativeAbstractAnimation21setDisableUserControlEv @ 1827 NONAME
+ _ZN29QDeclarativeAbstractAnimation4stopEv @ 1828 NONAME
+ _ZN29QDeclarativeAbstractAnimation5pauseEv @ 1829 NONAME
+ _ZN29QDeclarativeAbstractAnimation5startEv @ 1830 NONAME
+ _ZN29QDeclarativeAbstractAnimation6resumeEv @ 1831 NONAME
+ _ZN29QDeclarativeAbstractAnimation7restartEv @ 1832 NONAME
+ _ZN29QDeclarativeAbstractAnimation7startedEv @ 1833 NONAME
+ _ZN29QDeclarativeAbstractAnimation8completeEv @ 1834 NONAME
+ _ZN29QDeclarativeAbstractAnimation8setGroupEP26QDeclarativeAnimationGroup @ 1835 NONAME
+ _ZN29QDeclarativeAbstractAnimation8setLoopsEi @ 1836 NONAME
+ _ZN29QDeclarativeAbstractAnimation9completedEv @ 1837 NONAME
+ _ZN29QDeclarativeAbstractAnimation9setPausedEb @ 1838 NONAME
+ _ZN29QDeclarativeAbstractAnimation9setTargetERK20QDeclarativeProperty @ 1839 NONAME
+ _ZN29QDeclarativeAbstractAnimationC2EP7QObject @ 1840 NONAME
+ _ZN29QDeclarativeAbstractAnimationC2ER36QDeclarativeAbstractAnimationPrivateP7QObject @ 1841 NONAME
+ _ZN29QDeclarativeAbstractAnimationD0Ev @ 1842 NONAME
+ _ZN29QDeclarativeAbstractAnimationD1Ev @ 1843 NONAME
+ _ZN29QDeclarativeAbstractAnimationD2Ev @ 1844 NONAME
+ _ZNK16QDeclarativeType20attachedPropertiesIdEv @ 1845 NONAME
+ _ZNK17QDeclarativeTimer10metaObjectEv @ 1846 NONAME
+ _ZNK17QDeclarativeTimer11isRepeatingEv @ 1847 NONAME
+ _ZNK17QDeclarativeTimer16triggeredOnStartEv @ 1848 NONAME
+ _ZNK17QDeclarativeTimer8intervalEv @ 1849 NONAME
+ _ZNK17QDeclarativeTimer9isRunningEv @ 1850 NONAME
+ _ZNK23QDeclarativeDebugClient6statusEv @ 1851 NONAME
+ _ZNK23QDeclarativeEngineDebug6statusEv @ 1852 NONAME
+ _ZNK24QDeclarativeDebugService6statusEv @ 1853 NONAME
+ _ZNK26QDeclarativeBasePositioner10metaObjectEv @ 1854 NONAME
+ _ZNK26QDeclarativeBasePositioner3addEv @ 1855 NONAME
+ _ZNK26QDeclarativeBasePositioner4moveEv @ 1856 NONAME
+ _ZNK26QDeclarativeBasePositioner7spacingEv @ 1857 NONAME
+ _ZNK29QDeclarativeAbstractAnimation10metaObjectEv @ 1858 NONAME
+ _ZNK29QDeclarativeAbstractAnimation14alwaysRunToEndEv @ 1859 NONAME
+ _ZNK29QDeclarativeAbstractAnimation5groupEv @ 1860 NONAME
+ _ZNK29QDeclarativeAbstractAnimation5loopsEv @ 1861 NONAME
+ _ZNK29QDeclarativeAbstractAnimation8isPausedEv @ 1862 NONAME
+ _ZNK29QDeclarativeAbstractAnimation9isRunningEv @ 1863 NONAME
+ _ZTI17QDeclarativeTimer @ 1864 NONAME
+ _ZTI26QDeclarativeBasePositioner @ 1865 NONAME
+ _ZTI29QDeclarativeAbstractAnimation @ 1866 NONAME
+ _ZTV17QDeclarativeTimer @ 1867 NONAME
+ _ZTV26QDeclarativeBasePositioner @ 1868 NONAME
+ _ZTV29QDeclarativeAbstractAnimation @ 1869 NONAME
+ _ZThn12_N29QDeclarativeAbstractAnimation10classBeginEv @ 1870 NONAME
+ _ZThn12_N29QDeclarativeAbstractAnimation17componentCompleteEv @ 1871 NONAME
+ _ZThn12_N29QDeclarativeAbstractAnimationD0Ev @ 1872 NONAME
+ _ZThn12_N29QDeclarativeAbstractAnimationD1Ev @ 1873 NONAME
+ _ZThn16_N26QDeclarativeBasePositioner17componentCompleteEv @ 1874 NONAME
+ _ZThn16_N26QDeclarativeBasePositionerD0Ev @ 1875 NONAME
+ _ZThn16_N26QDeclarativeBasePositionerD1Ev @ 1876 NONAME
+ _ZThn8_N17QDeclarativeTimer10classBeginEv @ 1877 NONAME
+ _ZThn8_N17QDeclarativeTimer17componentCompleteEv @ 1878 NONAME
+ _ZThn8_N26QDeclarativeBasePositioner10itemChangeEN13QGraphicsItem18GraphicsItemChangeERK8QVariant @ 1879 NONAME
+ _ZThn8_N26QDeclarativeBasePositionerD0Ev @ 1880 NONAME
+ _ZThn8_N26QDeclarativeBasePositionerD1Ev @ 1881 NONAME
+ _ZThn8_N29QDeclarativeAbstractAnimation9setTargetERK20QDeclarativeProperty @ 1882 NONAME
+ _ZThn8_N29QDeclarativeAbstractAnimationD0Ev @ 1883 NONAME
+ _ZThn8_N29QDeclarativeAbstractAnimationD1Ev @ 1884 NONAME
diff --git a/src/s60main/newallocator_hook.cpp b/src/s60main/newallocator_hook.cpp
new file mode 100644
index 0000000..9cc6afb
--- /dev/null
+++ b/src/s60main/newallocator_hook.cpp
@@ -0,0 +1,58 @@
+/****************************************************************************
+**
+** Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the Symbian application wrapper of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#include <e32std.h>
+#include <qglobal.h>
+
+struct SStdEpocThreadCreateInfo;
+
+Q_CORE_EXPORT TInt qt_symbian_SetupThreadHeap(TBool aNotFirst, SStdEpocThreadCreateInfo& aInfo);
+
+
+/* \internal
+ *
+ * Uses link-time symbol preemption to capture a call from the application
+ * startup. On return, there is some kind of heap allocator installed on the
+ * thread.
+*/
+TInt UserHeap::SetupThreadHeap(TBool aNotFirst, SStdEpocThreadCreateInfo& aInfo)
+{
+ return qt_symbian_SetupThreadHeap(aNotFirst, aInfo);
+}
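
The hook above works by link-time symbol preemption: s60main is a static library linked directly into every application binary, so the linker satisfies the reference to UserHeap::SetupThreadHeap from it and never imports the default implementation from the system DLL; the captured call is then forwarded to qt_symbian_SetupThreadHeap, which QtCore exports. A minimal, non-Symbian sketch of the same pattern follows; all names in it are hypothetical and it is illustrative only, not part of the patch.

    // allocator_hook_sketch.cpp -- illustration of the forwarding hook.
    // Built into a static library that the application links, so the
    // linker resolves SetupThreadHeap here instead of importing the
    // system DLL's copy (link-time symbol preemption).

    struct ThreadCreateInfo;                            // stands in for the
                                                        // opaque startup info

    int qt_setup_thread_heap(ThreadCreateInfo &info);   // exported by the
                                                         // allocator library

    // Same name and signature as the symbol the OS thread-startup code calls.
    int SetupThreadHeap(ThreadCreateInfo &info)
    {
        return qt_setup_thread_heap(info);              // hand the call over
    }
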
diff --git a/src/s60main/s60main.pro b/src/s60main/s60main.pro
index a273897..664f155 100644
--- a/src/s60main/s60main.pro
+++ b/src/s60main/s60main.pro
@@ -14,7 +14,8 @@ symbian {
CONFIG -= jpeg
INCLUDEPATH += tmp $$QMAKE_INCDIR_QT/QtCore $$MW_LAYER_SYSTEMINCLUDE
SOURCES = qts60main.cpp \
- qts60main_mcrt0.cpp
+ qts60main_mcrt0.cpp \
+ newallocator_hook.cpp
# s60main needs to be built in ARM mode for GCCE to work.
CONFIG += do_not_build_as_thumb
diff --git a/src/script/api/qscriptengine.cpp b/src/script/api/qscriptengine.cpp
index 07aced4..128e9c3 100644
--- a/src/script/api/qscriptengine.cpp
+++ b/src/script/api/qscriptengine.cpp
@@ -46,6 +46,7 @@
#include "Error.h"
#include "Interpreter.h"
+#include "ExceptionHelpers.h"
#include "PrototypeFunction.h"
#include "InitializeThreading.h"
#include "ObjectPrototype.h"
@@ -4116,9 +4117,11 @@ bool QScriptEngine::isEvaluating() const
void QScriptEngine::abortEvaluation(const QScriptValue &result)
{
Q_D(QScriptEngine);
-
- d->timeoutChecker()->setShouldAbort(true);
+ if (!isEvaluating())
+ return;
d->abortResult = result;
+ d->timeoutChecker()->setShouldAbort(true);
+ JSC::throwError(d->currentFrame, JSC::createInterruptedExecutionException(&d->currentFrame->globalData()).toObject(d->currentFrame));
}
#ifndef QT_NO_QOBJECT
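
For context, the hunk above makes QScriptEngine::abortEvaluation() return immediately when no script is being evaluated, and aborts a running evaluation by recording the abort result, arming the timeout checker, and throwing an interrupted-execution exception into the current JSC frame; the value passed to abortEvaluation() is what the aborted evaluate() call returns. A small usage sketch, not part of this patch, showing that the call is now a harmless no-op outside of evaluation:

    #include <QtCore/QCoreApplication>
    #include <QtScript/QScriptEngine>

    int main(int argc, char **argv)
    {
        QCoreApplication app(argc, argv);
        QScriptEngine engine;

        // With this change the call below simply returns when no script is
        // on the stack; previously it armed the timeout checker regardless.
        engine.abortEvaluation();

        QScriptValue v = engine.evaluate("6 * 7");      // runs to completion
        return v.toInt32() == 42 ? 0 : 1;
    }
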