Diffstat (limited to 'src/3rdparty/javascriptcore/JavaScriptCore/wtf')
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/ASCIICType.h | 166
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/AVLTree.h | 959
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/AlwaysInline.h | 63
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/Assertions.cpp | 207
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/Assertions.h | 249
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/ByteArray.cpp | 38
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/ByteArray.h | 80
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/CONTRIBUTORS.pthreads-win32 | 137
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/CrossThreadRefCounted.h | 169
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/CurrentTime.cpp | 232
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/CurrentTime.h | 47
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/DateMath.cpp | 917
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/DateMath.h | 192
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/Deque.h | 669
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/DisallowCType.h | 74
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastAllocBase.h | 403
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp | 4148
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.h | 193
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/Forward.h | 43
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/GOwnPtr.cpp | 65
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/GOwnPtr.h | 98
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/GetPtr.h | 33
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashCountedSet.h | 205
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashFunctions.h | 186
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashIterators.h | 216
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashMap.h | 337
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashSet.h | 278
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTable.cpp | 69
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTable.h | 1158
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTraits.h | 115
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/ListHashSet.h | 616
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/ListRefPtr.h | 61
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/Locker.h | 47
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/MainThread.cpp | 133
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/MainThread.h | 59
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/MallocZoneSupport.h | 65
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/MathExtras.h | 178
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/MessageQueue.h | 183
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/Noncopyable.h | 52
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/NotFound.h | 37
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnArrayPtr.h | 75
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnFastMallocPtr.h | 52
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtr.h | 142
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtrCommon.h | 61
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtrWin.cpp | 76
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/PassOwnPtr.h | 177
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/PassRefPtr.h | 195
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/Platform.h | 787
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/PtrAndFlags.h | 64
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumber.cpp | 102
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumber.h | 42
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumberSeed.h | 89
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCounted.h | 137
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCountedLeakCounter.cpp | 100
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCountedLeakCounter.h | 48
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefPtr.h | 206
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefPtrHashMap.h | 350
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/RetainPtr.h | 203
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/SegmentedVector.h | 252
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/StdLibExtras.h | 63
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/StringExtras.h | 88
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCPackedCache.h | 234
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCPageMap.h | 316
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSpinLock.h | 239
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSystemAlloc.cpp | 469
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSystemAlloc.h | 75
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadSpecific.h | 266
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadSpecificWin.cpp | 54
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/Threading.cpp | 97
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/Threading.h | 332
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingNone.cpp | 59
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingPthreads.cpp | 375
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingWin.cpp | 493
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/TypeTraits.cpp | 120
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/TypeTraits.h | 339
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/UnusedParam.h | 29
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/VMTags.h | 55
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/Vector.h | 1014
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/VectorTraits.h | 102
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/dtoa.cpp | 2379
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/dtoa.h | 37
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/qt/MainThreadQt.cpp | 74
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/qt/ThreadingQt.cpp | 271
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/Collator.h | 67
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/CollatorDefault.cpp | 75
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/UTF8.cpp | 303
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/UTF8.h | 75
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/Unicode.h | 39
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeGLib.cpp | 214
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeGLib.h | 238
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeMacrosFromICU.h | 69
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp | 150
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h | 230
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h | 545
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/FastMallocWince.h | 177
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/MemoryManager.cpp | 171
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/MemoryManager.h | 80
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/mt19937ar.c | 170
98 files changed, 26518 insertions(+), 0 deletions(-)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ASCIICType.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ASCIICType.h
new file mode 100644
index 0000000..0c3c29f
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ASCIICType.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2007, 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_ASCIICType_h
+#define WTF_ASCIICType_h
+
+#include <wtf/Assertions.h>
+#include <wtf/Platform.h>
+
+// The behavior of many of the functions in the <ctype.h> header is dependent
+// on the current locale. But in the WebKit project, all uses of those functions
+// are in code processing something that's not locale-specific. These equivalents
+// for some of the <ctype.h> functions are named more explicitly, not dependent
+// on the C library locale, and we should also optimize them as needed.
+
+// All functions return false or leave the character unchanged if passed a character
+// that is outside the range 0-7F. So they can be used on Unicode strings or
+// characters if the intent is to do processing only if the character is ASCII.
+
+namespace WTF {
+
+ inline bool isASCII(char c) { return !(c & ~0x7F); }
+ inline bool isASCII(unsigned short c) { return !(c & ~0x7F); }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline bool isASCII(wchar_t c) { return !(c & ~0x7F); }
+#endif
+ inline bool isASCII(int c) { return !(c & ~0x7F); }
+
+ inline bool isASCIIAlpha(char c) { return (c | 0x20) >= 'a' && (c | 0x20) <= 'z'; }
+ inline bool isASCIIAlpha(unsigned short c) { return (c | 0x20) >= 'a' && (c | 0x20) <= 'z'; }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline bool isASCIIAlpha(wchar_t c) { return (c | 0x20) >= 'a' && (c | 0x20) <= 'z'; }
+#endif
+ inline bool isASCIIAlpha(int c) { return (c | 0x20) >= 'a' && (c | 0x20) <= 'z'; }
+
+ inline bool isASCIIAlphanumeric(char c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); }
+ inline bool isASCIIAlphanumeric(unsigned short c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline bool isASCIIAlphanumeric(wchar_t c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); }
+#endif
+ inline bool isASCIIAlphanumeric(int c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); }
+
+ inline bool isASCIIDigit(char c) { return (c >= '0') & (c <= '9'); }
+ inline bool isASCIIDigit(unsigned short c) { return (c >= '0') & (c <= '9'); }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline bool isASCIIDigit(wchar_t c) { return (c >= '0') & (c <= '9'); }
+#endif
+ inline bool isASCIIDigit(int c) { return (c >= '0') & (c <= '9'); }
+
+ inline bool isASCIIHexDigit(char c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); }
+ inline bool isASCIIHexDigit(unsigned short c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline bool isASCIIHexDigit(wchar_t c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); }
+#endif
+ inline bool isASCIIHexDigit(int c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); }
+
+ inline bool isASCIIOctalDigit(char c) { return (c >= '0') & (c <= '7'); }
+ inline bool isASCIIOctalDigit(unsigned short c) { return (c >= '0') & (c <= '7'); }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline bool isASCIIOctalDigit(wchar_t c) { return (c >= '0') & (c <= '7'); }
+#endif
+ inline bool isASCIIOctalDigit(int c) { return (c >= '0') & (c <= '7'); }
+
+ inline bool isASCIILower(char c) { return c >= 'a' && c <= 'z'; }
+ inline bool isASCIILower(unsigned short c) { return c >= 'a' && c <= 'z'; }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline bool isASCIILower(wchar_t c) { return c >= 'a' && c <= 'z'; }
+#endif
+ inline bool isASCIILower(int c) { return c >= 'a' && c <= 'z'; }
+
+ inline bool isASCIIUpper(char c) { return c >= 'A' && c <= 'Z'; }
+ inline bool isASCIIUpper(unsigned short c) { return c >= 'A' && c <= 'Z'; }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline bool isASCIIUpper(wchar_t c) { return c >= 'A' && c <= 'Z'; }
+#endif
+ inline bool isASCIIUpper(int c) { return c >= 'A' && c <= 'Z'; }
+
+ /*
+ Statistics from a run of Apple's page load test for callers of isASCIISpace:
+
+ character count
+ --------- -----
+ non-spaces 689383
+ 20 space 294720
+ 0A \n 89059
+ 09 \t 28320
+ 0D \r 0
+ 0C \f 0
+ 0B \v 0
+ */
+ inline bool isASCIISpace(char c) { return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9)); }
+ inline bool isASCIISpace(unsigned short c) { return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9)); }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline bool isASCIISpace(wchar_t c) { return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9)); }
+#endif
+ inline bool isASCIISpace(int c) { return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9)); }
+
+ inline char toASCIILower(char c) { return c | ((c >= 'A' && c <= 'Z') << 5); }
+ inline unsigned short toASCIILower(unsigned short c) { return c | ((c >= 'A' && c <= 'Z') << 5); }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline wchar_t toASCIILower(wchar_t c) { return c | ((c >= 'A' && c <= 'Z') << 5); }
+#endif
+ inline int toASCIILower(int c) { return c | ((c >= 'A' && c <= 'Z') << 5); }
+
+ inline char toASCIIUpper(char c) { return static_cast<char>(c & ~((c >= 'a' && c <= 'z') << 5)); }
+ inline unsigned short toASCIIUpper(unsigned short c) { return static_cast<unsigned short>(c & ~((c >= 'a' && c <= 'z') << 5)); }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline wchar_t toASCIIUpper(wchar_t c) { return static_cast<wchar_t>(c & ~((c >= 'a' && c <= 'z') << 5)); }
+#endif
+ inline int toASCIIUpper(int c) { return static_cast<int>(c & ~((c >= 'a' && c <= 'z') << 5)); }
+
+ inline int toASCIIHexValue(char c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; }
+ inline int toASCIIHexValue(unsigned short c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline int toASCIIHexValue(wchar_t c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; }
+#endif
+ inline int toASCIIHexValue(int c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; }
+
+ inline bool isASCIIPrintable(char c) { return c >= ' ' && c <= '~'; }
+ inline bool isASCIIPrintable(unsigned short c) { return c >= ' ' && c <= '~'; }
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ inline bool isASCIIPrintable(wchar_t c) { return c >= ' ' && c <= '~'; }
+#endif
+ inline bool isASCIIPrintable(int c) { return c >= ' ' && c <= '~'; }
+}
+
+using WTF::isASCII;
+using WTF::isASCIIAlpha;
+using WTF::isASCIIAlphanumeric;
+using WTF::isASCIIDigit;
+using WTF::isASCIIHexDigit;
+using WTF::isASCIILower;
+using WTF::isASCIIOctalDigit;
+using WTF::isASCIIPrintable;
+using WTF::isASCIISpace;
+using WTF::isASCIIUpper;
+using WTF::toASCIIHexValue;
+using WTF::toASCIILower;
+using WTF::toASCIIUpper;
+
+#endif
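
The case-folding trick in this header is worth spelling out: OR-ing a character with 0x20 sets the bit that separates 'A' (0x41) from 'a' (0x61), and in toASCIILower the term `(c >= 'A' && c <= 'Z') << 5` evaluates to 0x20 only for uppercase letters, leaving every other character untouched. A minimal sketch of the observable behavior, assuming the header is on the include path:

    #include <wtf/ASCIICType.h>
    #include <cstdio>

    int main()
    {
        std::printf("%c\n", toASCIILower('A'));    // 'a': 0x41 | 0x20 == 0x61
        std::printf("%c\n", toASCIILower('['));    // '[': not in A-Z, unchanged
        std::printf("%d\n", isASCIISpace('\t'));   // 1: tab is 0x9, inside the 0x9-0xD run
        std::printf("%d\n", isASCIISpace(0x00A0)); // 0: NBSP is outside 0-7F
        std::printf("%d\n", toASCIIHexValue('f')); // 15
        return 0;
    }

Unlike the <ctype.h> equivalents, none of these calls consult the C locale, which is the whole point of the header.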
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/AVLTree.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/AVLTree.h
new file mode 100644
index 0000000..d7470e7
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/AVLTree.h
@@ -0,0 +1,959 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Based on Abstract AVL Tree Template v1.5 by Walt Karas
+ * <http://geocities.com/wkaras/gen_cpp/avl_tree.html>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AVL_TREE_H_
+#define AVL_TREE_H_
+
+#include "Assertions.h"
+
+namespace WTF {
+
+// Here is the reference class for BSet.
+//
+// class BSet
+// {
+// public:
+//
+// class ANY_bitref
+// {
+// public:
+// operator bool ();
+// void operator = (bool b);
+// };
+//
+// // Does not have to initialize bits.
+// BSet();
+//
+// // Must return a valid value for index when 0 <= index < maxDepth
+// ANY_bitref operator [] (unsigned index);
+//
+// // Set all bits to 1.
+// void set();
+//
+// // Set all bits to 0.
+// void reset();
+// };
+
+template<unsigned maxDepth>
+class AVLTreeDefaultBSet {
+public:
+ bool& operator[](unsigned i) { ASSERT(i < maxDepth); return m_data[i]; }
+ void set() { for (unsigned i = 0; i < maxDepth; ++i) m_data[i] = true; }
+ void reset() { for (unsigned i = 0; i < maxDepth; ++i) m_data[i] = false; }
+
+private:
+ bool m_data[maxDepth];
+};
+
+// How to determine maxDepth:
+// d Minimum number of nodes
+// 2 2
+// 3 4
+// 4 7
+// 5 12
+// 6 20
+// 7 33
+// 8 54
+// 9 88
+// 10 143
+// 11 232
+// 12 376
+// 13 609
+// 14 986
+// 15 1,596
+// 16 2,583
+// 17 4,180
+// 18 6,764
+// 19 10,945
+// 20 17,710
+// 21 28,656
+// 22 46,367
+// 23 75,024
+// 24 121,392
+// 25 196,417
+// 26 317,810
+// 27 514,228
+// 28 832,039
+// 29 1,346,268
+// 30 2,178,308
+// 31 3,524,577
+// 32 5,702,886
+// 33 9,227,464
+// 34 14,930,351
+// 35 24,157,816
+// 36 39,088,168
+// 37 63,245,985
+// 38 102,334,154
+// 39 165,580,140
+// 40 267,914,295
+// 41 433,494,436
+// 42 701,408,732
+// 43 1,134,903,169
+// 44 1,836,311,902
+// 45 2,971,215,072
+//
+// E.g., if, in a particular instantiation, the maximum number of nodes in a tree instance is 1,000,000, the maximum depth should be 28.
+// You pick 28 because MN(28) is 832,039, which is less than or equal to 1,000,000, and MN(29) is 1,346,268, which is strictly greater than 1,000,000.
+
+template <class Abstractor, unsigned maxDepth = 32, class BSet = AVLTreeDefaultBSet<maxDepth> >
+class AVLTree {
+public:
+
+ typedef typename Abstractor::key key;
+ typedef typename Abstractor::handle handle;
+ typedef typename Abstractor::size size;
+
+ enum SearchType {
+ EQUAL = 1,
+ LESS = 2,
+ GREATER = 4,
+ LESS_EQUAL = EQUAL | LESS,
+ GREATER_EQUAL = EQUAL | GREATER
+ };
+
+
+ Abstractor& abstractor() { return abs; }
+
+ inline handle insert(handle h);
+
+ inline handle search(key k, SearchType st = EQUAL);
+ inline handle search_least();
+ inline handle search_greatest();
+
+ inline handle remove(key k);
+
+ inline handle subst(handle new_node);
+
+ void purge() { abs.root = null(); }
+
+ bool is_empty() { return abs.root == null(); }
+
+ AVLTree() { abs.root = null(); }
+
+ class Iterator {
+ public:
+
+ // Initialize depth to invalid value, to indicate iterator is
+ // invalid. (Depth is zero-based.)
+ Iterator() { depth = ~0U; }
+
+ void start_iter(AVLTree &tree, key k, SearchType st = EQUAL)
+ {
+ // Mask of high bit in an int.
+ const int MASK_HIGH_BIT = (int) ~ ((~ (unsigned) 0) >> 1);
+
+ // Save the tree that we're going to iterate through in a
+ // member variable.
+ tree_ = &tree;
+
+ int cmp, target_cmp;
+ handle h = tree_->abs.root;
+ unsigned d = 0;
+
+ depth = ~0U;
+
+ if (h == null())
+ // Tree is empty.
+ return;
+
+ if (st & LESS)
+ // Key can be greater than key of starting node.
+ target_cmp = 1;
+ else if (st & GREATER)
+ // Key can be less than key of starting node.
+ target_cmp = -1;
+ else
+ // Key must be same as key of starting node.
+ target_cmp = 0;
+
+ for (;;) {
+ cmp = cmp_k_n(k, h);
+ if (cmp == 0) {
+ if (st & EQUAL) {
+ // Equal node was sought and found as starting node.
+ depth = d;
+ break;
+ }
+ cmp = -target_cmp;
+ } else if (target_cmp != 0) {
+ if (!((cmp ^ target_cmp) & MASK_HIGH_BIT)) {
+ // cmp and target_cmp are both negative or both positive.
+ depth = d;
+ }
+ }
+ h = cmp < 0 ? get_lt(h) : get_gt(h);
+ if (h == null())
+ break;
+ branch[d] = cmp > 0;
+ path_h[d++] = h;
+ }
+ }
+
+ void start_iter_least(AVLTree &tree)
+ {
+ tree_ = &tree;
+
+ handle h = tree_->abs.root;
+
+ depth = ~0U;
+
+ branch.reset();
+
+ while (h != null()) {
+ if (depth != ~0U)
+ path_h[depth] = h;
+ depth++;
+ h = get_lt(h);
+ }
+ }
+
+ void start_iter_greatest(AVLTree &tree)
+ {
+ tree_ = &tree;
+
+ handle h = tree_->abs.root;
+
+ depth = ~0U;
+
+ branch.set();
+
+ while (h != null()) {
+ if (depth != ~0U)
+ path_h[depth] = h;
+ depth++;
+ h = get_gt(h);
+ }
+ }
+
+ handle operator*()
+ {
+ if (depth == ~0U)
+ return null();
+
+ return depth == 0 ? tree_->abs.root : path_h[depth - 1];
+ }
+
+ void operator++()
+ {
+ if (depth != ~0U) {
+ handle h = get_gt(**this);
+ if (h == null()) {
+ do {
+ if (depth == 0) {
+ depth = ~0U;
+ break;
+ }
+ depth--;
+ } while (branch[depth]);
+ } else {
+ branch[depth] = true;
+ path_h[depth++] = h;
+ for (;;) {
+ h = get_lt(h);
+ if (h == null())
+ break;
+ branch[depth] = false;
+ path_h[depth++] = h;
+ }
+ }
+ }
+ }
+
+ void operator--()
+ {
+ if (depth != ~0U) {
+ handle h = get_lt(**this);
+ if (h == null())
+ do {
+ if (depth == 0) {
+ depth = ~0U;
+ break;
+ }
+ depth--;
+ } while (!branch[depth]);
+ else {
+ branch[depth] = false;
+ path_h[depth++] = h;
+ for (;;) {
+ h = get_gt(h);
+ if (h == null())
+ break;
+ branch[depth] = true;
+ path_h[depth++] = h;
+ }
+ }
+ }
+ }
+
+ void operator++(int) { ++(*this); }
+ void operator--(int) { --(*this); }
+
+ protected:
+
+ // Tree being iterated over.
+ AVLTree *tree_;
+
+ // Records a path into the tree. If branch[n] is true, indicates
+ // take greater branch from the nth node in the path, otherwise
+ // take the less branch. branch[0] gives branch from root, and
+ // so on.
+ BSet branch;
+
+ // Zero-based depth of path into tree.
+ unsigned depth;
+
+ // Handles of nodes in path from root to current node (returned by *).
+ handle path_h[maxDepth - 1];
+
+ int cmp_k_n(key k, handle h) { return tree_->abs.compare_key_node(k, h); }
+ int cmp_n_n(handle h1, handle h2) { return tree_->abs.compare_node_node(h1, h2); }
+ handle get_lt(handle h) { return tree_->abs.get_less(h); }
+ handle get_gt(handle h) { return tree_->abs.get_greater(h); }
+ handle null() { return tree_->abs.null(); }
+ };
+
+ template<typename fwd_iter>
+ bool build(fwd_iter p, size num_nodes)
+ {
+ if (num_nodes == 0) {
+ abs.root = null();
+ return true;
+ }
+
+ // Gives path to subtree being built. If branch[N] is false, branch
+ // less from the node at depth N, if true branch greater.
+ BSet branch;
+
+ // If rem[N] is true, then for the current subtree at depth N, its
+ // greater subtree has one more node than its less subtree.
+ BSet rem;
+
+ // Depth of root node of current subtree.
+ unsigned depth = 0;
+
+ // Number of nodes in current subtree.
+ size num_sub = num_nodes;
+
+ // The algorithm relies on a stack of nodes whose less subtree has
+ // been built, but whose right subtree has not yet been built. The
+ // stack is implemented as linked list. The nodes are linked
+ // together by having the "greater" handle of a node set to the
+ // next node in the list. "less_parent" is the handle of the first
+ // node in the list.
+ handle less_parent = null();
+
+ // h is root of current subtree, child is one of its children.
+ handle h, child;
+
+ for (;;) {
+ while (num_sub > 2) {
+ // Subtract one for root of subtree.
+ num_sub--;
+ rem[depth] = !!(num_sub & 1);
+ branch[depth++] = false;
+ num_sub >>= 1;
+ }
+
+ if (num_sub == 2) {
+ // Build a subtree with two nodes, slanting to greater.
+ // I arbitrarily chose to always have the extra node in the
+ // greater subtree when there is an odd number of nodes to
+ // split between the two subtrees.
+
+ h = *p;
+ p++;
+ child = *p;
+ p++;
+ set_lt(child, null());
+ set_gt(child, null());
+ set_bf(child, 0);
+ set_gt(h, child);
+ set_lt(h, null());
+ set_bf(h, 1);
+ } else { // num_sub == 1
+ // Build a subtree with one node.
+
+ h = *p;
+ p++;
+ set_lt(h, null());
+ set_gt(h, null());
+ set_bf(h, 0);
+ }
+
+ while (depth) {
+ depth--;
+ if (!branch[depth])
+ // We've completed a less subtree.
+ break;
+
+ // We've completed a greater subtree, so attach it to
+ // its parent (that is less than it). We pop the parent
+ // off the stack of less parents.
+ child = h;
+ h = less_parent;
+ less_parent = get_gt(h);
+ set_gt(h, child);
+ // num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1
+ num_sub <<= 1;
+ num_sub += 1 - rem[depth];
+ if (num_sub & (num_sub - 1))
+ // num_sub is not a power of 2
+ set_bf(h, 0);
+ else
+ // num_sub is a power of 2
+ set_bf(h, 1);
+ }
+
+ if (num_sub == num_nodes)
+ // We've completed the full tree.
+ break;
+
+ // The subtree we've completed is the less subtree of the
+ // next node in the sequence.
+
+ child = h;
+ h = *p;
+ p++;
+ set_lt(h, child);
+
+ // Put h into stack of less parents.
+ set_gt(h, less_parent);
+ less_parent = h;
+
+ // Proceed to creating greater than subtree of h.
+ branch[depth] = true;
+ num_sub += rem[depth++];
+
+ } // end for (;;)
+
+ abs.root = h;
+
+ return true;
+ }
+
+protected:
+
+ friend class Iterator;
+
+ // Create a class whose sole purpose is to take advantage of
+ // the "empty member" optimization.
+ struct abs_plus_root : public Abstractor {
+ // The handle of the root element in the AVL tree.
+ handle root;
+ };
+
+ abs_plus_root abs;
+
+
+ handle get_lt(handle h) { return abs.get_less(h); }
+ void set_lt(handle h, handle lh) { abs.set_less(h, lh); }
+
+ handle get_gt(handle h) { return abs.get_greater(h); }
+ void set_gt(handle h, handle gh) { abs.set_greater(h, gh); }
+
+ int get_bf(handle h) { return abs.get_balance_factor(h); }
+ void set_bf(handle h, int bf) { abs.set_balance_factor(h, bf); }
+
+ int cmp_k_n(key k, handle h) { return abs.compare_key_node(k, h); }
+ int cmp_n_n(handle h1, handle h2) { return abs.compare_node_node(h1, h2); }
+
+ handle null() { return abs.null(); }
+
+private:
+
+ // Balances subtree, returns handle of root node of subtree
+ // after balancing.
+ handle balance(handle bal_h)
+ {
+ handle deep_h;
+
+ // Either the "greater than" or the "less than" subtree of
+ // this node has to be 2 levels deeper (or else it wouldn't
+ // need balancing).
+
+ if (get_bf(bal_h) > 0) {
+ // "Greater than" subtree is deeper.
+
+ deep_h = get_gt(bal_h);
+
+ if (get_bf(deep_h) < 0) {
+ handle old_h = bal_h;
+ bal_h = get_lt(deep_h);
+
+ set_gt(old_h, get_lt(bal_h));
+ set_lt(deep_h, get_gt(bal_h));
+ set_lt(bal_h, old_h);
+ set_gt(bal_h, deep_h);
+
+ int bf = get_bf(bal_h);
+ if (bf != 0) {
+ if (bf > 0) {
+ set_bf(old_h, -1);
+ set_bf(deep_h, 0);
+ } else {
+ set_bf(deep_h, 1);
+ set_bf(old_h, 0);
+ }
+ set_bf(bal_h, 0);
+ } else {
+ set_bf(old_h, 0);
+ set_bf(deep_h, 0);
+ }
+ } else {
+ set_gt(bal_h, get_lt(deep_h));
+ set_lt(deep_h, bal_h);
+ if (get_bf(deep_h) == 0) {
+ set_bf(deep_h, -1);
+ set_bf(bal_h, 1);
+ } else {
+ set_bf(deep_h, 0);
+ set_bf(bal_h, 0);
+ }
+ bal_h = deep_h;
+ }
+ } else {
+ // "Less than" subtree is deeper.
+
+ deep_h = get_lt(bal_h);
+
+ if (get_bf(deep_h) > 0) {
+ handle old_h = bal_h;
+ bal_h = get_gt(deep_h);
+ set_lt(old_h, get_gt(bal_h));
+ set_gt(deep_h, get_lt(bal_h));
+ set_gt(bal_h, old_h);
+ set_lt(bal_h, deep_h);
+
+ int bf = get_bf(bal_h);
+ if (bf != 0) {
+ if (bf < 0) {
+ set_bf(old_h, 1);
+ set_bf(deep_h, 0);
+ } else {
+ set_bf(deep_h, -1);
+ set_bf(old_h, 0);
+ }
+ set_bf(bal_h, 0);
+ } else {
+ set_bf(old_h, 0);
+ set_bf(deep_h, 0);
+ }
+ } else {
+ set_lt(bal_h, get_gt(deep_h));
+ set_gt(deep_h, bal_h);
+ if (get_bf(deep_h) == 0) {
+ set_bf(deep_h, 1);
+ set_bf(bal_h, -1);
+ } else {
+ set_bf(deep_h, 0);
+ set_bf(bal_h, 0);
+ }
+ bal_h = deep_h;
+ }
+ }
+
+ return bal_h;
+ }
+
+};
+
+template <class Abstractor, unsigned maxDepth, class BSet>
+inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
+AVLTree<Abstractor, maxDepth, BSet>::insert(handle h)
+{
+ set_lt(h, null());
+ set_gt(h, null());
+ set_bf(h, 0);
+
+ if (abs.root == null())
+ abs.root = h;
+ else {
+ // Last unbalanced node encountered in search for insertion point.
+ handle unbal = null();
+ // Parent of last unbalanced node.
+ handle parent_unbal = null();
+ // Balance factor of last unbalanced node.
+ int unbal_bf;
+
+ // Zero-based depth in tree.
+ unsigned depth = 0, unbal_depth = 0;
+
+ // Records a path into the tree. If branch[n] is true, indicates
+ // take greater branch from the nth node in the path, otherwise
+ // take the less branch. branch[0] gives branch from root, and
+ // so on.
+ BSet branch;
+
+ handle hh = abs.root;
+ handle parent = null();
+ int cmp;
+
+ do {
+ if (get_bf(hh) != 0) {
+ unbal = hh;
+ parent_unbal = parent;
+ unbal_depth = depth;
+ }
+ cmp = cmp_n_n(h, hh);
+ if (cmp == 0)
+ // Duplicate key.
+ return hh;
+ parent = hh;
+ hh = cmp < 0 ? get_lt(hh) : get_gt(hh);
+ branch[depth++] = cmp > 0;
+ } while (hh != null());
+
+ // Add node to insert as leaf of tree.
+ if (cmp < 0)
+ set_lt(parent, h);
+ else
+ set_gt(parent, h);
+
+ depth = unbal_depth;
+
+ if (unbal == null())
+ hh = abs.root;
+ else {
+ cmp = branch[depth++] ? 1 : -1;
+ unbal_bf = get_bf(unbal);
+ if (cmp < 0)
+ unbal_bf--;
+ else // cmp > 0
+ unbal_bf++;
+ hh = cmp < 0 ? get_lt(unbal) : get_gt(unbal);
+ if ((unbal_bf != -2) && (unbal_bf != 2)) {
+ // No rebalancing of tree is necessary.
+ set_bf(unbal, unbal_bf);
+ unbal = null();
+ }
+ }
+
+ if (hh != null())
+ while (h != hh) {
+ cmp = branch[depth++] ? 1 : -1;
+ if (cmp < 0) {
+ set_bf(hh, -1);
+ hh = get_lt(hh);
+ } else { // cmp > 0
+ set_bf(hh, 1);
+ hh = get_gt(hh);
+ }
+ }
+
+ if (unbal != null()) {
+ unbal = balance(unbal);
+ if (parent_unbal == null())
+ abs.root = unbal;
+ else {
+ depth = unbal_depth - 1;
+ cmp = branch[depth] ? 1 : -1;
+ if (cmp < 0)
+ set_lt(parent_unbal, unbal);
+ else // cmp > 0
+ set_gt(parent_unbal, unbal);
+ }
+ }
+ }
+
+ return h;
+}
+
+template <class Abstractor, unsigned maxDepth, class BSet>
+inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
+AVLTree<Abstractor, maxDepth, BSet>::search(key k, typename AVLTree<Abstractor, maxDepth, BSet>::SearchType st)
+{
+ const int MASK_HIGH_BIT = (int) ~ ((~ (unsigned) 0) >> 1);
+
+ int cmp, target_cmp;
+ handle match_h = null();
+ handle h = abs.root;
+
+ if (st & LESS)
+ target_cmp = 1;
+ else if (st & GREATER)
+ target_cmp = -1;
+ else
+ target_cmp = 0;
+
+ while (h != null()) {
+ cmp = cmp_k_n(k, h);
+ if (cmp == 0) {
+ if (st & EQUAL) {
+ match_h = h;
+ break;
+ }
+ cmp = -target_cmp;
+ } else if (target_cmp != 0)
+ if (!((cmp ^ target_cmp) & MASK_HIGH_BIT))
+ // cmp and target_cmp are both positive or both negative.
+ match_h = h;
+ h = cmp < 0 ? get_lt(h) : get_gt(h);
+ }
+
+ return match_h;
+}
+
+template <class Abstractor, unsigned maxDepth, class BSet>
+inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
+AVLTree<Abstractor, maxDepth, BSet>::search_least()
+{
+ handle h = abs.root, parent = null();
+
+ while (h != null()) {
+ parent = h;
+ h = get_lt(h);
+ }
+
+ return parent;
+}
+
+template <class Abstractor, unsigned maxDepth, class BSet>
+inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
+AVLTree<Abstractor, maxDepth, BSet>::search_greatest()
+{
+ handle h = abs.root, parent = null();
+
+ while (h != null()) {
+ parent = h;
+ h = get_gt(h);
+ }
+
+ return parent;
+}
+
+template <class Abstractor, unsigned maxDepth, class BSet>
+inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
+AVLTree<Abstractor, maxDepth, BSet>::remove(key k)
+{
+ // Zero-based depth in tree.
+ unsigned depth = 0, rm_depth;
+
+ // Records a path into the tree. If branch[n] is true, indicates
+ // take greater branch from the nth node in the path, otherwise
+ // take the less branch. branch[0] gives branch from root, and
+ // so on.
+ BSet branch;
+
+ handle h = abs.root;
+ handle parent = null(), child;
+ int cmp, cmp_shortened_sub_with_path = 0;
+
+ for (;;) {
+ if (h == null())
+ // No node in tree with given key.
+ return null();
+ cmp = cmp_k_n(k, h);
+ if (cmp == 0)
+ // Found node to remove.
+ break;
+ parent = h;
+ h = cmp < 0 ? get_lt(h) : get_gt(h);
+ branch[depth++] = cmp > 0;
+ cmp_shortened_sub_with_path = cmp;
+ }
+ handle rm = h;
+ handle parent_rm = parent;
+ rm_depth = depth;
+
+ // If the node to remove is not a leaf node, we need to get a
+ // leaf node, or a node with a single leaf as its child, to put
+ // in the place of the node to remove. We will get the greatest
+ // node in the less subtree (of the node to remove), or the least
+ // node in the greater subtree. We take the leaf node from the
+ // deeper subtree, if there is one.
+
+ if (get_bf(h) < 0) {
+ child = get_lt(h);
+ branch[depth] = false;
+ cmp = -1;
+ } else {
+ child = get_gt(h);
+ branch[depth] = true;
+ cmp = 1;
+ }
+ depth++;
+
+ if (child != null()) {
+ cmp = -cmp;
+ do {
+ parent = h;
+ h = child;
+ if (cmp < 0) {
+ child = get_lt(h);
+ branch[depth] = false;
+ } else {
+ child = get_gt(h);
+ branch[depth] = true;
+ }
+ depth++;
+ } while (child != null());
+
+ if (parent == rm)
+ // Only went through the do loop once. The deleted node will be replaced
+ // in the tree structure by one of its immediate children.
+ cmp_shortened_sub_with_path = -cmp;
+ else
+ cmp_shortened_sub_with_path = cmp;
+
+ // Get the handle of the opposite child, which may not be null.
+ child = cmp > 0 ? get_lt(h) : get_gt(h);
+ }
+
+ if (parent == null())
+ // There were only 1 or 2 nodes in this tree.
+ abs.root = child;
+ else if (cmp_shortened_sub_with_path < 0)
+ set_lt(parent, child);
+ else
+ set_gt(parent, child);
+
+ // "path" is the parent of the subtree being eliminated or reduced
+ // from a depth of 2 to 1. If "path" is the node to be removed, we
+ // set path to the node we're about to poke into the position of the
+ // node to be removed.
+ handle path = parent == rm ? h : parent;
+
+ if (h != rm) {
+ // Poke in the replacement for the node to be removed.
+ set_lt(h, get_lt(rm));
+ set_gt(h, get_gt(rm));
+ set_bf(h, get_bf(rm));
+ if (parent_rm == null())
+ abs.root = h;
+ else {
+ depth = rm_depth - 1;
+ if (branch[depth])
+ set_gt(parent_rm, h);
+ else
+ set_lt(parent_rm, h);
+ }
+ }
+
+ if (path != null()) {
+ // Create a temporary linked list from the parent of the path node
+ // to the root node.
+ h = abs.root;
+ parent = null();
+ depth = 0;
+ while (h != path) {
+ if (branch[depth++]) {
+ child = get_gt(h);
+ set_gt(h, parent);
+ } else {
+ child = get_lt(h);
+ set_lt(h, parent);
+ }
+ parent = h;
+ h = child;
+ }
+
+ // Climb from the path node to the root node using the linked
+ // list, restoring the tree structure and rebalancing as necessary.
+ bool reduced_depth = true;
+ int bf;
+ cmp = cmp_shortened_sub_with_path;
+ for (;;) {
+ if (reduced_depth) {
+ bf = get_bf(h);
+ if (cmp < 0)
+ bf++;
+ else // cmp > 0
+ bf--;
+ if ((bf == -2) || (bf == 2)) {
+ h = balance(h);
+ bf = get_bf(h);
+ } else
+ set_bf(h, bf);
+ reduced_depth = (bf == 0);
+ }
+ if (parent == null())
+ break;
+ child = h;
+ h = parent;
+ cmp = branch[--depth] ? 1 : -1;
+ if (cmp < 0) {
+ parent = get_lt(h);
+ set_lt(h, child);
+ } else {
+ parent = get_gt(h);
+ set_gt(h, child);
+ }
+ }
+ abs.root = h;
+ }
+
+ return rm;
+}
+
+template <class Abstractor, unsigned maxDepth, class BSet>
+inline typename AVLTree<Abstractor, maxDepth, BSet>::handle
+AVLTree<Abstractor, maxDepth, BSet>::subst(handle new_node)
+{
+ handle h = abs.root;
+ handle parent = null();
+ int cmp, last_cmp;
+
+ /* Search for node already in tree with same key. */
+ for (;;) {
+ if (h == null())
+ /* No node in tree with same key as new node. */
+ return null();
+ cmp = cmp_n_n(new_node, h);
+ if (cmp == 0)
+ /* Found the node to substitute new one for. */
+ break;
+ last_cmp = cmp;
+ parent = h;
+ h = cmp < 0 ? get_lt(h) : get_gt(h);
+ }
+
+ /* Copy tree housekeeping fields from node in tree to new node. */
+ set_lt(new_node, get_lt(h));
+ set_gt(new_node, get_gt(h));
+ set_bf(new_node, get_bf(h));
+
+ if (parent == null())
+ /* New node is also new root. */
+ abs.root = new_node;
+ else {
+ /* Make parent point to new node. */
+ if (last_cmp < 0)
+ set_lt(parent, new_node);
+ else
+ set_gt(parent, new_node);
+ }
+
+ return h;
+}
+
+}
+
+#endif
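
AVLTree never allocates or owns nodes; all storage and field access goes through the Abstractor, so the same template can operate on heap pointers, indices into an array, or packed handles. A minimal sketch of an abstractor over ordinary pointer-linked nodes (Node and its field names are our own illustration, not part of this header; the typedefs and member-function names are the interface AVLTree expects):

    #include "AVLTree.h"
    #include <cstddef>

    struct Node {
        int value;
        Node* less;
        Node* greater;
        int balanceFactor;
    };

    struct NodeAbstractor {
        typedef int key;
        typedef Node* handle;   // a null pointer plays the role of null()
        typedef size_t size;

        static handle get_less(handle h) { return h->less; }
        static void set_less(handle h, handle lh) { h->less = lh; }
        static handle get_greater(handle h) { return h->greater; }
        static void set_greater(handle h, handle gh) { h->greater = gh; }
        static int get_balance_factor(handle h) { return h->balanceFactor; }
        static void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }
        static int compare_key_node(key k, handle h) { return k - h->value; }
        static int compare_node_node(handle h1, handle h2) { return h1->value - h2->value; }
        static handle null() { return 0; }
    };

    int main()
    {
        WTF::AVLTree<NodeAbstractor> tree;           // maxDepth defaults to 32
        Node a = { 5, 0, 0, 0 }, b = { 3, 0, 0, 0 }, c = { 9, 0, 0, 0 };
        tree.insert(&a);
        tree.insert(&b);
        tree.insert(&c);
        Node* hit = tree.search(9);                  // &c
        Node* least = tree.search_least();           // &b
        (void)hit; (void)least;
        return 0;
    }

JavaScriptCore's own array sort instantiates the template the same way, but with integer handles indexing into a vector rather than raw pointers.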
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/AlwaysInline.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/AlwaysInline.h
new file mode 100644
index 0000000..64fdd99
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/AlwaysInline.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2005, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "Platform.h"
+
+#ifndef ALWAYS_INLINE
+#if COMPILER(GCC) && defined(NDEBUG) && !COMPILER(MINGW)
+#define ALWAYS_INLINE inline __attribute__((__always_inline__))
+#elif COMPILER(MSVC) && defined(NDEBUG)
+#define ALWAYS_INLINE __forceinline
+#else
+#define ALWAYS_INLINE inline
+#endif
+#endif
+
+#ifndef NEVER_INLINE
+#if COMPILER(GCC)
+#define NEVER_INLINE __attribute__((__noinline__))
+#else
+#define NEVER_INLINE
+#endif
+#endif
+
+#ifndef UNLIKELY
+#if COMPILER(GCC)
+#define UNLIKELY(x) __builtin_expect((x), 0)
+#else
+#define UNLIKELY(x) (x)
+#endif
+#endif
+
+#ifndef LIKELY
+#if COMPILER(GCC)
+#define LIKELY(x) __builtin_expect((x), 1)
+#else
+#define LIKELY(x) (x)
+#endif
+#endif
+
+#ifndef NO_RETURN
+#if COMPILER(GCC)
+#define NO_RETURN __attribute((__noreturn__))
+#else
+#define NO_RETURN
+#endif
+#endif
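
These macros are pure hints: under GCC in release builds ALWAYS_INLINE becomes __attribute__((__always_inline__)) and LIKELY/UNLIKELY feed __builtin_expect, while on other compilers each macro degrades to plain `inline` or the bare expression. A small illustrative use (the function itself is hypothetical):

    #include "AlwaysInline.h"
    #include <cstdlib>

    // Mark the error path cold so GCC keeps the common case on the
    // fall-through path; with other compilers UNLIKELY(x) is just (x).
    ALWAYS_INLINE int doubledOrDie(int n)
    {
        if (UNLIKELY(n > 1000000))
            std::abort();
        return n * 2;
    }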
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Assertions.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Assertions.cpp
new file mode 100644
index 0000000..54daf23
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Assertions.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2003, 2006, 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2007-2009 Torch Mobile, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Assertions.h"
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+
+#if PLATFORM(MAC)
+#include <CoreFoundation/CFString.h>
+#endif
+
+#if COMPILER(MSVC) && !PLATFORM(WINCE)
+#ifndef WINVER
+#define WINVER 0x0500
+#endif
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x0500
+#endif
+#include <windows.h>
+#include <crtdbg.h>
+#endif
+
+#if PLATFORM(WINCE)
+#include <winbase.h>
+#endif
+
+extern "C" {
+
+WTF_ATTRIBUTE_PRINTF(1, 0)
+static void vprintf_stderr_common(const char* format, va_list args)
+{
+#if PLATFORM(MAC)
+ if (strstr(format, "%@")) {
+ CFStringRef cfFormat = CFStringCreateWithCString(NULL, format, kCFStringEncodingUTF8);
+ CFStringRef str = CFStringCreateWithFormatAndArguments(NULL, NULL, cfFormat, args);
+
+ int length = CFStringGetMaximumSizeForEncoding(CFStringGetLength(str), kCFStringEncodingUTF8);
+ char* buffer = (char*)malloc(length + 1);
+
+ CFStringGetCString(str, buffer, length, kCFStringEncodingUTF8);
+
+ fputs(buffer, stderr);
+
+ free(buffer);
+ CFRelease(str);
+ CFRelease(cfFormat);
+ } else
+#elif COMPILER(MSVC) && !defined(WINCEBASIC)
+# if !defined(_WIN32_WCE) || (_WIN32_WCE >= 0x600)
+ if (IsDebuggerPresent())
+# endif
+ {
+ size_t size = 1024;
+
+ do {
+ char* buffer = (char*)malloc(size);
+
+ if (buffer == NULL)
+ break;
+
+ if (_vsnprintf(buffer, size, format, args) != -1) {
+#if PLATFORM(WINCE)
+ // WinCE only supports wide chars
+ wchar_t* wideBuffer = (wchar_t*)malloc(size * sizeof(wchar_t));
+ if (wideBuffer == NULL)
+ break;
+ for (unsigned int i = 0; i < size; ++i) {
+ if (!(wideBuffer[i] = buffer[i]))
+ break;
+ }
+ OutputDebugStringW(wideBuffer);
+ free(wideBuffer);
+#else
+ OutputDebugStringA(buffer);
+#endif
+ free(buffer);
+ break;
+ }
+
+ free(buffer);
+ size *= 2;
+ } while (size > 1024);
+ }
+#endif
+ vfprintf(stderr, format, args);
+}
+
+WTF_ATTRIBUTE_PRINTF(1, 2)
+static void printf_stderr_common(const char* format, ...)
+{
+ va_list args;
+ va_start(args, format);
+ vprintf_stderr_common(format, args);
+ va_end(args);
+}
+
+static void printCallSite(const char* file, int line, const char* function)
+{
+#if PLATFORM(WIN) && !PLATFORM(WINCE) && defined _DEBUG
+ _CrtDbgReport(_CRT_WARN, file, line, NULL, "%s\n", function);
+#else
+ printf_stderr_common("(%s:%d %s)\n", file, line, function);
+#endif
+}
+
+void WTFReportAssertionFailure(const char* file, int line, const char* function, const char* assertion)
+{
+ if (assertion)
+ printf_stderr_common("ASSERTION FAILED: %s\n", assertion);
+ else
+ printf_stderr_common("SHOULD NEVER BE REACHED\n");
+ printCallSite(file, line, function);
+}
+
+void WTFReportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* assertion, const char* format, ...)
+{
+ printf_stderr_common("ASSERTION FAILED: ");
+ va_list args;
+ va_start(args, format);
+ vprintf_stderr_common(format, args);
+ va_end(args);
+ printf_stderr_common("\n%s\n", assertion);
+ printCallSite(file, line, function);
+}
+
+void WTFReportArgumentAssertionFailure(const char* file, int line, const char* function, const char* argName, const char* assertion)
+{
+ printf_stderr_common("ARGUMENT BAD: %s, %s\n", argName, assertion);
+ printCallSite(file, line, function);
+}
+
+void WTFReportFatalError(const char* file, int line, const char* function, const char* format, ...)
+{
+ printf_stderr_common("FATAL ERROR: ");
+ va_list args;
+ va_start(args, format);
+ vprintf_stderr_common(format, args);
+ va_end(args);
+ printf_stderr_common("\n");
+ printCallSite(file, line, function);
+}
+
+void WTFReportError(const char* file, int line, const char* function, const char* format, ...)
+{
+ printf_stderr_common("ERROR: ");
+ va_list args;
+ va_start(args, format);
+ vprintf_stderr_common(format, args);
+ va_end(args);
+ printf_stderr_common("\n");
+ printCallSite(file, line, function);
+}
+
+void WTFLog(WTFLogChannel* channel, const char* format, ...)
+{
+ if (channel->state != WTFLogChannelOn)
+ return;
+
+ va_list args;
+ va_start(args, format);
+ vprintf_stderr_common(format, args);
+ va_end(args);
+ if (format[strlen(format) - 1] != '\n')
+ printf_stderr_common("\n");
+}
+
+void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChannel* channel, const char* format, ...)
+{
+ if (channel->state != WTFLogChannelOn)
+ return;
+
+ va_list args;
+ va_start(args, format);
+ vprintf_stderr_common(format, args);
+ va_end(args);
+ if (format[strlen(format) - 1] != '\n')
+ printf_stderr_common("\n");
+ printCallSite(file, line, function);
+}
+
+} // extern "C"
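
The MSVC path above keeps doubling the buffer until _vsnprintf reports success; note that `size > 1024` holds from the second iteration onward, so the loop only ever exits through one of its break statements. A portable sketch of the same grow-and-retry pattern (not part of this file) that also copies the va_list before each attempt, since a va_list cannot safely be consumed twice:

    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>

    // Returns a malloc'd formatted string, or 0 on allocation failure.
    static char* formatToBuffer(const char* format, va_list args)
    {
        size_t size = 1024;
        for (;;) {
            char* buffer = static_cast<char*>(std::malloc(size));
            if (!buffer)
                return 0;
            va_list copy;
            va_copy(copy, args);                  // keep 'args' reusable
            int written = std::vsnprintf(buffer, size, format, copy);
            va_end(copy);
            if (written >= 0 && static_cast<size_t>(written) < size)
                return buffer;                    // fits; caller frees
            std::free(buffer);
            size *= 2;                            // retry with a bigger buffer
        }
    }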
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Assertions.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Assertions.h
new file mode 100644
index 0000000..d691357
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Assertions.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2003, 2006, 2007 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Assertions_h
+#define WTF_Assertions_h
+
+/*
+ no namespaces because this file has to be includable from C and Objective-C
+
+ Note, this file uses many GCC extensions, but it should be compatible with
+ C, Objective C, C++, and Objective C++.
+
+ For non-debug builds, everything is disabled by default.
+ Defining any of the symbols explicitly prevents this from having any effect.
+
+ MSVC7 note: variadic macro support was added in MSVC8, so for now we disable
+ those macros in MSVC7. For more info, see the MSDN document on variadic
+ macros here:
+
+ http://msdn2.microsoft.com/en-us/library/ms177415(VS.80).aspx
+*/
+
+#include "Platform.h"
+
+#if COMPILER(MSVC)
+#include <stddef.h>
+#else
+#include <inttypes.h>
+#endif
+
+#ifdef NDEBUG
+#define ASSERTIONS_DISABLED_DEFAULT 1
+#else
+#define ASSERTIONS_DISABLED_DEFAULT 0
+#endif
+
+#ifndef ASSERT_DISABLED
+#define ASSERT_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#ifndef ASSERT_ARG_DISABLED
+#define ASSERT_ARG_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#ifndef FATAL_DISABLED
+#define FATAL_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#ifndef ERROR_DISABLED
+#define ERROR_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#ifndef LOG_DISABLED
+#define LOG_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#if COMPILER(GCC)
+#define WTF_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#define WTF_PRETTY_FUNCTION __FUNCTION__
+#endif
+
+/* WTF logging functions can process %@ in the format string to log a NSObject* but the printf format attribute
+ emits a warning when %@ is used in the format string. Until <rdar://problem/5195437> is resolved we can't include
+ the attribute when being used from Objective-C code in case it decides to use %@. */
+#if COMPILER(GCC) && !defined(__OBJC__)
+#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments) __attribute__((__format__(printf, formatStringArgument, extraArguments)))
+#else
+#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments)
+#endif
+
+/* These helper functions are always declared, but not necessarily always defined if the corresponding function is disabled. */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum { WTFLogChannelOff, WTFLogChannelOn } WTFLogChannelState;
+
+typedef struct {
+ unsigned mask;
+ const char *defaultName;
+ WTFLogChannelState state;
+} WTFLogChannel;
+
+void WTFReportAssertionFailure(const char* file, int line, const char* function, const char* assertion);
+void WTFReportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* assertion, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6);
+void WTFReportArgumentAssertionFailure(const char* file, int line, const char* function, const char* argName, const char* assertion);
+void WTFReportFatalError(const char* file, int line, const char* function, const char* format, ...) WTF_ATTRIBUTE_PRINTF(4, 5);
+void WTFReportError(const char* file, int line, const char* function, const char* format, ...) WTF_ATTRIBUTE_PRINTF(4, 5);
+void WTFLog(WTFLogChannel* channel, const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChannel* channel, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* CRASH -- gets us into the debugger or the crash reporter -- signals are ignored by the crash reporter so we must do better */
+
+#ifndef CRASH
+#define CRASH() do { \
+ *(int *)(uintptr_t)0xbbadbeef = 0; \
+ ((void(*)())0)(); /* More reliable, but doesn't say BBADBEEF */ \
+} while(false)
+#endif
+
+/* ASSERT, ASSERT_WITH_MESSAGE, ASSERT_NOT_REACHED */
+
+#if PLATFORM(WINCE) && !PLATFORM(TORCHMOBILE)
+/* FIXME: We include this here only to avoid a conflict with the ASSERT macro. */
+#include <windows.h>
+#undef min
+#undef max
+#undef ERROR
+#endif
+
+#if PLATFORM(WIN_OS) || PLATFORM(SYMBIAN)
+/* FIXME: Change to use something other than ASSERT to avoid this conflict with the underlying platform */
+#undef ASSERT
+#endif
+
+#if ASSERT_DISABLED
+
+#define ASSERT(assertion) ((void)0)
+#if COMPILER(MSVC7) || COMPILER(WINSCW)
+#define ASSERT_WITH_MESSAGE(assertion) ((void)0)
+#else
+#define ASSERT_WITH_MESSAGE(assertion, ...) ((void)0)
+#endif /* COMPILER(MSVC7) */
+#define ASSERT_NOT_REACHED() ((void)0)
+#define ASSERT_UNUSED(variable, assertion) ((void)variable)
+
+#else
+
+#define ASSERT(assertion) do \
+ if (!(assertion)) { \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
+ CRASH(); \
+ } \
+while (0)
+#if COMPILER(MSVC7) || COMPILER(WINSCW)
+#define ASSERT_WITH_MESSAGE(assertion) ((void)0)
+#else
+#define ASSERT_WITH_MESSAGE(assertion, ...) do \
+ if (!(assertion)) { \
+ WTFReportAssertionFailureWithMessage(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion, __VA_ARGS__); \
+ CRASH(); \
+ } \
+while (0)
+#endif /* COMPILER(MSVC7) */
+#define ASSERT_NOT_REACHED() do { \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, 0); \
+ CRASH(); \
+} while (0)
+
+#define ASSERT_UNUSED(variable, assertion) ASSERT(assertion)
+
+#endif
+
+/* ASSERT_ARG */
+
+#if ASSERT_ARG_DISABLED
+
+#define ASSERT_ARG(argName, assertion) ((void)0)
+
+#else
+
+#define ASSERT_ARG(argName, assertion) do \
+ if (!(assertion)) { \
+ WTFReportArgumentAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #argName, #assertion); \
+ CRASH(); \
+ } \
+while (0)
+
+#endif
+
+/* COMPILE_ASSERT */
+#ifndef COMPILE_ASSERT
+#define COMPILE_ASSERT(exp, name) typedef int dummy##name [(exp) ? 1 : -1]
+#endif
+
+/* FATAL */
+
+#if FATAL_DISABLED && !COMPILER(MSVC7) && !COMPILER(WINSCW)
+#define FATAL(...) ((void)0)
+#elif COMPILER(MSVC7)
+#define FATAL() ((void)0)
+#else
+#define FATAL(...) do { \
+ WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, __VA_ARGS__); \
+ CRASH(); \
+} while (0)
+#endif
+
+/* LOG_ERROR */
+
+#if ERROR_DISABLED && !COMPILER(MSVC7) && !COMPILER(WINSCW)
+#define LOG_ERROR(...) ((void)0)
+#elif COMPILER(MSVC7) || COMPILER(WINSCW)
+#define LOG_ERROR() ((void)0)
+#else
+#define LOG_ERROR(...) WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, __VA_ARGS__)
+#endif
+
+/* LOG */
+
+#if LOG_DISABLED && !COMPILER(MSVC7) && !COMPILER(WINSCW)
+#define LOG(channel, ...) ((void)0)
+#elif COMPILER(MSVC7) || COMPILER(WINSCW)
+#define LOG() ((void)0)
+#else
+#define LOG(channel, ...) WTFLog(&JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__)
+#define JOIN_LOG_CHANNEL_WITH_PREFIX(prefix, channel) JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel)
+#define JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel) prefix ## channel
+#endif
+
+/* LOG_VERBOSE */
+
+#if LOG_DISABLED && !COMPILER(MSVC7) && !COMPILER(WINSCW)
+#define LOG_VERBOSE(channel, ...) ((void)0)
+#elif COMPILER(MSVC7) || COMPILER(WINSCW)
+#define LOG_VERBOSE(channel) ((void)0)
+#else
+#define LOG_VERBOSE(channel, ...) WTFLogVerbose(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, &JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__)
+#endif
+
+#endif /* WTF_Assertions_h */
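
To see the macros cooperate end to end: a client defines one WTFLogChannel per subsystem, names the variable with the prefix that LOG_CHANNEL_PREFIX supplies, and LOG(channel, ...) token-pastes the two back together. An illustrative setup (the prefix, channel name, and function below are our own, not mandated by this header):

    #define LOG_CHANNEL_PREFIX Log
    #include "Assertions.h"

    WTFLogChannel LogLoader = { 0x00000001, "LogLoader", WTFLogChannelOn };

    int parseHeader(const char* data)
    {
        ASSERT(data);                      // reports and CRASH()es in debug builds
        if (!data[0]) {
            LOG_ERROR("empty header");     // stamps file, line, and function
            return -1;
        }
        // Expands to WTFLog(&LogLoader, ...); a no-op when the channel is
        // off or when LOG_DISABLED is set.
        LOG(Loader, "first byte: %c", data[0]);
        return 0;
    }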
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ByteArray.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ByteArray.cpp
new file mode 100644
index 0000000..526f147
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ByteArray.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ByteArray.h"
+
+namespace WTF {
+
+PassRefPtr<ByteArray> ByteArray::create(size_t size)
+{
+ unsigned char* buffer = new unsigned char[size + sizeof(ByteArray) - sizeof(size_t)];
+ ASSERT((reinterpret_cast<size_t>(buffer) & 3) == 0);
+ return adoptRef(new (buffer) ByteArray(size));
+}
+
+}
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ByteArray.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ByteArray.h
new file mode 100644
index 0000000..96e9cc2
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ByteArray.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ByteArray_h
+#define ByteArray_h
+
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefCounted.h>
+
+namespace WTF {
+ class ByteArray : public RefCountedBase {
+ public:
+ unsigned length() const { return m_size; }
+
+ void set(unsigned index, double value)
+ {
+ if (index >= m_size)
+ return;
+ if (!(value > 0)) // Clamp NaN to 0
+ value = 0;
+ else if (value > 255)
+ value = 255;
+ m_data[index] = static_cast<unsigned char>(value + 0.5);
+ }
+
+ bool get(unsigned index, unsigned char& result) const
+ {
+ if (index >= m_size)
+ return false;
+ result = m_data[index];
+ return true;
+ }
+
+ unsigned char* data() { return m_data; }
+
+ void deref()
+ {
+ if (derefBase()) {
+ // We allocated with new unsigned char[] in create(),
+ // and then used placement new to construct the object.
+ this->~ByteArray();
+ delete[] reinterpret_cast<unsigned char*>(this);
+ }
+ }
+
+ static PassRefPtr<ByteArray> create(size_t size);
+
+ private:
+ ByteArray(size_t size)
+ : m_size(size)
+ {
+ }
+ size_t m_size;
+ unsigned char m_data[sizeof(size_t)];
+ };
+}
+
+#endif
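
A short usage sketch (hypothetical caller). The allocation trick is worth spelling out: m_data is declared as a sizeof(size_t)-byte placeholder, create() allocates size + sizeof(ByteArray) - sizeof(size_t) bytes so that the placeholder grows into a size-byte inline buffer, and deref() undoes the placement new by hand:

    #include <wtf/ByteArray.h>
    #include <wtf/RefPtr.h>

    static void byteArrayExample()
    {
        RefPtr<WTF::ByteArray> bytes = WTF::ByteArray::create(3);
        bytes->set(0, 12.4);   // rounds to 12
        bytes->set(1, 300.0);  // clamps to 255
        bytes->set(2, -5.0);   // anything not > 0 (including NaN) is stored as 0

        unsigned char value;
        if (bytes->get(1, value)) {
            // value == 255 here; get() returns false for out-of-range indices
        }
    } // RefPtr's destructor calls deref(), which runs ~ByteArray() and delete[]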
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CONTRIBUTORS.pthreads-win32 b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CONTRIBUTORS.pthreads-win32
new file mode 100644
index 0000000..7de0f26
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CONTRIBUTORS.pthreads-win32
@@ -0,0 +1,137 @@
+This is a copy of the CONTRIBUTORS file for the Pthreads-win32 library, downloaded
+from http://sourceware.org/cgi-bin/cvsweb.cgi/~checkout~/pthreads/CONTRIBUTORS?rev=1.32&cvsroot=pthreads-win32
+
+Included here to complement the Pthreads-win32 license header in the wtf/ThreadingWin.cpp file.
+WebKit uses ThreadCondition code derived from Pthreads-win32.
+
+-------------------------------------------------------------------------------
+
+Contributors (in approximate order of appearance)
+
+[See also the ChangeLog file where individuals are
+attributed in log entries. Likewise in the FAQ file.]
+
+Ben Elliston bje at cygnus dot com
+ Initiated the project;
+ setup the project infrastructure (CVS, web page, etc.);
+ early prototype routines.
+Ross Johnson rpj at callisto dot canberra dot edu dot au
+ early prototype routines;
+ ongoing project coordination/maintenance;
+ implementation of spin locks and barriers;
+ various enhancements;
+ bug fixes;
+ documentation;
+ testsuite.
+Robert Colquhoun rjc at trump dot net dot au
+ Early bug fixes.
+John E. Bossom John dot Bossom at cognos dot com
+ Contributed substantial original working implementation;
+ bug fixes;
+ ongoing guidance and standards interpretation.
+Anders Norlander anorland at hem2 dot passagen dot se
+ Early enhancements and runtime checking for supported
+ Win32 routines.
+Tor Lillqvist tml at iki dot fi
+ General enhancements;
+ early bug fixes to condition variables.
+Scott Lightner scott at curriculum dot com
+ Bug fix.
+Kevin Ruland Kevin dot Ruland at anheuser-busch dot com
+ Various bug fixes.
+Mike Russo miker at eai dot com
+ Bug fix.
+Mark E. Armstrong avail at pacbell dot net
+ Bug fixes.
+Lorin Hochstein lmh at xiphos dot ca
+ general bug fixes; bug fixes to condition variables.
+Peter Slacik Peter dot Slacik at tatramed dot sk
+ Bug fixes.
+Mumit Khan khan at xraylith dot wisc dot edu
+ Fixes to work with Mingw32.
+Milan Gardian mg at tatramed dot sk
+ Bug fixes and reports/analyses of obscure problems.
+Aurelio Medina aureliom at crt dot com
+ First implementation of read-write locks.
+Graham Dumpleton Graham dot Dumpleton at ra dot pad dot otc dot telstra dot com dot au
+ Bug fix in condition variables.
+Tristan Savatier tristan at mpegtv dot com
+ WinCE port.
+Erik Hensema erik at hensema dot xs4all dot nl
+ Bug fixes.
+Rich Peters rpeters at micro-magic dot com
+Todd Owen towen at lucidcalm dot dropbear dot id dot au
+ Bug fixes to dll loading.
+Jason Nye jnye at nbnet dot nb dot ca
+ Implementation of async cancelation.
+Fred Forester fforest at eticomm dot net
+Kevin D. Clark kclark at cabletron dot com
+David Baggett dmb at itasoftware dot com
+ Bug fixes.
+Paul Redondo paul at matchvision dot com
+Scott McCaskill scott at 3dfx dot com
+ Bug fixes.
+Jef Gearhart jgearhart at tpssys dot com
+ Bug fix.
+Arthur Kantor akantor at bexusa dot com
+ Mutex enhancements.
+Steven Reddie smr at essemer dot com dot au
+ Bug fix.
+Alexander Terekhov TEREKHOV at de dot ibm dot com
+ Re-implemented and improved read-write locks;
+ (with Louis Thomas) re-implemented and improved
+ condition variables;
+ enhancements to semaphores;
+ enhancements to mutexes;
+ new mutex implementation in 'futex' style;
+ suggested a robust implementation of pthread_once
+ similar to that implemented by V.Kliathcko;
+ system clock change handling re CV timeouts;
+ bug fixes.
+Thomas Pfaff tpfaff at gmx dot net
+ Changes to make C version usable with C++ applications;
+ re-implemented mutex routines to avoid Win32 mutexes
+ and TryEnterCriticalSection;
+ procedure to fix Mingw32 thread-safety issues.
+Franco Bez franco dot bez at gmx dot de
+ procedure to fix Mingw32 thread-safety issues.
+Louis Thomas lthomas at arbitrade dot com
+ (with Alexander Terekhov) re-implemented and improved
+ condition variables.
+David Korn dgk at research dot att dot com
+ Ported to UWIN.
+Phil Frisbie, Jr. phil at hawksoft dot com
+ Bug fix.
+Ralf Brese Ralf dot Brese at pdb4 dot siemens dot de
+ Bug fix.
+prionx at juno dot com prionx at juno dot com
+ Bug fixes.
+Max Woodbury mtew at cds dot duke dot edu
+ POSIX versioning conditionals;
+ reduced namespace pollution;
+ idea to separate routines to reduce statically
+ linked image sizes.
+Rob Fanner rfanner at stonethree dot com
+ Bug fix.
+Michael Johnson michaelj at maine dot rr dot com
+ Bug fix.
+Nicolas Barry boozai at yahoo dot com
+ Bug fixes.
+Piet van Bruggen pietvb at newbridges dot nl
+ Bug fix.
+Makoto Kato raven at oldskool dot jp
+ AMD64 port.
+Panagiotis E. Hadjidoukas peh at hpclab dot ceid dot upatras dot gr
+ Contributed the QueueUserAPCEx package which
+ makes preemptive async cancelation possible.
+Will Bryant will dot bryant at ecosm dot com
+ Borland compiler patch and makefile.
+Anuj Goyal anuj dot goyal at gmail dot com
+ Port to Digital Mars compiler.
+Gottlob Frege gottlobfrege at gmail dot com
+ re-implemented pthread_once (version 2)
+ (pthread_once cancellation added by rpj).
+Vladimir Kliatchko vladimir at kliatchko dot com
+ reimplemented pthread_once with the same form
+ as described by A.Terekhov (later version 2);
+ implementation of MCS (Mellor-Crummey/Scott) locks. \ No newline at end of file
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CrossThreadRefCounted.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CrossThreadRefCounted.h
new file mode 100644
index 0000000..6a05211
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CrossThreadRefCounted.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CrossThreadRefCounted_h
+#define CrossThreadRefCounted_h
+
+#include <wtf/Noncopyable.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/Threading.h>
+
+namespace WTF {
+
+    // Used to allow sharing data across classes and threads (like ThreadSafeShared).
+ //
+ // Why not just use ThreadSafeShared?
+ // ThreadSafeShared can have a significant perf impact when used in low level classes
+ // (like UString) that get ref/deref'ed a lot. This class has the benefit of doing fast ref
+ // counts like RefPtr whenever possible, but it has the downside that you need to copy it
+ // to use it on another thread.
+ //
+ // Is this class threadsafe?
+ // While each instance of the class is not threadsafe, the copied instance is threadsafe
+ // with respect to the original and any other copies. The underlying m_data is jointly
+ // owned by the original instance and all copies.
+ template<class T>
+ class CrossThreadRefCounted : public Noncopyable {
+ public:
+ static PassRefPtr<CrossThreadRefCounted<T> > create(T* data)
+ {
+ return adoptRef(new CrossThreadRefCounted<T>(data, 0));
+ }
+
+ // Used to make an instance that can be used on another thread.
+ PassRefPtr<CrossThreadRefCounted<T> > crossThreadCopy();
+
+ void ref();
+ void deref();
+ T* release();
+
+ bool isShared() const
+ {
+ return !m_refCounter.hasOneRef() || (m_threadSafeRefCounter && !m_threadSafeRefCounter->hasOneRef());
+ }
+
+#ifndef NDEBUG
+ bool mayBePassedToAnotherThread() const { ASSERT(!m_threadId); return m_refCounter.hasOneRef(); }
+#endif
+
+ private:
+ CrossThreadRefCounted(T* data, ThreadSafeSharedBase* threadedCounter)
+ : m_threadSafeRefCounter(threadedCounter)
+ , m_data(data)
+#ifndef NDEBUG
+ , m_threadId(0)
+#endif
+ {
+ }
+
+ ~CrossThreadRefCounted()
+ {
+ if (!m_threadSafeRefCounter)
+ delete m_data;
+ }
+
+ void threadSafeDeref();
+
+ RefCountedBase m_refCounter;
+ ThreadSafeSharedBase* m_threadSafeRefCounter;
+ T* m_data;
+#ifndef NDEBUG
+ ThreadIdentifier m_threadId;
+#endif
+ };
+
+ template<class T>
+ void CrossThreadRefCounted<T>::ref()
+ {
+ ASSERT(!m_threadId || m_threadId == currentThread());
+ m_refCounter.ref();
+#ifndef NDEBUG
+ // Store the threadId as soon as the ref count gets to 2.
+ // The class gets created with a ref count of 1 and then passed
+        // to another thread where the ref count gets increased. This
+ // is a heuristic but it seems to always work and has helped
+ // find some bugs.
+ if (!m_threadId && m_refCounter.refCount() == 2)
+ m_threadId = currentThread();
+#endif
+ }
+
+ template<class T>
+ void CrossThreadRefCounted<T>::deref()
+ {
+ ASSERT(!m_threadId || m_threadId == currentThread());
+ if (m_refCounter.derefBase()) {
+ threadSafeDeref();
+ delete this;
+ } else {
+#ifndef NDEBUG
+ // Clear the threadId when the ref goes to 1 because it
+ // is safe to be passed to another thread at this point.
+ if (m_threadId && m_refCounter.refCount() == 1)
+ m_threadId = 0;
+#endif
+ }
+ }
+
+ template<class T>
+ T* CrossThreadRefCounted<T>::release()
+ {
+ ASSERT(!isShared());
+
+ T* data = m_data;
+ m_data = 0;
+ return data;
+ }
+
+ template<class T>
+ PassRefPtr<CrossThreadRefCounted<T> > CrossThreadRefCounted<T>::crossThreadCopy()
+ {
+ if (m_threadSafeRefCounter)
+ m_threadSafeRefCounter->ref();
+ else
+ m_threadSafeRefCounter = new ThreadSafeSharedBase(2);
+ return adoptRef(new CrossThreadRefCounted<T>(m_data, m_threadSafeRefCounter));
+ }
+
+
+ template<class T>
+ void CrossThreadRefCounted<T>::threadSafeDeref()
+ {
+ if (m_threadSafeRefCounter && m_threadSafeRefCounter->derefBase()) {
+ delete m_threadSafeRefCounter;
+ m_threadSafeRefCounter = 0;
+ }
+ }
+} // namespace WTF
+
+using WTF::CrossThreadRefCounted;
+
+#endif // CrossThreadRefCounted_h
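
A sketch of the intended lifecycle (hypothetical producer/worker pair; the int payload and the queue are stand-ins):

    #include <wtf/CrossThreadRefCounted.h>
    #include <wtf/RefPtr.h>

    static void producerThread()
    {
        // While the object stays on one thread, refs use the fast non-atomic counter.
        RefPtr<WTF::CrossThreadRefCounted<int> > shared =
            WTF::CrossThreadRefCounted<int>::create(new int(42));

        // To hand the data to another thread, make a copy first; the original and
        // the copy then jointly own the int through one ThreadSafeSharedBase.
        RefPtr<WTF::CrossThreadRefCounted<int> > forWorker = shared->crossThreadCopy();

        // ... post forWorker to the worker's message queue (not shown) ...
        // The payload is deleted when the last instance sharing the thread-safe
        // counter is destroyed.
    }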
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CurrentTime.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CurrentTime.cpp
new file mode 100644
index 0000000..73c2c5c
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CurrentTime.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (C) 2008 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CurrentTime.h"
+
+#if PLATFORM(WIN_OS)
+// Windows is first since we want to use hires timers, despite PLATFORM(CF)
+// being defined.
+// If defined, WIN32_LEAN_AND_MEAN disables timeBeginPeriod/timeEndPeriod.
+#undef WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <math.h>
+#include <stdint.h>
+#if HAVE(SYS_TIMEB_H)
+#include <sys/timeb.h>
+#endif
+#if !PLATFORM(WINCE)
+#include <sys/types.h>
+#endif
+#include <time.h>
+#elif PLATFORM(CF)
+#include <CoreFoundation/CFDate.h>
+#elif PLATFORM(GTK)
+#include <glib.h>
+#elif PLATFORM(WX)
+#include <wx/datetime.h>
+#else // Posix systems relying on the gettimeofday()
+#include <sys/time.h>
+#endif
+
+namespace WTF {
+
+const double msPerSecond = 1000.0;
+
+#if PLATFORM(WIN_OS)
+
+static LARGE_INTEGER qpcFrequency;
+static bool syncedTime;
+
+static double highResUpTime()
+{
+ // We use QPC, but only after sanity checking its result, due to bugs:
+ // http://support.microsoft.com/kb/274323
+ // http://support.microsoft.com/kb/895980
+    // http://msdn.microsoft.com/en-us/library/ms644904.aspx ("...you can get different results on different processors due to bugs in the basic input/output system (BIOS) or the hardware abstraction layer (HAL).")
+
+ static LARGE_INTEGER qpcLast;
+ static DWORD tickCountLast;
+ static bool inited;
+
+ LARGE_INTEGER qpc;
+ QueryPerformanceCounter(&qpc);
+ DWORD tickCount = GetTickCount();
+
+ if (inited) {
+ __int64 qpcElapsed = ((qpc.QuadPart - qpcLast.QuadPart) * 1000) / qpcFrequency.QuadPart;
+ __int64 tickCountElapsed;
+ if (tickCount >= tickCountLast)
+ tickCountElapsed = (tickCount - tickCountLast);
+ else {
+#if COMPILER(MINGW)
+ __int64 tickCountLarge = tickCount + 0x100000000ULL;
+#else
+ __int64 tickCountLarge = tickCount + 0x100000000I64;
+#endif
+ tickCountElapsed = tickCountLarge - tickCountLast;
+ }
+
+ // force a re-sync if QueryPerformanceCounter differs from GetTickCount by more than 500ms.
+ // (500ms value is from http://support.microsoft.com/kb/274323)
+ __int64 diff = tickCountElapsed - qpcElapsed;
+ if (diff > 500 || diff < -500)
+ syncedTime = false;
+ } else
+ inited = true;
+
+ qpcLast = qpc;
+ tickCountLast = tickCount;
+
+ return (1000.0 * qpc.QuadPart) / static_cast<double>(qpcFrequency.QuadPart);
+}
+
+static double lowResUTCTime()
+{
+#if PLATFORM(WINCE)
+ SYSTEMTIME systemTime;
+ GetSystemTime(&systemTime);
+ struct tm tmtime;
+ tmtime.tm_year = systemTime.wYear - 1900;
+ tmtime.tm_mon = systemTime.wMonth - 1;
+ tmtime.tm_mday = systemTime.wDay;
+ tmtime.tm_wday = systemTime.wDayOfWeek;
+ tmtime.tm_hour = systemTime.wHour;
+ tmtime.tm_min = systemTime.wMinute;
+ tmtime.tm_sec = systemTime.wSecond;
+ time_t timet = mktime(&tmtime);
+ return timet * msPerSecond + systemTime.wMilliseconds;
+#else
+ struct _timeb timebuffer;
+ _ftime(&timebuffer);
+ return timebuffer.time * msPerSecond + timebuffer.millitm;
+#endif
+}
+
+static bool qpcAvailable()
+{
+ static bool available;
+ static bool checked;
+
+ if (checked)
+ return available;
+
+ available = QueryPerformanceFrequency(&qpcFrequency);
+ checked = true;
+ return available;
+}
+
+double currentTime()
+{
+ // Use a combination of ftime and QueryPerformanceCounter.
+ // ftime returns the information we want, but doesn't have sufficient resolution.
+ // QueryPerformanceCounter has high resolution, but is only usable to measure time intervals.
+ // To combine them, we call ftime and QueryPerformanceCounter initially. Later calls will use QueryPerformanceCounter
+ // by itself, adding the delta to the saved ftime. We periodically re-sync to correct for drift.
+ static bool started;
+ static double syncLowResUTCTime;
+ static double syncHighResUpTime;
+ static double lastUTCTime;
+
+ double lowResTime = lowResUTCTime();
+
+ if (!qpcAvailable())
+ return lowResTime / 1000.0;
+
+ double highResTime = highResUpTime();
+
+ if (!syncedTime) {
+ timeBeginPeriod(1); // increase time resolution around low-res time getter
+ syncLowResUTCTime = lowResTime = lowResUTCTime();
+ timeEndPeriod(1); // restore time resolution
+ syncHighResUpTime = highResTime;
+ syncedTime = true;
+ }
+
+ double highResElapsed = highResTime - syncHighResUpTime;
+ double utc = syncLowResUTCTime + highResElapsed;
+
+ // force a clock re-sync if we've drifted
+ double lowResElapsed = lowResTime - syncLowResUTCTime;
+ const double maximumAllowedDriftMsec = 15.625 * 2.0; // 2x the typical low-res accuracy
+ if (fabs(highResElapsed - lowResElapsed) > maximumAllowedDriftMsec)
+ syncedTime = false;
+
+ // make sure time doesn't run backwards (only correct if difference is < 2 seconds, since DST or clock changes could occur)
+ const double backwardTimeLimit = 2000.0;
+ if (utc < lastUTCTime && (lastUTCTime - utc) < backwardTimeLimit)
+ return lastUTCTime / 1000.0;
+ lastUTCTime = utc;
+ return utc / 1000.0;
+}
+
+#elif PLATFORM(CF)
+
+double currentTime()
+{
+ return CFAbsoluteTimeGetCurrent() + kCFAbsoluteTimeIntervalSince1970;
+}
+
+#elif PLATFORM(GTK)
+
+// Note: GTK on Windows will pick up the PLATFORM(WIN) implementation above, which provides
+// better accuracy than the Windows implementation of g_get_current_time
+// (http://www.google.com/codesearch/p?hl=en#HHnNRjks1t0/glib-2.5.2/glib/gmain.c&q=g_get_current_time).
+// Non-Windows GTK builds could use gettimeofday() directly, but for the sake of consistency let's use the GTK function.
+double currentTime()
+{
+ GTimeVal now;
+ g_get_current_time(&now);
+ return static_cast<double>(now.tv_sec) + static_cast<double>(now.tv_usec / 1000000.0);
+}
+
+#elif PLATFORM(WX)
+
+double currentTime()
+{
+ wxDateTime now = wxDateTime::UNow();
+ return (double)now.GetTicks() + (double)(now.GetMillisecond() / 1000.0);
+}
+
+#else // Other Posix systems rely on the gettimeofday().
+
+double currentTime()
+{
+ struct timeval now;
+ struct timezone zone;
+
+ gettimeofday(&now, &zone);
+ return static_cast<double>(now.tv_sec) + (double)(now.tv_usec / 1000000.0);
+}
+
+#endif
+
+} // namespace WTF
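
The Windows path above is an instance of a general pattern: anchor a high-resolution monotonic counter to a coarse wall clock, and re-anchor whenever the two drift apart. The same idea in a compact, platform-neutral sketch (the two clock-source parameters are assumptions for illustration, not APIs from this file):

    #include <math.h>

    // coarseWallMs: low-resolution UTC clock; fineUpMs: high-resolution uptime.
    static double hybridNowSeconds(double coarseWallMs, double fineUpMs)
    {
        static bool synced = false;
        static double anchorWallMs, anchorUpMs;

        if (!synced) { // first call, or drift was detected on a previous call
            anchorWallMs = coarseWallMs;
            anchorUpMs = fineUpMs;
            synced = true;
        }

        double utcMs = anchorWallMs + (fineUpMs - anchorUpMs);

        // Re-anchor next time if the clocks disagree by more than ~2x the
        // coarse clock's typical 15.625 ms tick, as currentTime() does above.
        if (fabs((coarseWallMs - anchorWallMs) - (fineUpMs - anchorUpMs)) > 31.25)
            synced = false;

        return utcMs / 1000.0;
    }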
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CurrentTime.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CurrentTime.h
new file mode 100644
index 0000000..31f1ec8
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/CurrentTime.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (C) 2008 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CurrentTime_h
+#define CurrentTime_h
+
+namespace WTF {
+
+    // Returns the current system (UTC) time in seconds since January 1, 1970.
+    // Precision varies depending on the platform, but is usually as good as
+    // or better than a millisecond.
+ double currentTime();
+
+} // namespace WTF
+
+using WTF::currentTime;
+
+#endif // CurrentTime_h
+
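Since the return value is a plain double in seconds, interval measurement is just subtraction; a hypothetical timing snippet:

    #include <wtf/CurrentTime.h>
    #include <stdio.h>

    static void timeSomething()
    {
        double start = WTF::currentTime();  // seconds since 1 Jan 1970 (UTC)
        // ... the work being measured ...
        double elapsedMs = (WTF::currentTime() - start) * 1000.0;
        printf("elapsed: %.3f ms\n", elapsedMs);
    }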
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/DateMath.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/DateMath.cpp
new file mode 100644
index 0000000..0b76649
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/DateMath.cpp
@@ -0,0 +1,917 @@
+/*
+ * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
+ * Copyright (C) 2006, 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ * Copyright (C) 2007-2009 Torch Mobile, Inc.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Alternatively, the contents of this file may be used under the terms
+ * of either the Mozilla Public License Version 1.1, found at
+ * http://www.mozilla.org/MPL/ (the "MPL") or the GNU General Public
+ * License Version 2.0, found at http://www.fsf.org/copyleft/gpl.html
+ * (the "GPL"), in which case the provisions of the MPL or the GPL are
+ * applicable instead of those above. If you wish to allow use of your
+ * version of this file only under the terms of one of those two
+ * licenses (the MPL or the GPL) and not to allow others to use your
+ * version of this file under the LGPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the MPL or the GPL, as the case may be.
+ * If you do not delete the provisions above, a recipient may use your
+ * version of this file under any of the LGPL, the MPL or the GPL.
+ */
+
+#include "config.h"
+#include "DateMath.h"
+
+#include "Assertions.h"
+#include "ASCIICType.h"
+#include "CurrentTime.h"
+#include "MathExtras.h"
+#include "StringExtras.h"
+
+#include <algorithm>
+#include <limits.h>
+#include <limits>
+#include <stdint.h>
+#include <time.h>
+
+
+#if HAVE(ERRNO_H)
+#include <errno.h>
+#endif
+
+#if PLATFORM(DARWIN)
+#include <notify.h>
+#endif
+
+#if HAVE(SYS_TIME_H)
+#include <sys/time.h>
+#endif
+
+#if HAVE(SYS_TIMEB_H)
+#include <sys/timeb.h>
+#endif
+
+#define NaN std::numeric_limits<double>::quiet_NaN()
+
+namespace WTF {
+
+/* Constants */
+
+static const double minutesPerDay = 24.0 * 60.0;
+static const double secondsPerDay = 24.0 * 60.0 * 60.0;
+static const double secondsPerYear = 24.0 * 60.0 * 60.0 * 365.0;
+
+static const double usecPerSec = 1000000.0;
+
+static const double maxUnixTime = 2145859200.0; // 12/31/2037
+
+// Day of year for the first day of each month, where index 0 is January, and day 0 is January 1.
+// First for non-leap years, then for leap years.
+static const int firstDayOfMonth[2][12] = {
+ {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334},
+ {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335}
+};
+
+static inline bool isLeapYear(int year)
+{
+ if (year % 4 != 0)
+ return false;
+ if (year % 400 == 0)
+ return true;
+ if (year % 100 == 0)
+ return false;
+ return true;
+}
+
+static inline int daysInYear(int year)
+{
+ return 365 + isLeapYear(year);
+}
+
+static inline double daysFrom1970ToYear(int year)
+{
+ // The Gregorian Calendar rules for leap years:
+ // Every fourth year is a leap year. 2004, 2008, and 2012 are leap years.
+ // However, every hundredth year is not a leap year. 1900 and 2100 are not leap years.
+ // Every four hundred years, there's a leap year after all. 2000 and 2400 are leap years.
+
+ static const int leapDaysBefore1971By4Rule = 1970 / 4;
+ static const int excludedLeapDaysBefore1971By100Rule = 1970 / 100;
+ static const int leapDaysBefore1971By400Rule = 1970 / 400;
+
+ const double yearMinusOne = year - 1;
+ const double yearsToAddBy4Rule = floor(yearMinusOne / 4.0) - leapDaysBefore1971By4Rule;
+ const double yearsToExcludeBy100Rule = floor(yearMinusOne / 100.0) - excludedLeapDaysBefore1971By100Rule;
+ const double yearsToAddBy400Rule = floor(yearMinusOne / 400.0) - leapDaysBefore1971By400Rule;
+
+ return 365.0 * (year - 1970) + yearsToAddBy4Rule - yearsToExcludeBy100Rule + yearsToAddBy400Rule;
+}
+
+static inline double msToDays(double ms)
+{
+ return floor(ms / msPerDay);
+}
+
+static inline int msToYear(double ms)
+{
+ int approxYear = static_cast<int>(floor(ms / (msPerDay * 365.2425)) + 1970);
+ double msFromApproxYearTo1970 = msPerDay * daysFrom1970ToYear(approxYear);
+ if (msFromApproxYearTo1970 > ms)
+ return approxYear - 1;
+ if (msFromApproxYearTo1970 + msPerDay * daysInYear(approxYear) <= ms)
+ return approxYear + 1;
+ return approxYear;
+}
+
+static inline int dayInYear(double ms, int year)
+{
+ return static_cast<int>(msToDays(ms) - daysFrom1970ToYear(year));
+}
+
+static inline double msToMilliseconds(double ms)
+{
+ double result = fmod(ms, msPerDay);
+ if (result < 0)
+ result += msPerDay;
+ return result;
+}
+
+// 0: Sunday, 1: Monday, etc.
+static inline int msToWeekDay(double ms)
+{
+ int wd = (static_cast<int>(msToDays(ms)) + 4) % 7;
+ if (wd < 0)
+ wd += 7;
+ return wd;
+}
+
+static inline int msToSeconds(double ms)
+{
+ double result = fmod(floor(ms / msPerSecond), secondsPerMinute);
+ if (result < 0)
+ result += secondsPerMinute;
+ return static_cast<int>(result);
+}
+
+static inline int msToMinutes(double ms)
+{
+ double result = fmod(floor(ms / msPerMinute), minutesPerHour);
+ if (result < 0)
+ result += minutesPerHour;
+ return static_cast<int>(result);
+}
+
+static inline int msToHours(double ms)
+{
+ double result = fmod(floor(ms/msPerHour), hoursPerDay);
+ if (result < 0)
+ result += hoursPerDay;
+ return static_cast<int>(result);
+}
+
+static inline int monthFromDayInYear(int dayInYear, bool leapYear)
+{
+ const int d = dayInYear;
+ int step;
+
+ if (d < (step = 31))
+ return 0;
+ step += (leapYear ? 29 : 28);
+ if (d < step)
+ return 1;
+ if (d < (step += 31))
+ return 2;
+ if (d < (step += 30))
+ return 3;
+ if (d < (step += 31))
+ return 4;
+ if (d < (step += 30))
+ return 5;
+ if (d < (step += 31))
+ return 6;
+ if (d < (step += 31))
+ return 7;
+ if (d < (step += 30))
+ return 8;
+ if (d < (step += 31))
+ return 9;
+ if (d < (step += 30))
+ return 10;
+ return 11;
+}
+
+static inline bool checkMonth(int dayInYear, int& startDayOfThisMonth, int& startDayOfNextMonth, int daysInThisMonth)
+{
+ startDayOfThisMonth = startDayOfNextMonth;
+ startDayOfNextMonth += daysInThisMonth;
+ return (dayInYear <= startDayOfNextMonth);
+}
+
+static inline int dayInMonthFromDayInYear(int dayInYear, bool leapYear)
+{
+ const int d = dayInYear;
+ int step;
+ int next = 30;
+
+ if (d <= next)
+ return d + 1;
+ const int daysInFeb = (leapYear ? 29 : 28);
+ if (checkMonth(d, step, next, daysInFeb))
+ return d - step;
+ if (checkMonth(d, step, next, 31))
+ return d - step;
+ if (checkMonth(d, step, next, 30))
+ return d - step;
+ if (checkMonth(d, step, next, 31))
+ return d - step;
+ if (checkMonth(d, step, next, 30))
+ return d - step;
+ if (checkMonth(d, step, next, 31))
+ return d - step;
+ if (checkMonth(d, step, next, 31))
+ return d - step;
+ if (checkMonth(d, step, next, 30))
+ return d - step;
+ if (checkMonth(d, step, next, 31))
+ return d - step;
+ if (checkMonth(d, step, next, 30))
+ return d - step;
+ step = next;
+ return d - step;
+}
+
+static inline int monthToDayInYear(int month, bool isLeapYear)
+{
+ return firstDayOfMonth[isLeapYear][month];
+}
+
+static inline double timeToMS(double hour, double min, double sec, double ms)
+{
+ return (((hour * minutesPerHour + min) * secondsPerMinute + sec) * msPerSecond + ms);
+}
+
+static int dateToDayInYear(int year, int month, int day)
+{
+ year += month / 12;
+
+ month %= 12;
+ if (month < 0) {
+ month += 12;
+ --year;
+ }
+
+ int yearday = static_cast<int>(floor(daysFrom1970ToYear(year)));
+ int monthday = monthToDayInYear(month, isLeapYear(year));
+
+ return yearday + monthday + day - 1;
+}
+
+double getCurrentUTCTime()
+{
+ return floor(getCurrentUTCTimeWithMicroseconds());
+}
+
+// Returns current time in milliseconds since 1 Jan 1970.
+double getCurrentUTCTimeWithMicroseconds()
+{
+ return currentTime() * 1000.0;
+}
+
+void getLocalTime(const time_t* localTime, struct tm* localTM)
+{
+#if COMPILER(MSVC7) || COMPILER(MINGW) || PLATFORM(WINCE)
+ *localTM = *localtime(localTime);
+#elif COMPILER(MSVC)
+ localtime_s(localTM, localTime);
+#else
+ localtime_r(localTime, localTM);
+#endif
+}
+
+// There is a hard limit at 2038 that we currently do not have a workaround
+// for (rdar://problem/5052975).
+static inline int maximumYearForDST()
+{
+ return 2037;
+}
+
+static inline int minimumYearForDST()
+{
+ // Because of the 2038 issue (see maximumYearForDST) if the current year is
+ // greater than the max year minus 27 (2010), we want to use the max year
+ // minus 27 instead, to ensure there is a range of 28 years that all years
+ // can map to.
+    return std::min(msToYear(getCurrentUTCTime()), maximumYearForDST() - 27);
+}
+
+/*
+ * Find an equivalent year for the one given, where equivalence is determined by
+ * the two years having the same leapness and the first day of the year falling
+ * on the same day of the week.
+ *
+ * This function returns a year between the current year and 2037; however, it
+ * will potentially return incorrect results if the current year is after 2010
+ * (rdar://problem/5052975), or if the year passed in is before 1900 or after
+ * 2100 (rdar://problem/5055038).
+ */
+int equivalentYearForDST(int year)
+{
+ // It is ok if the cached year is not the current year as long as the rules
+ // for DST did not change between the two years; if they did the app would need
+ // to be restarted.
+ static int minYear = minimumYearForDST();
+ int maxYear = maximumYearForDST();
+
+ int difference;
+ if (year > maxYear)
+ difference = minYear - year;
+ else if (year < minYear)
+ difference = maxYear - year;
+ else
+ return year;
+
+ int quotient = difference / 28;
+ int product = (quotient) * 28;
+
+ year += product;
+ ASSERT((year >= minYear && year <= maxYear) || (product - year == static_cast<int>(NaN)));
+ return year;
+}
+
+static int32_t calculateUTCOffset()
+{
+ time_t localTime = time(0);
+ tm localt;
+ getLocalTime(&localTime, &localt);
+
+ // Get the difference between this time zone and UTC on the 1st of January of this year.
+ localt.tm_sec = 0;
+ localt.tm_min = 0;
+ localt.tm_hour = 0;
+ localt.tm_mday = 1;
+ localt.tm_mon = 0;
+ // Not setting localt.tm_year!
+ localt.tm_wday = 0;
+ localt.tm_yday = 0;
+ localt.tm_isdst = 0;
+#if HAVE(TM_GMTOFF)
+ localt.tm_gmtoff = 0;
+#endif
+#if HAVE(TM_ZONE)
+ localt.tm_zone = 0;
+#endif
+
+#if HAVE(TIMEGM)
+ time_t utcOffset = timegm(&localt) - mktime(&localt);
+#else
+ // Using a canned date of 01/01/2009 on platforms with weaker date-handling foo.
+ localt.tm_year = 109;
+ time_t utcOffset = 1230768000 - mktime(&localt);
+#endif
+
+ return static_cast<int32_t>(utcOffset * 1000);
+}
+
+#if PLATFORM(DARWIN)
+static int32_t s_cachedUTCOffset; // In milliseconds. An assumption here is that access to an int32_t variable is atomic on platforms that take this code path.
+static bool s_haveCachedUTCOffset;
+static int s_notificationToken;
+#endif
+
+/*
+ * Get the difference in milliseconds between this time zone and UTC (GMT)
+ * NOT including DST.
+ */
+double getUTCOffset()
+{
+#if PLATFORM(DARWIN)
+ if (s_haveCachedUTCOffset) {
+ int notified;
+ uint32_t status = notify_check(s_notificationToken, &notified);
+ if (status == NOTIFY_STATUS_OK && !notified)
+ return s_cachedUTCOffset;
+ }
+#endif
+
+ int32_t utcOffset = calculateUTCOffset();
+
+#if PLATFORM(DARWIN)
+ // Theoretically, it is possible that several threads will be executing this code at once, in which case we will have a race condition,
+ // and a newer value may be overwritten. In practice, time zones don't change that often.
+ s_cachedUTCOffset = utcOffset;
+#endif
+
+ return utcOffset;
+}
+
+/*
+ * Get the DST offset for the time passed in. Takes
+ * seconds (not milliseconds) and cannot handle dates before 1970
+ * on some operating systems.
+ */
+static double getDSTOffsetSimple(double localTimeSeconds, double utcOffset)
+{
+ if (localTimeSeconds > maxUnixTime)
+ localTimeSeconds = maxUnixTime;
+ else if (localTimeSeconds < 0) // Go ahead a day to make localtime work (does not work with 0)
+ localTimeSeconds += secondsPerDay;
+
+    // Input is UTC, so we have to shift back to local time to determine DST; hence the + getUTCOffset().
+ double offsetTime = (localTimeSeconds * msPerSecond) + utcOffset;
+
+ // Offset from UTC but doesn't include DST obviously
+ int offsetHour = msToHours(offsetTime);
+ int offsetMinute = msToMinutes(offsetTime);
+
+ // FIXME: time_t has a potential problem in 2038
+ time_t localTime = static_cast<time_t>(localTimeSeconds);
+
+ tm localTM;
+ getLocalTime(&localTime, &localTM);
+
+ double diff = ((localTM.tm_hour - offsetHour) * secondsPerHour) + ((localTM.tm_min - offsetMinute) * 60);
+
+ if (diff < 0)
+ diff += secondsPerDay;
+
+ return (diff * msPerSecond);
+}
+
+// Get the DST offset, given a time in UTC
+static double getDSTOffset(double ms, double utcOffset)
+{
+ // On Mac OS X, the call to localtime (see getDSTOffsetSimple) will return historically accurate
+ // DST information (e.g. New Zealand did not have DST from 1946 to 1974) however the JavaScript
+ // standard explicitly dictates that historical information should not be considered when
+ // determining DST. For this reason we shift away from years that localtime can handle but would
+ // return historically accurate information.
+ int year = msToYear(ms);
+ int equivalentYear = equivalentYearForDST(year);
+ if (year != equivalentYear) {
+ bool leapYear = isLeapYear(year);
+ int dayInYearLocal = dayInYear(ms, year);
+ int dayInMonth = dayInMonthFromDayInYear(dayInYearLocal, leapYear);
+ int month = monthFromDayInYear(dayInYearLocal, leapYear);
+ int day = dateToDayInYear(equivalentYear, month, dayInMonth);
+ ms = (day * msPerDay) + msToMilliseconds(ms);
+ }
+
+ return getDSTOffsetSimple(ms / msPerSecond, utcOffset);
+}
+
+double gregorianDateTimeToMS(const GregorianDateTime& t, double milliSeconds, bool inputIsUTC)
+{
+ int day = dateToDayInYear(t.year + 1900, t.month, t.monthDay);
+ double ms = timeToMS(t.hour, t.minute, t.second, milliSeconds);
+ double result = (day * msPerDay) + ms;
+
+ if (!inputIsUTC) { // convert to UTC
+ double utcOffset = getUTCOffset();
+ result -= utcOffset;
+ result -= getDSTOffset(result, utcOffset);
+ }
+
+ return result;
+}
+
+void msToGregorianDateTime(double ms, bool outputIsUTC, GregorianDateTime& tm)
+{
+ // input is UTC
+ double dstOff = 0.0;
+ const double utcOff = getUTCOffset();
+
+ if (!outputIsUTC) { // convert to local time
+ dstOff = getDSTOffset(ms, utcOff);
+ ms += dstOff + utcOff;
+ }
+
+ const int year = msToYear(ms);
+ tm.second = msToSeconds(ms);
+ tm.minute = msToMinutes(ms);
+ tm.hour = msToHours(ms);
+ tm.weekDay = msToWeekDay(ms);
+ tm.yearDay = dayInYear(ms, year);
+ tm.monthDay = dayInMonthFromDayInYear(tm.yearDay, isLeapYear(year));
+ tm.month = monthFromDayInYear(tm.yearDay, isLeapYear(year));
+ tm.year = year - 1900;
+ tm.isDST = dstOff != 0.0;
+
+ tm.utcOffset = outputIsUTC ? 0 : static_cast<long>((dstOff + utcOff) / msPerSecond);
+ tm.timeZone = NULL;
+}
+
+void initializeDates()
+{
+#ifndef NDEBUG
+ static bool alreadyInitialized;
+ ASSERT(!alreadyInitialized++);
+#endif
+
+ equivalentYearForDST(2000); // Need to call once to initialize a static used in this function.
+#if PLATFORM(DARWIN)
+ // Register for a notification whenever the time zone changes.
+ uint32_t status = notify_register_check("com.apple.system.timezone", &s_notificationToken);
+ if (status == NOTIFY_STATUS_OK) {
+ s_cachedUTCOffset = calculateUTCOffset();
+ s_haveCachedUTCOffset = true;
+ }
+#endif
+}
+
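+// The day count below uses the standard Fliegel & Van Flandern Julian-day
+// formula; 2440588 is the Julian day number of 1 January 1970.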
+static inline double ymdhmsToSeconds(long year, int mon, int day, int hour, int minute, int second)
+{
+ double days = (day - 32075)
+ + floor(1461 * (year + 4800.0 + (mon - 14) / 12) / 4)
+ + 367 * (mon - 2 - (mon - 14) / 12 * 12) / 12
+ - floor(3 * ((year + 4900.0 + (mon - 14) / 12) / 100) / 4)
+ - 2440588;
+ return ((days * hoursPerDay + hour) * minutesPerHour + minute) * secondsPerMinute + second;
+}
+
+// We follow the recommendation of RFC 2822 to consider all
+// obsolete time zones not listed here equivalent to "-0000".
+static const struct KnownZone {
+#if !PLATFORM(WIN_OS)
+ const
+#endif
+ char tzName[4];
+ int tzOffset;
+} known_zones[] = {
+ { "UT", 0 },
+ { "GMT", 0 },
+ { "EST", -300 },
+ { "EDT", -240 },
+ { "CST", -360 },
+ { "CDT", -300 },
+ { "MST", -420 },
+ { "MDT", -360 },
+ { "PST", -480 },
+ { "PDT", -420 }
+};
+
+inline static void skipSpacesAndComments(const char*& s)
+{
+ int nesting = 0;
+ char ch;
+ while ((ch = *s)) {
+ if (!isASCIISpace(ch)) {
+ if (ch == '(')
+ nesting++;
+ else if (ch == ')' && nesting > 0)
+ nesting--;
+ else if (nesting == 0)
+ break;
+ }
+ s++;
+ }
+}
+
+// returns 0-11 (Jan-Dec); -1 on failure
+static int findMonth(const char* monthStr)
+{
+ ASSERT(monthStr);
+ char needle[4];
+ for (int i = 0; i < 3; ++i) {
+ if (!*monthStr)
+ return -1;
+ needle[i] = static_cast<char>(toASCIILower(*monthStr++));
+ }
+ needle[3] = '\0';
+ const char *haystack = "janfebmaraprmayjunjulaugsepoctnovdec";
+ const char *str = strstr(haystack, needle);
+ if (str) {
+ int position = static_cast<int>(str - haystack);
+ if (position % 3 == 0)
+ return position / 3;
+ }
+ return -1;
+}
+
+static bool parseLong(const char* string, char** stopPosition, int base, long* result)
+{
+ *result = strtol(string, stopPosition, base);
+ // Avoid the use of errno as it is not available on Windows CE
+ if (string == *stopPosition || *result == LONG_MIN || *result == LONG_MAX)
+ return false;
+ return true;
+}
+
+double parseDateFromNullTerminatedCharacters(const char* dateString)
+{
+ // This parses a date in the form:
+ // Tuesday, 09-Nov-99 23:12:40 GMT
+ // or
+ // Sat, 01-Jan-2000 08:00:00 GMT
+ // or
+ // Sat, 01 Jan 2000 08:00:00 GMT
+ // or
+ // 01 Jan 99 22:00 +0100 (exceptions in rfc822/rfc2822)
+ // ### non RFC formats, added for Javascript:
+ // [Wednesday] January 09 1999 23:12:40 GMT
+ // [Wednesday] January 09 23:12:40 GMT 1999
+ //
+ // We ignore the weekday.
+
+ // Skip leading space
+ skipSpacesAndComments(dateString);
+
+ long month = -1;
+ const char *wordStart = dateString;
+ // Check contents of first words if not number
+ while (*dateString && !isASCIIDigit(*dateString)) {
+ if (isASCIISpace(*dateString) || *dateString == '(') {
+ if (dateString - wordStart >= 3)
+ month = findMonth(wordStart);
+ skipSpacesAndComments(dateString);
+ wordStart = dateString;
+ } else
+ dateString++;
+ }
+
+ // Missing delimiter between month and day (like "January29")?
+ if (month == -1 && wordStart != dateString)
+ month = findMonth(wordStart);
+
+ skipSpacesAndComments(dateString);
+
+ if (!*dateString)
+ return NaN;
+
+ // ' 09-Nov-99 23:12:40 GMT'
+ char* newPosStr;
+ long day;
+ if (!parseLong(dateString, &newPosStr, 10, &day))
+ return NaN;
+ dateString = newPosStr;
+
+ if (!*dateString)
+ return NaN;
+
+ if (day < 0)
+ return NaN;
+
+ long year = 0;
+ if (day > 31) {
+ // ### where is the boundary and what happens below?
+ if (*dateString != '/')
+ return NaN;
+ // looks like a YYYY/MM/DD date
+ if (!*++dateString)
+ return NaN;
+ year = day;
+ if (!parseLong(dateString, &newPosStr, 10, &month))
+ return NaN;
+ month -= 1;
+ dateString = newPosStr;
+ if (*dateString++ != '/' || !*dateString)
+ return NaN;
+ if (!parseLong(dateString, &newPosStr, 10, &day))
+ return NaN;
+ dateString = newPosStr;
+ } else if (*dateString == '/' && month == -1) {
+ dateString++;
+ // This looks like a MM/DD/YYYY date, not an RFC date.
+ month = day - 1; // 0-based
+ if (!parseLong(dateString, &newPosStr, 10, &day))
+ return NaN;
+ if (day < 1 || day > 31)
+ return NaN;
+ dateString = newPosStr;
+ if (*dateString == '/')
+ dateString++;
+ if (!*dateString)
+ return NaN;
+ } else {
+ if (*dateString == '-')
+ dateString++;
+
+ skipSpacesAndComments(dateString);
+
+ if (*dateString == ',')
+ dateString++;
+
+ if (month == -1) { // not found yet
+ month = findMonth(dateString);
+ if (month == -1)
+ return NaN;
+
+ while (*dateString && *dateString != '-' && *dateString != ',' && !isASCIISpace(*dateString))
+ dateString++;
+
+ if (!*dateString)
+ return NaN;
+
+ // '-99 23:12:40 GMT'
+ if (*dateString != '-' && *dateString != '/' && *dateString != ',' && !isASCIISpace(*dateString))
+ return NaN;
+ dateString++;
+ }
+ }
+
+ if (month < 0 || month > 11)
+ return NaN;
+
+ // '99 23:12:40 GMT'
+ if (year <= 0 && *dateString) {
+ if (!parseLong(dateString, &newPosStr, 10, &year))
+ return NaN;
+ }
+
+ // Don't fail if the time is missing.
+ long hour = 0;
+ long minute = 0;
+ long second = 0;
+ if (!*newPosStr)
+ dateString = newPosStr;
+ else {
+ // ' 23:12:40 GMT'
+ if (!(isASCIISpace(*newPosStr) || *newPosStr == ',')) {
+ if (*newPosStr != ':')
+ return NaN;
+ // There was no year; the number was the hour.
+ year = -1;
+ } else {
+ // in the normal case (we parsed the year), advance to the next number
+ dateString = ++newPosStr;
+ skipSpacesAndComments(dateString);
+ }
+
+ parseLong(dateString, &newPosStr, 10, &hour);
+ // Do not check for errno here since we want to continue
+        // even if errno was set, because we are still looking
+ // for the timezone!
+
+ // Read a number? If not, this might be a timezone name.
+ if (newPosStr != dateString) {
+ dateString = newPosStr;
+
+ if (hour < 0 || hour > 23)
+ return NaN;
+
+ if (!*dateString)
+ return NaN;
+
+ // ':12:40 GMT'
+ if (*dateString++ != ':')
+ return NaN;
+
+ if (!parseLong(dateString, &newPosStr, 10, &minute))
+ return NaN;
+ dateString = newPosStr;
+
+ if (minute < 0 || minute > 59)
+ return NaN;
+
+ // ':40 GMT'
+ if (*dateString && *dateString != ':' && !isASCIISpace(*dateString))
+ return NaN;
+
+ // seconds are optional in rfc822 + rfc2822
+            if (*dateString == ':') {
+ dateString++;
+
+ if (!parseLong(dateString, &newPosStr, 10, &second))
+ return NaN;
+ dateString = newPosStr;
+
+ if (second < 0 || second > 59)
+ return NaN;
+ }
+
+ skipSpacesAndComments(dateString);
+
+ if (strncasecmp(dateString, "AM", 2) == 0) {
+ if (hour > 12)
+ return NaN;
+ if (hour == 12)
+ hour = 0;
+ dateString += 2;
+ skipSpacesAndComments(dateString);
+ } else if (strncasecmp(dateString, "PM", 2) == 0) {
+ if (hour > 12)
+ return NaN;
+ if (hour != 12)
+ hour += 12;
+ dateString += 2;
+ skipSpacesAndComments(dateString);
+ }
+ }
+ }
+
+ bool haveTZ = false;
+ int offset = 0;
+
+ // Don't fail if the time zone is missing.
+ // Some websites omit the time zone (4275206).
+ if (*dateString) {
+ if (strncasecmp(dateString, "GMT", 3) == 0 || strncasecmp(dateString, "UTC", 3) == 0) {
+ dateString += 3;
+ haveTZ = true;
+ }
+
+ if (*dateString == '+' || *dateString == '-') {
+ long o;
+ if (!parseLong(dateString, &newPosStr, 10, &o))
+ return NaN;
+ dateString = newPosStr;
+
+ if (o < -9959 || o > 9959)
+ return NaN;
+
+ int sgn = (o < 0) ? -1 : 1;
+ o = labs(o);
+ if (*dateString != ':') {
+ offset = ((o / 100) * 60 + (o % 100)) * sgn;
+ } else { // GMT+05:00
+ long o2;
+ if (!parseLong(dateString, &newPosStr, 10, &o2))
+ return NaN;
+ dateString = newPosStr;
+ offset = (o * 60 + o2) * sgn;
+ }
+ haveTZ = true;
+ } else {
+ for (int i = 0; i < int(sizeof(known_zones) / sizeof(KnownZone)); i++) {
+ if (0 == strncasecmp(dateString, known_zones[i].tzName, strlen(known_zones[i].tzName))) {
+ offset = known_zones[i].tzOffset;
+ dateString += strlen(known_zones[i].tzName);
+ haveTZ = true;
+ break;
+ }
+ }
+ }
+ }
+
+ skipSpacesAndComments(dateString);
+
+ if (*dateString && year == -1) {
+ if (!parseLong(dateString, &newPosStr, 10, &year))
+ return NaN;
+ dateString = newPosStr;
+ }
+
+ skipSpacesAndComments(dateString);
+
+ // Trailing garbage
+ if (*dateString)
+ return NaN;
+
+ // Y2K: Handle 2 digit years.
+ if (year >= 0 && year < 100) {
+ if (year < 50)
+ year += 2000;
+ else
+ year += 1900;
+ }
+
+ // fall back to local timezone
+ if (!haveTZ) {
+ GregorianDateTime t;
+ t.monthDay = day;
+ t.month = month;
+ t.year = year - 1900;
+ t.isDST = -1;
+ t.second = second;
+ t.minute = minute;
+ t.hour = hour;
+
+ // Use our gregorianDateTimeToMS() rather than mktime() as the latter can't handle the full year range.
+ return gregorianDateTimeToMS(t, 0, false);
+ }
+
+ return (ymdhmsToSeconds(year, month + 1, day, hour, minute, second) - (offset * 60.0)) * msPerSecond;
+}
+
+double timeClip(double t)
+{
+ if (!isfinite(t))
+ return NaN;
+ if (fabs(t) > 8.64E15)
+ return NaN;
+ return trunc(t);
+}
+
+
+} // namespace WTF
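
A round-trip sketch through the two central entry points (hypothetical caller; note that GregorianDateTime stores the year as an offset from 1900 and a 0-based month, exactly as the conversions above assume):

    #include <wtf/DateMath.h>
    #include <stdio.h>

    static void dateMathExample()
    {
        double ms = WTF::parseDateFromNullTerminatedCharacters("Sat, 01 Jan 2000 08:00:00 GMT");
        // ms is milliseconds since the epoch, or NaN if parsing failed

        WTF::GregorianDateTime parts;
        WTF::msToGregorianDateTime(ms, true /* outputIsUTC */, parts);
        printf("%04d-%02d-%02d %02d:%02d\n", parts.year + 1900, parts.month + 1,
               parts.monthDay, parts.hour, parts.minute); // 2000-01-01 08:00

        double roundTrip = WTF::gregorianDateTimeToMS(parts, 0, true /* inputIsUTC */);
        // roundTrip == ms, since both sides are UTC and no DST shift applies
        (void)roundTrip;
    }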
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/DateMath.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/DateMath.h
new file mode 100644
index 0000000..6110f76
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/DateMath.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
+ * Copyright (C) 2006, 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ */
+
+#ifndef DateMath_h
+#define DateMath_h
+
+#include <time.h>
+#include <string.h>
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+
+struct GregorianDateTime;
+
+void initializeDates();
+void msToGregorianDateTime(double, bool outputIsUTC, GregorianDateTime&);
+double gregorianDateTimeToMS(const GregorianDateTime&, double, bool inputIsUTC);
+double getUTCOffset();
+int equivalentYearForDST(int year);
+double getCurrentUTCTime();
+double getCurrentUTCTimeWithMicroseconds();
+void getLocalTime(const time_t*, tm*);
+
+// Not really math related, but this is currently the only shared place to put these.
+double parseDateFromNullTerminatedCharacters(const char*);
+double timeClip(double);
+
+const char * const weekdayName[7] = { "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun" };
+const char * const monthName[12] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };
+
+const double hoursPerDay = 24.0;
+const double minutesPerHour = 60.0;
+const double secondsPerHour = 60.0 * 60.0;
+const double secondsPerMinute = 60.0;
+const double msPerSecond = 1000.0;
+const double msPerMinute = 60.0 * 1000.0;
+const double msPerHour = 60.0 * 60.0 * 1000.0;
+const double msPerDay = 24.0 * 60.0 * 60.0 * 1000.0;
+
+// Intentionally overriding the default tm of the system.
+// The members of tm differ on various operating systems.
+struct GregorianDateTime : Noncopyable {
+ GregorianDateTime()
+ : second(0)
+ , minute(0)
+ , hour(0)
+ , weekDay(0)
+ , monthDay(0)
+ , yearDay(0)
+ , month(0)
+ , year(0)
+ , isDST(0)
+ , utcOffset(0)
+ , timeZone(0)
+ {
+ }
+
+ ~GregorianDateTime()
+ {
+ delete [] timeZone;
+ }
+
+ GregorianDateTime(const tm& inTm)
+ : second(inTm.tm_sec)
+ , minute(inTm.tm_min)
+ , hour(inTm.tm_hour)
+ , weekDay(inTm.tm_wday)
+ , monthDay(inTm.tm_mday)
+ , yearDay(inTm.tm_yday)
+ , month(inTm.tm_mon)
+ , year(inTm.tm_year)
+ , isDST(inTm.tm_isdst)
+ {
+#if HAVE(TM_GMTOFF)
+ utcOffset = static_cast<int>(inTm.tm_gmtoff);
+#else
+ utcOffset = static_cast<int>(getUTCOffset() / msPerSecond + (isDST ? secondsPerHour : 0));
+#endif
+
+#if HAVE(TM_ZONE)
+ int inZoneSize = strlen(inTm.tm_zone) + 1;
+ timeZone = new char[inZoneSize];
+ strncpy(timeZone, inTm.tm_zone, inZoneSize);
+#else
+ timeZone = 0;
+#endif
+ }
+
+ operator tm() const
+ {
+ tm ret;
+ memset(&ret, 0, sizeof(ret));
+
+ ret.tm_sec = second;
+ ret.tm_min = minute;
+ ret.tm_hour = hour;
+ ret.tm_wday = weekDay;
+ ret.tm_mday = monthDay;
+ ret.tm_yday = yearDay;
+ ret.tm_mon = month;
+ ret.tm_year = year;
+ ret.tm_isdst = isDST;
+
+#if HAVE(TM_GMTOFF)
+ ret.tm_gmtoff = static_cast<long>(utcOffset);
+#endif
+#if HAVE(TM_ZONE)
+ ret.tm_zone = timeZone;
+#endif
+
+ return ret;
+ }
+
+ void copyFrom(const GregorianDateTime& rhs)
+ {
+ second = rhs.second;
+ minute = rhs.minute;
+ hour = rhs.hour;
+ weekDay = rhs.weekDay;
+ monthDay = rhs.monthDay;
+ yearDay = rhs.yearDay;
+ month = rhs.month;
+ year = rhs.year;
+ isDST = rhs.isDST;
+ utcOffset = rhs.utcOffset;
+ if (rhs.timeZone) {
+ int inZoneSize = strlen(rhs.timeZone) + 1;
+ timeZone = new char[inZoneSize];
+ strncpy(timeZone, rhs.timeZone, inZoneSize);
+ } else
+ timeZone = 0;
+ }
+
+ int second;
+ int minute;
+ int hour;
+ int weekDay;
+ int monthDay;
+ int yearDay;
+ int month;
+ int year;
+ int isDST;
+ int utcOffset;
+ char* timeZone;
+};
+
+static inline int gmtoffset(const GregorianDateTime& t)
+{
+ return t.utcOffset;
+}
+
+} // namespace WTF
+
+#endif // DateMath_h
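
GregorianDateTime deliberately shadows the system tm; a sketch of crossing that bridge (hypothetical caller):

    #include <wtf/DateMath.h>
    #include <time.h>

    static void tmBridgeExample()
    {
        time_t now = time(0);
        tm local;
        WTF::getLocalTime(&now, &local);   // thread-safe localtime wrapper declared above

        WTF::GregorianDateTime gdt(local); // copies the fields; duplicates tm_zone if present
        tm sys = gdt;                      // operator tm() rebuilds a zeroed tm
        (void)sys;
    }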
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Deque.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Deque.h
new file mode 100644
index 0000000..3c3d378
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Deque.h
@@ -0,0 +1,669 @@
+/*
+ * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Deque_h
+#define WTF_Deque_h
+
+// FIXME: Could move what Vector and Deque share into a separate file.
+// Deque doesn't actually use Vector.
+
+#include "Vector.h"
+
+namespace WTF {
+
+ template<typename T> class DequeIteratorBase;
+ template<typename T> class DequeIterator;
+ template<typename T> class DequeConstIterator;
+ template<typename T> class DequeReverseIterator;
+ template<typename T> class DequeConstReverseIterator;
+
+ template<typename T>
+ class Deque : public FastAllocBase {
+ public:
+ typedef DequeIterator<T> iterator;
+ typedef DequeConstIterator<T> const_iterator;
+ typedef DequeReverseIterator<T> reverse_iterator;
+ typedef DequeConstReverseIterator<T> const_reverse_iterator;
+
+ Deque();
+ Deque(const Deque<T>&);
+ Deque& operator=(const Deque<T>&);
+ ~Deque();
+
+ void swap(Deque<T>&);
+
+ size_t size() const { return m_start <= m_end ? m_end - m_start : m_end + m_buffer.capacity() - m_start; }
+ bool isEmpty() const { return m_start == m_end; }
+
+ iterator begin() { return iterator(this, m_start); }
+ iterator end() { return iterator(this, m_end); }
+ const_iterator begin() const { return const_iterator(this, m_start); }
+ const_iterator end() const { return const_iterator(this, m_end); }
+ reverse_iterator rbegin() { return reverse_iterator(this, m_end); }
+ reverse_iterator rend() { return reverse_iterator(this, m_start); }
+ const_reverse_iterator rbegin() const { return const_reverse_iterator(this, m_end); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(this, m_start); }
+
+ T& first() { ASSERT(m_start != m_end); return m_buffer.buffer()[m_start]; }
+ const T& first() const { ASSERT(m_start != m_end); return m_buffer.buffer()[m_start]; }
+
+ template<typename U> void append(const U&);
+ template<typename U> void prepend(const U&);
+ void removeFirst();
+ void remove(iterator&);
+ void remove(const_iterator&);
+
+ void clear();
+
+ template<typename Predicate>
+ iterator findIf(Predicate&);
+
+ private:
+ friend class DequeIteratorBase<T>;
+
+ typedef VectorBuffer<T, 0> Buffer;
+ typedef VectorTypeOperations<T> TypeOperations;
+ typedef DequeIteratorBase<T> IteratorBase;
+
+ void remove(size_t position);
+ void invalidateIterators();
+ void destroyAll();
+ void checkValidity() const;
+ void checkIndexValidity(size_t) const;
+ void expandCapacityIfNeeded();
+ void expandCapacity();
+
+ size_t m_start;
+ size_t m_end;
+ Buffer m_buffer;
+#ifndef NDEBUG
+ mutable IteratorBase* m_iterators;
+#endif
+ };
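+
+ // Usage sketch (illustrative): Deque is a circular buffer, so append and
+ // prepend are both amortized O(1).
+ //
+ //     Deque<int> d;
+ //     d.append(2); // [2]
+ //     d.prepend(1); // [1, 2]
+ //     int front = d.first(); // 1
+ //     d.removeFirst(); // [2]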
+
+ template<typename T>
+ class DequeIteratorBase {
+ private:
+ typedef DequeIteratorBase<T> Base;
+
+ protected:
+ DequeIteratorBase();
+ DequeIteratorBase(const Deque<T>*, size_t);
+ DequeIteratorBase(const Base&);
+ Base& operator=(const Base&);
+ ~DequeIteratorBase();
+
+ void assign(const Base& other) { *this = other; }
+
+ void increment();
+ void decrement();
+
+ T* before() const;
+ T* after() const;
+
+ bool isEqual(const Base&) const;
+
+ private:
+ void addToIteratorsList();
+ void removeFromIteratorsList();
+ void checkValidity() const;
+ void checkValidity(const Base&) const;
+
+ Deque<T>* m_deque;
+ size_t m_index;
+
+ friend class Deque<T>;
+
+#ifndef NDEBUG
+ mutable DequeIteratorBase* m_next;
+ mutable DequeIteratorBase* m_previous;
+#endif
+ };
+
+ template<typename T>
+ class DequeIterator : public DequeIteratorBase<T> {
+ private:
+ typedef DequeIteratorBase<T> Base;
+ typedef DequeIterator<T> Iterator;
+
+ public:
+ DequeIterator(Deque<T>* deque, size_t index) : Base(deque, index) { }
+
+ DequeIterator(const Iterator& other) : Base(other) { }
+ DequeIterator& operator=(const Iterator& other) { Base::assign(other); return *this; }
+
+ T& operator*() const { return *Base::after(); }
+ T* operator->() const { return Base::after(); }
+
+ bool operator==(const Iterator& other) const { return Base::isEqual(other); }
+ bool operator!=(const Iterator& other) const { return !Base::isEqual(other); }
+
+ Iterator& operator++() { Base::increment(); return *this; }
+ // postfix ++ intentionally omitted
+ Iterator& operator--() { Base::decrement(); return *this; }
+ // postfix -- intentionally omitted
+ };
+
+ template<typename T>
+ class DequeConstIterator : public DequeIteratorBase<T> {
+ private:
+ typedef DequeIteratorBase<T> Base;
+ typedef DequeConstIterator<T> Iterator;
+ typedef DequeIterator<T> NonConstIterator;
+
+ public:
+ DequeConstIterator(const Deque<T>* deque, size_t index) : Base(deque, index) { }
+
+ DequeConstIterator(const Iterator& other) : Base(other) { }
+ DequeConstIterator(const NonConstIterator& other) : Base(other) { }
+ DequeConstIterator& operator=(const Iterator& other) { Base::assign(other); return *this; }
+ DequeConstIterator& operator=(const NonConstIterator& other) { Base::assign(other); return *this; }
+
+ const T& operator*() const { return *Base::after(); }
+ const T* operator->() const { return Base::after(); }
+
+ bool operator==(const Iterator& other) const { return Base::isEqual(other); }
+ bool operator!=(const Iterator& other) const { return !Base::isEqual(other); }
+
+ Iterator& operator++() { Base::increment(); return *this; }
+ // postfix ++ intentionally omitted
+ Iterator& operator--() { Base::decrement(); return *this; }
+ // postfix -- intentionally omitted
+ };
+
+ template<typename T>
+ class DequeReverseIterator : public DequeIteratorBase<T> {
+ private:
+ typedef DequeIteratorBase<T> Base;
+ typedef DequeReverseIterator<T> Iterator;
+
+ public:
+ DequeReverseIterator(const Deque<T>* deque, size_t index) : Base(deque, index) { }
+
+ DequeReverseIterator(const Iterator& other) : Base(other) { }
+ DequeReverseIterator& operator=(const Iterator& other) { Base::assign(other); return *this; }
+
+ T& operator*() const { return *Base::before(); }
+ T* operator->() const { return Base::before(); }
+
+ bool operator==(const Iterator& other) const { return Base::isEqual(other); }
+ bool operator!=(const Iterator& other) const { return !Base::isEqual(other); }
+
+ Iterator& operator++() { Base::decrement(); return *this; }
+ // postfix ++ intentionally omitted
+ Iterator& operator--() { Base::increment(); return *this; }
+ // postfix -- intentionally omitted
+ };
+
+ template<typename T>
+ class DequeConstReverseIterator : public DequeIteratorBase<T> {
+ private:
+ typedef DequeIteratorBase<T> Base;
+ typedef DequeConstReverseIterator<T> Iterator;
+ typedef DequeReverseIterator<T> NonConstIterator;
+
+ public:
+ DequeConstReverseIterator(const Deque<T>* deque, size_t index) : Base(deque, index) { }
+
+ DequeConstReverseIterator(const Iterator& other) : Base(other) { }
+ DequeConstReverseIterator(const NonConstIterator& other) : Base(other) { }
+ DequeConstReverseIterator& operator=(const Iterator& other) { Base::assign(other); return *this; }
+ DequeConstReverseIterator& operator=(const NonConstIterator& other) { Base::assign(other); return *this; }
+
+ const T& operator*() const { return *Base::before(); }
+ const T* operator->() const { return Base::before(); }
+
+ bool operator==(const Iterator& other) const { return Base::isEqual(other); }
+ bool operator!=(const Iterator& other) const { return !Base::isEqual(other); }
+
+ Iterator& operator++() { Base::decrement(); return *this; }
+ // postfix ++ intentionally omitted
+ Iterator& operator--() { Base::increment(); return *this; }
+ // postfix -- intentionally omitted
+ };
+
+#ifdef NDEBUG
+ template<typename T> inline void Deque<T>::checkValidity() const { }
+ template<typename T> inline void Deque<T>::checkIndexValidity(size_t) const { }
+ template<typename T> inline void Deque<T>::invalidateIterators() { }
+#else
+ template<typename T>
+ void Deque<T>::checkValidity() const
+ {
+ if (!m_buffer.capacity()) {
+ ASSERT(!m_start);
+ ASSERT(!m_end);
+ } else {
+ ASSERT(m_start < m_buffer.capacity());
+ ASSERT(m_end < m_buffer.capacity());
+ }
+ }
+
+ template<typename T>
+ void Deque<T>::checkIndexValidity(size_t index) const
+ {
+ ASSERT(index <= m_buffer.capacity());
+ if (m_start <= m_end) {
+ ASSERT(index >= m_start);
+ ASSERT(index <= m_end);
+ } else {
+ ASSERT(index >= m_start || index <= m_end);
+ }
+ }
+
+ template<typename T>
+ void Deque<T>::invalidateIterators()
+ {
+ IteratorBase* next;
+ for (IteratorBase* p = m_iterators; p; p = next) {
+ next = p->m_next;
+ p->m_deque = 0;
+ p->m_next = 0;
+ p->m_previous = 0;
+ }
+ m_iterators = 0;
+ }
+#endif
+
+ template<typename T>
+ inline Deque<T>::Deque()
+ : m_start(0)
+ , m_end(0)
+#ifndef NDEBUG
+ , m_iterators(0)
+#endif
+ {
+ checkValidity();
+ }
+
+ template<typename T>
+ inline Deque<T>::Deque(const Deque<T>& other)
+ : m_start(other.m_start)
+ , m_end(other.m_end)
+ , m_buffer(other.m_buffer.capacity())
+#ifndef NDEBUG
+ , m_iterators(0)
+#endif
+ {
+ const T* otherBuffer = other.m_buffer.buffer();
+ if (m_start <= m_end)
+ TypeOperations::uninitializedCopy(otherBuffer + m_start, otherBuffer + m_end, m_buffer.buffer() + m_start);
+ else {
+ TypeOperations::uninitializedCopy(otherBuffer, otherBuffer + m_end, m_buffer.buffer());
+ TypeOperations::uninitializedCopy(otherBuffer + m_start, otherBuffer + m_buffer.capacity(), m_buffer.buffer() + m_start);
+ }
+ }
+
+ template<typename T>
+ void deleteAllValues(const Deque<T>& collection)
+ {
+ typedef typename Deque<T>::const_iterator iterator;
+ iterator end = collection.end();
+ for (iterator it = collection.begin(); it != end; ++it)
+ delete *it;
+ }
+
+ template<typename T>
+ inline Deque<T>& Deque<T>::operator=(const Deque<T>& other)
+ {
+ Deque<T> copy(other);
+ swap(copy);
+ return *this;
+ }
+
+ template<typename T>
+ inline void Deque<T>::destroyAll()
+ {
+ if (m_start <= m_end)
+ TypeOperations::destruct(m_buffer.buffer() + m_start, m_buffer.buffer() + m_end);
+ else {
+ TypeOperations::destruct(m_buffer.buffer(), m_buffer.buffer() + m_end);
+ TypeOperations::destruct(m_buffer.buffer() + m_start, m_buffer.buffer() + m_buffer.capacity());
+ }
+ }
+
+ template<typename T>
+ inline Deque<T>::~Deque()
+ {
+ checkValidity();
+ invalidateIterators();
+ destroyAll();
+ }
+
+ template<typename T>
+ inline void Deque<T>::swap(Deque<T>& other)
+ {
+ checkValidity();
+ other.checkValidity();
+ invalidateIterators();
+ std::swap(m_start, other.m_start);
+ std::swap(m_end, other.m_end);
+ m_buffer.swap(other.m_buffer);
+ checkValidity();
+ other.checkValidity();
+ }
+
+ template<typename T>
+ inline void Deque<T>::clear()
+ {
+ checkValidity();
+ invalidateIterators();
+ destroyAll();
+ m_start = 0;
+ m_end = 0;
+ checkValidity();
+ }
+
+ template<typename T>
+ template<typename Predicate>
+ inline DequeIterator<T> Deque<T>::findIf(Predicate& predicate)
+ {
+ iterator end_iterator = end();
+ for (iterator it = begin(); it != end_iterator; ++it) {
+ if (predicate(*it))
+ return it;
+ }
+ return end_iterator;
+ }
+
+ template<typename T>
+ inline void Deque<T>::expandCapacityIfNeeded()
+ {
+ if (m_start) {
+ if (m_end + 1 != m_start)
+ return;
+ } else if (m_end) {
+ if (m_end != m_buffer.capacity() - 1)
+ return;
+ } else if (m_buffer.capacity())
+ return;
+
+ expandCapacity();
+ }
+
+ template<typename T>
+ void Deque<T>::expandCapacity()
+ {
+ checkValidity();
+ size_t oldCapacity = m_buffer.capacity();
+ size_t newCapacity = max(static_cast<size_t>(16), oldCapacity + oldCapacity / 4 + 1);
+ T* oldBuffer = m_buffer.buffer();
+ m_buffer.allocateBuffer(newCapacity);
+ if (m_start <= m_end)
+ TypeOperations::move(oldBuffer + m_start, oldBuffer + m_end, m_buffer.buffer() + m_start);
+ else {
+ TypeOperations::move(oldBuffer, oldBuffer + m_end, m_buffer.buffer());
+ size_t newStart = newCapacity - (oldCapacity - m_start);
+ TypeOperations::move(oldBuffer + m_start, oldBuffer + oldCapacity, m_buffer.buffer() + newStart);
+ m_start = newStart;
+ }
+ m_buffer.deallocateBuffer(oldBuffer);
+ checkValidity();
+ }
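+
+ // Layout note (illustrative): when m_start > m_end the live elements wrap
+ // around the end of the buffer. With capacity 8, m_start = 6 and m_end = 3,
+ // slots 6..7 hold the first two elements and slots 0..2 hold the last three.
+ // expandCapacity() moves the wrapped tail [m_start, capacity) to the end of
+ // the new buffer, so element order is preserved modulo the new capacity.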
+
+ template<typename T> template<typename U>
+ inline void Deque<T>::append(const U& value)
+ {
+ checkValidity();
+ expandCapacityIfNeeded();
+ new (&m_buffer.buffer()[m_end]) T(value);
+ if (m_end == m_buffer.capacity() - 1)
+ m_end = 0;
+ else
+ ++m_end;
+ checkValidity();
+ }
+
+ template<typename T> template<typename U>
+ inline void Deque<T>::prepend(const U& value)
+ {
+ checkValidity();
+ expandCapacityIfNeeded();
+ if (!m_start)
+ m_start = m_buffer.capacity() - 1;
+ else
+ --m_start;
+ new (&m_buffer.buffer()[m_start]) T(value);
+ checkValidity();
+ }
+
+ template<typename T>
+ inline void Deque<T>::removeFirst()
+ {
+ checkValidity();
+ invalidateIterators();
+ ASSERT(!isEmpty());
+ TypeOperations::destruct(&m_buffer.buffer()[m_start], &m_buffer.buffer()[m_start + 1]);
+ if (m_start == m_buffer.capacity() - 1)
+ m_start = 0;
+ else
+ ++m_start;
+ checkValidity();
+ }
+
+ template<typename T>
+ inline void Deque<T>::remove(iterator& it)
+ {
+ it.checkValidity();
+ remove(it.m_index);
+ }
+
+ template<typename T>
+ inline void Deque<T>::remove(const_iterator& it)
+ {
+ it.checkValidity();
+ remove(it.m_index);
+ }
+
+ template<typename T>
+ inline void Deque<T>::remove(size_t position)
+ {
+ if (position == m_end)
+ return;
+
+ checkValidity();
+ invalidateIterators();
+
+ T* buffer = m_buffer.buffer();
+ TypeOperations::destruct(&buffer[position], &buffer[position + 1]);
+
+ // Find which segment of the circular buffer contained the removed element, and only move elements in that part.
+ if (position >= m_start) {
+ TypeOperations::moveOverlapping(buffer + m_start, buffer + position, buffer + m_start + 1);
+ m_start = (m_start + 1) % m_buffer.capacity();
+ } else {
+ TypeOperations::moveOverlapping(buffer + position + 1, buffer + m_end, buffer + position);
+ m_end = (m_end - 1 + m_buffer.capacity()) % m_buffer.capacity();
+ }
+ checkValidity();
+ }
+
+#ifdef NDEBUG
+ template<typename T> inline void DequeIteratorBase<T>::checkValidity() const { }
+ template<typename T> inline void DequeIteratorBase<T>::checkValidity(const DequeIteratorBase<T>&) const { }
+ template<typename T> inline void DequeIteratorBase<T>::addToIteratorsList() { }
+ template<typename T> inline void DequeIteratorBase<T>::removeFromIteratorsList() { }
+#else
+ template<typename T>
+ void DequeIteratorBase<T>::checkValidity() const
+ {
+ ASSERT(m_deque);
+ m_deque->checkIndexValidity(m_index);
+ }
+
+ template<typename T>
+ void DequeIteratorBase<T>::checkValidity(const Base& other) const
+ {
+ checkValidity();
+ other.checkValidity();
+ ASSERT(m_deque == other.m_deque);
+ }
+
+ template<typename T>
+ void DequeIteratorBase<T>::addToIteratorsList()
+ {
+ if (!m_deque)
+ m_next = 0;
+ else {
+ m_next = m_deque->m_iterators;
+ m_deque->m_iterators = this;
+ if (m_next)
+ m_next->m_previous = this;
+ }
+ m_previous = 0;
+ }
+
+ template<typename T>
+ void DequeIteratorBase<T>::removeFromIteratorsList()
+ {
+ if (!m_deque) {
+ ASSERT(!m_next);
+ ASSERT(!m_previous);
+ } else {
+ if (m_next) {
+ ASSERT(m_next->m_previous == this);
+ m_next->m_previous = m_previous;
+ }
+ if (m_previous) {
+ ASSERT(m_deque->m_iterators != this);
+ ASSERT(m_previous->m_next == this);
+ m_previous->m_next = m_next;
+ } else {
+ ASSERT(m_deque->m_iterators == this);
+ m_deque->m_iterators = m_next;
+ }
+ }
+ m_next = 0;
+ m_previous = 0;
+ }
+#endif
+
+ template<typename T>
+ inline DequeIteratorBase<T>::DequeIteratorBase()
+ : m_deque(0)
+ {
+ }
+
+ template<typename T>
+ inline DequeIteratorBase<T>::DequeIteratorBase(const Deque<T>* deque, size_t index)
+ : m_deque(const_cast<Deque<T>*>(deque))
+ , m_index(index)
+ {
+ addToIteratorsList();
+ checkValidity();
+ }
+
+ template<typename T>
+ inline DequeIteratorBase<T>::DequeIteratorBase(const Base& other)
+ : m_deque(other.m_deque)
+ , m_index(other.m_index)
+ {
+ addToIteratorsList();
+ checkValidity();
+ }
+
+ template<typename T>
+ inline DequeIteratorBase<T>& DequeIteratorBase<T>::operator=(const Base& other)
+ {
+ checkValidity();
+ other.checkValidity();
+ removeFromIteratorsList();
+
+ m_deque = other.m_deque;
+ m_index = other.m_index;
+ addToIteratorsList();
+ checkValidity();
+ return *this;
+ }
+
+ template<typename T>
+ inline DequeIteratorBase<T>::~DequeIteratorBase()
+ {
+#ifndef NDEBUG
+ removeFromIteratorsList();
+ m_deque = 0;
+#endif
+ }
+
+ template<typename T>
+ inline bool DequeIteratorBase<T>::isEqual(const Base& other) const
+ {
+ checkValidity(other);
+ return m_index == other.m_index;
+ }
+
+ template<typename T>
+ inline void DequeIteratorBase<T>::increment()
+ {
+ checkValidity();
+ ASSERT(m_index != m_deque->m_end);
+ ASSERT(m_deque->m_buffer.capacity());
+ if (m_index == m_deque->m_buffer.capacity() - 1)
+ m_index = 0;
+ else
+ ++m_index;
+ checkValidity();
+ }
+
+ template<typename T>
+ inline void DequeIteratorBase<T>::decrement()
+ {
+ checkValidity();
+ ASSERT(m_index != m_deque->m_start);
+ ASSERT(m_deque->m_buffer.capacity());
+ if (!m_index)
+ m_index = m_deque->m_buffer.capacity() - 1;
+ else
+ --m_index;
+ checkValidity();
+ }
+
+ template<typename T>
+ inline T* DequeIteratorBase<T>::after() const
+ {
+ checkValidity();
+ ASSERT(m_index != m_deque->m_end);
+ return &m_deque->m_buffer.buffer()[m_index];
+ }
+
+ template<typename T>
+ inline T* DequeIteratorBase<T>::before() const
+ {
+ checkValidity();
+ ASSERT(m_index != m_deque->m_start);
+ if (!m_index)
+ return &m_deque->m_buffer.buffer()[m_deque->m_buffer.capacity() - 1];
+ return &m_deque->m_buffer.buffer()[m_index - 1];
+ }
+
+} // namespace WTF
+
+using WTF::Deque;
+
+#endif // WTF_Deque_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/DisallowCType.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/DisallowCType.h
new file mode 100644
index 0000000..5dccb0e
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/DisallowCType.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_DisallowCType_h
+#define WTF_DisallowCType_h
+
+// The behavior of many of the functions in the <ctype.h> header is dependent
+// on the current locale. But almost all uses of these functions are for
+// locale-independent, ASCII-specific purposes. In WebKit code we use our own
+// ASCII-specific functions instead. This header makes sure we get a compile-time
+// error if we use one of the <ctype.h> functions by accident.
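+//
+// For example (illustrative), a stray call such as
+//
+//     if (isspace(c)) { ... }
+//
+// expands to WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h(c)
+// below, which does not compile and points the author at ASCIICType.h.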
+
+#include <ctype.h>
+
+#undef isalnum
+#undef isalpha
+#undef isascii
+#undef isblank
+#undef iscntrl
+#undef isdigit
+#undef isgraph
+#undef islower
+#undef isprint
+#undef ispunct
+#undef isspace
+#undef isupper
+#undef isxdigit
+#undef toascii
+#undef tolower
+#undef toupper
+
+#define isalnum WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define isalpha WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define isascii WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define isblank WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define iscntrl WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define isdigit WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define isgraph WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define islower WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define isprint WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define ispunct WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define isspace WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define isupper WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define isxdigit WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define toascii WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define tolower WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+#define toupper WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h
+
+#endif
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastAllocBase.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastAllocBase.h
new file mode 100644
index 0000000..9fcbbc1
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastAllocBase.h
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2008, 2009 Paul Pedriana <ppedriana@ea.com>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FastAllocBase_h
+#define FastAllocBase_h
+
+// Provides customizable overrides of fastMalloc/fastFree and operator new/delete
+//
+// Provided functionality:
+// namespace WTF {
+// class FastAllocBase;
+//
+// T* fastNew<T>();
+// T* fastNew<T>(arg);
+// T* fastNew<T>(arg, arg);
+// T* fastNewArray<T>(count);
+// void fastDelete(T* p);
+// void fastDeleteArray(T* p);
+// void fastNonNullDelete(T* p);
+// void fastNonNullDeleteArray(T* p);
+// }
+//
+// FastDelete assumes that the underlying memory was allocated with the
+// matching fastNew variant; with FAST_MALLOC_MATCH_VALIDATION enabled,
+// mismatched pairings are caught at runtime.
+//
+// Example usage:
+// class Widget : public FastAllocBase { ... };
+//
+// char* charPtr = fastNew<char>();
+// fastDelete(charPtr);
+//
+// char* charArrayPtr = fastNewArray<char>(37);
+// fastDeleteArray(charArrayPtr);
+//
+// void** voidPtrPtr = fastNew<void*>();
+// fastDelete(voidPtrPtr);
+//
+// void** voidPtrArrayPtr = fastNewArray<void*>(37);
+// fastDeleteArray(voidPtrArrayPtr);
+//
+// POD* podPtr = fastNew<POD>();
+// fastDelete(podPtr);
+//
+// POD* podArrayPtr = fastNewArray<POD>(37);
+// fastDeleteArray(podArrayPtr);
+//
+// Object* objectPtr = fastNew<Object>();
+// fastDelete(objectPtr);
+//
+// Object* objectArrayPtr = fastNewArray<Object>(37);
+// fastDeleteArray(objectArrayPtr);
+//
+
+#include <new>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include "Assertions.h"
+#include "FastMalloc.h"
+#include "TypeTraits.h"
+
+namespace WTF {
+
+ class FastAllocBase {
+ public:
+ // Placement operator new.
+ void* operator new(size_t, void* p) { return p; }
+ void* operator new[](size_t, void* p) { return p; }
+
+ void* operator new(size_t size)
+ {
+ void* p = fastMalloc(size);
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeClassNew);
+ return p;
+ }
+
+ void operator delete(void* p)
+ {
+ fastMallocMatchValidateFree(p, Internal::AllocTypeClassNew);
+ fastFree(p);
+ }
+
+ void* operator new[](size_t size)
+ {
+ void* p = fastMalloc(size);
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeClassNewArray);
+ return p;
+ }
+
+ void operator delete[](void* p)
+ {
+ fastMallocMatchValidateFree(p, Internal::AllocTypeClassNewArray);
+ fastFree(p);
+ }
+ };
+
+ // fastNew / fastDelete
+
+ template <typename T>
+ inline T* fastNew()
+ {
+ void* p = fastMalloc(sizeof(T));
+
+ if (!p)
+ return 0;
+
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeFastNew);
+ return ::new(p) T;
+ }
+
+ template <typename T, typename Arg1>
+ inline T* fastNew(Arg1 arg1)
+ {
+ void* p = fastMalloc(sizeof(T));
+
+ if (!p)
+ return 0;
+
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeFastNew);
+ return ::new(p) T(arg1);
+ }
+
+ template <typename T, typename Arg1, typename Arg2>
+ inline T* fastNew(Arg1 arg1, Arg2 arg2)
+ {
+ void* p = fastMalloc(sizeof(T));
+
+ if (!p)
+ return 0;
+
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeFastNew);
+ return ::new(p) T(arg1, arg2);
+ }
+
+ template <typename T, typename Arg1, typename Arg2, typename Arg3>
+ inline T* fastNew(Arg1 arg1, Arg2 arg2, Arg3 arg3)
+ {
+ void* p = fastMalloc(sizeof(T));
+
+ if (!p)
+ return 0;
+
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeFastNew);
+ return ::new(p) T(arg1, arg2, arg3);
+ }
+
+ template <typename T, typename Arg1, typename Arg2, typename Arg3, typename Arg4>
+ inline T* fastNew(Arg1 arg1, Arg2 arg2, Arg3 arg3, Arg4 arg4)
+ {
+ void* p = fastMalloc(sizeof(T));
+
+ if (!p)
+ return 0;
+
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeFastNew);
+ return ::new(p) T(arg1, arg2, arg3, arg4);
+ }
+
+ template <typename T, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5>
+ inline T* fastNew(Arg1 arg1, Arg2 arg2, Arg3 arg3, Arg4 arg4, Arg5 arg5)
+ {
+ void* p = fastMalloc(sizeof(T));
+
+ if (!p)
+ return 0;
+
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeFastNew);
+ return ::new(p) T(arg1, arg2, arg3, arg4, arg5);
+ }
+
+ namespace Internal {
+
+ // We define a union of a pointer to an integer and a pointer to T.
+ // When arrays of types with non-trivial destructors are allocated, we prepend
+ // a leading integer recording the element count, and return the pointer to T.
+ // The way to think of it is as if we allocate a struct like so:
+ // struct Array {
+ // AllocAlignmentInteger m_size;
+ // T m_T[array count];
+ // };
+
+ template <typename T>
+ union ArraySize {
+ AllocAlignmentInteger* size;
+ T* t;
+ };
+
+ // This is a support template for fastNewArray.
+ // This handles the case wherein T has a trivial ctor and a trivial dtor.
+ template <typename T, bool trivialCtor, bool trivialDtor>
+ struct NewArrayImpl {
+ static T* fastNewArray(size_t count)
+ {
+ T* p = static_cast<T*>(fastMalloc(sizeof(T) * count));
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeFastNewArray);
+ return p;
+ }
+ };
+
+ // This is a support template for fastNewArray.
+ // This handles the case wherein T has a non-trivial ctor and a trivial dtor.
+ template <typename T>
+ struct NewArrayImpl<T, false, true> {
+ static T* fastNewArray(size_t count)
+ {
+ T* p = static_cast<T*>(fastMalloc(sizeof(T) * count));
+
+ if (!p)
+ return 0;
+
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeFastNewArray);
+
+ for (T* pObject = p, *pObjectEnd = pObject + count; pObject != pObjectEnd; ++pObject)
+ ::new(pObject) T;
+
+ return p;
+ }
+ };
+
+ // This is a support template for fastNewArray.
+ // This handles the case wherein T has a trivial ctor and a non-trivial dtor.
+ template <typename T>
+ struct NewArrayImpl<T, true, false> {
+ static T* fastNewArray(size_t count)
+ {
+ void* p = fastMalloc(sizeof(AllocAlignmentInteger) + (sizeof(T) * count));
+ ArraySize<T> a = { static_cast<AllocAlignmentInteger*>(p) };
+
+ if (!p)
+ return 0;
+
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeFastNewArray);
+ *a.size++ = count;
+ // No need to construct the objects in this case.
+
+ return a.t;
+ }
+ };
+
+ // This is a support template for fastNewArray.
+ // This handles the case wherein T has a non-trivial ctor and a non-trivial dtor.
+ template <typename T>
+ struct NewArrayImpl<T, false, false> {
+ static T* fastNewArray(size_t count)
+ {
+ void* p = fastMalloc(sizeof(AllocAlignmentInteger) + (sizeof(T) * count));
+ ArraySize<T> a = { static_cast<AllocAlignmentInteger*>(p) };
+
+ if (!p)
+ return 0;
+
+ fastMallocMatchValidateMalloc(p, Internal::AllocTypeFastNewArray);
+ *a.size++ = count;
+
+ for (T* pT = a.t, *pTEnd = pT + count; pT != pTEnd; ++pT)
+ ::new(pT) T;
+
+ return a.t;
+ }
+ };
+ } // namespace Internal
+
+ template <typename T>
+ inline T* fastNewArray(size_t count)
+ {
+ return Internal::NewArrayImpl<T, WTF::HasTrivialConstructor<T>::value, WTF::HasTrivialDestructor<T>::value>::fastNewArray(count);
+ }
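+
+ // Usage sketch (illustrative): fastNewArray must be paired with
+ // fastDeleteArray. For types with a non-trivial destructor the element
+ // count is stored in a hidden header just before the returned pointer,
+ // and fastDeleteArray reads it back to run the destructors.
+ //
+ //     Widget* w = fastNewArray<Widget>(8); // Widget: a hypothetical class
+ //     fastDeleteArray(w); // runs the 8 dtors, frees the header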
+
+ template <typename T>
+ inline void fastDelete(T* p)
+ {
+ if (!p)
+ return;
+
+ fastMallocMatchValidateFree(p, Internal::AllocTypeFastNew);
+ p->~T();
+ fastFree(p);
+ }
+
+ namespace Internal {
+ // This is a support template for fastDeleteArray.
+ // This handles the case wherein T has a trivial dtor.
+ template <typename T, bool trivialDtor>
+ struct DeleteArrayImpl {
+ static void fastDeleteArray(void* p)
+ {
+ // No need to destruct the objects in this case.
+ // We expect that fastFree checks for null.
+ fastMallocMatchValidateFree(p, Internal::AllocTypeFastNewArray);
+ fastFree(p);
+ }
+ };
+
+ // This is a support template for fastDeleteArray.
+ // This handles the case wherein T has a non-trivial dtor.
+ template <typename T>
+ struct DeleteArrayImpl<T, false> {
+ static void fastDeleteArray(T* p)
+ {
+ if (!p)
+ return;
+
+ ArraySize<T> a;
+ a.t = p;
+ a.size--; // Step back over the hidden element-count header.
+
+ T* pEnd = p + *a.size;
+ while (pEnd-- != p)
+ pEnd->~T();
+
+ fastMallocMatchValidateFree(a.size, Internal::AllocTypeFastNewArray);
+ fastFree(a.size);
+ }
+ };
+
+ } // namespace Internal
+
+ template <typename T>
+ void fastDeleteArray(T* p)
+ {
+ Internal::DeleteArrayImpl<T, WTF::HasTrivialDestructor<T>::value>::fastDeleteArray(p);
+ }
+
+
+ template <typename T>
+ inline void fastNonNullDelete(T* p)
+ {
+ fastMallocMatchValidateFree(p, Internal::AllocTypeFastNew);
+ p->~T();
+ fastFree(p);
+ }
+
+ namespace Internal {
+ // This is a support template for fastDeleteArray.
+ // This handles the case wherein T has a trivial dtor.
+ template <typename T, bool trivialDtor>
+ struct NonNullDeleteArrayImpl {
+ static void fastNonNullDeleteArray(void* p)
+ {
+ fastMallocMatchValidateFree(p, Internal::AllocTypeFastNewArray);
+ // No need to destruct the objects in this case.
+ fastFree(p);
+ }
+ };
+
+ // This is a support template for fastDeleteArray.
+ // This handles the case wherein T has a non-trivial dtor.
+ template <typename T>
+ struct NonNullDeleteArrayImpl<T, false> {
+ static void fastNonNullDeleteArray(T* p)
+ {
+ ArraySize<T> a;
+ a.t = p;
+ a.size--; // Step back over the hidden element-count header.
+
+ T* pEnd = p + *a.size;
+ while (pEnd-- != p)
+ pEnd->~T();
+
+ fastMallocMatchValidateFree(a.size, Internal::AllocTypeFastNewArray);
+ fastFree(a.size);
+ }
+ };
+
+ } // namespace Internal
+
+ template <typename T>
+ void fastNonNullDeleteArray(T* p)
+ {
+ Internal::NonNullDeleteArrayImpl<T, WTF::HasTrivialDestructor<T>::value>::fastNonNullDeleteArray(p);
+ }
+
+
+} // namespace WTF
+
+// Pull WTF::FastAllocBase into the global namespace so callers can write
+// FastAllocBase without the explicit WTF:: qualification.
+using WTF::FastAllocBase;
+
+#endif // FastAllocBase_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp
new file mode 100644
index 0000000..c855a41
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.cpp
@@ -0,0 +1,4148 @@
+// Copyright (c) 2005, 2007, Google Inc.
+// All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Sanjay Ghemawat <opensource@google.com>
+//
+// A malloc that uses a per-thread cache to satisfy small malloc requests.
+// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
+//
+// See doc/tcmalloc.html for a high-level
+// description of how this malloc works.
+//
+// SYNCHRONIZATION
+// 1. The thread-specific lists are accessed without acquiring any locks.
+// This is safe because each such list is only accessed by one thread.
+// 2. We have a lock per central free-list, and hold it while manipulating
+// the central free list for a particular size.
+// 3. The central page allocator is protected by "pageheap_lock".
+// 4. The pagemap (which maps from page-number to descriptor),
+// can be read without holding any locks, and written while holding
+// the "pageheap_lock".
+// 5. To improve performance, a subset of the information one can get
+// from the pagemap is cached in a data structure, pagemap_cache_,
+// that atomically reads and writes its entries. This cache can be
+// read and written without locking.
+//
+// This multi-threaded access to the pagemap is safe for fairly
+// subtle reasons. We basically assume that when an object X is
+// allocated by thread A and deallocated by thread B, there must
+// have been appropriate synchronization in the handoff of object
+// X from thread A to thread B. The same logic applies to pagemap_cache_.
+//
+// THE PAGEID-TO-SIZECLASS CACHE
+// Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
+// returns 0 for a particular PageID then that means "no information," not that
+// the sizeclass is 0. The cache may have stale information for pages that do
+// not hold the beginning of any free()'able object. Staleness is eliminated
+// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
+// do_memalign() for all other relevant pages.
+//
+// TODO: Bias reclamation to larger addresses
+// TODO: implement mallinfo/mallopt
+// TODO: Better testing
+//
+// 9/28/2003 (new page-level allocator replaces ptmalloc2):
+// * malloc/free of small objects goes from ~300 ns to ~50 ns.
+// * allocation of a reasonably complicated struct
+// goes from about 1100 ns to about 300 ns.
+
+#include "config.h"
+#include "FastMalloc.h"
+
+#include "Assertions.h"
+#include <limits>
+#if ENABLE(JSC_MULTIPLE_THREADS)
+#include <pthread.h>
+#endif
+
+#ifndef NO_TCMALLOC_SAMPLES
+#ifdef WTF_CHANGES
+#define NO_TCMALLOC_SAMPLES
+#endif
+#endif
+
+#if !defined(USE_SYSTEM_MALLOC) && defined(NDEBUG)
+#define FORCE_SYSTEM_MALLOC 0
+#else
+#define FORCE_SYSTEM_MALLOC 1
+#endif
+
+#ifndef NDEBUG
+namespace WTF {
+
+#if ENABLE(JSC_MULTIPLE_THREADS)
+static pthread_key_t isForbiddenKey;
+static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
+static void initializeIsForbiddenKey()
+{
+ pthread_key_create(&isForbiddenKey, 0);
+}
+
+static bool isForbidden()
+{
+ pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
+ return !!pthread_getspecific(isForbiddenKey);
+}
+
+void fastMallocForbid()
+{
+ pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
+ pthread_setspecific(isForbiddenKey, &isForbiddenKey);
+}
+
+void fastMallocAllow()
+{
+ pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
+ pthread_setspecific(isForbiddenKey, 0);
+}
+
+#else
+
+static bool staticIsForbidden;
+static bool isForbidden()
+{
+ return staticIsForbidden;
+}
+
+void fastMallocForbid()
+{
+ staticIsForbidden = true;
+}
+
+void fastMallocAllow()
+{
+ staticIsForbidden = false;
+}
+#endif // ENABLE(JSC_MULTIPLE_THREADS)
+
+} // namespace WTF
+#endif // NDEBUG
+
+#include <string.h>
+
+namespace WTF {
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+
+namespace Internal {
+
+void fastMallocMatchFailed(void*)
+{
+ CRASH();
+}
+
+} // namespace Internal
+
+#endif
+
+void* fastZeroedMalloc(size_t n)
+{
+ void* result = fastMalloc(n);
+ memset(result, 0, n);
+ return result;
+}
+
+void* tryFastZeroedMalloc(size_t n)
+{
+ void* result = tryFastMalloc(n);
+ if (!result)
+ return 0;
+ memset(result, 0, n);
+ return result;
+}
+
+} // namespace WTF
+
+#if FORCE_SYSTEM_MALLOC
+
+#include <stdlib.h>
+#if !PLATFORM(WIN_OS)
+ #include <pthread.h>
+#else
+ #include "windows.h"
+#endif
+
+namespace WTF {
+
+void* tryFastMalloc(size_t n)
+{
+ ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
+ return 0;
+
+ void* result = malloc(n + sizeof(AllocAlignmentInteger));
+ if (!result)
+ return 0;
+
+ *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
+ result = static_cast<AllocAlignmentInteger*>(result) + 1;
+
+ return result;
+#else
+ return malloc(n);
+#endif
+}
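+
+// Memory layout under FAST_MALLOC_MATCH_VALIDATION (illustrative):
+//
+//     [ AllocAlignmentInteger tag ][ caller's n bytes ... ]
+//     ^ malloc() result            ^ pointer returned to the caller
+//
+// fastFree() below steps back one AllocAlignmentInteger, checks that the tag
+// is AllocTypeMalloc, and passes the original pointer to free().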
+
+void* fastMalloc(size_t n)
+{
+ ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ void* result = tryFastMalloc(n);
+#else
+ void* result = malloc(n);
+#endif
+
+ if (!result)
+ CRASH();
+ return result;
+}
+
+void* tryFastCalloc(size_t n_elements, size_t element_size)
+{
+ ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ size_t totalBytes = n_elements * element_size;
+ if ((n_elements > 1 && element_size && (totalBytes / element_size) != n_elements) || (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes))
+ return 0;
+
+ totalBytes += sizeof(AllocAlignmentInteger);
+ void* result = malloc(totalBytes);
+ if (!result)
+ return 0;
+
+ memset(result, 0, totalBytes);
+ *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
+ result = static_cast<AllocAlignmentInteger*>(result) + 1;
+ return result;
+#else
+ return calloc(n_elements, element_size);
+#endif
+}
+
+void* fastCalloc(size_t n_elements, size_t element_size)
+{
+ ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ void* result = tryFastCalloc(n_elements, element_size);
+#else
+ void* result = calloc(n_elements, element_size);
+#endif
+
+ if (!result)
+ CRASH();
+ return result;
+}
+
+void fastFree(void* p)
+{
+ ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (!p)
+ return;
+
+ AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
+ if (*header != Internal::AllocTypeMalloc)
+ Internal::fastMallocMatchFailed(p);
+ free(header);
+#else
+ free(p);
+#endif
+}
+
+void* tryFastRealloc(void* p, size_t n)
+{
+ ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (p) {
+ if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
+ return 0;
+ AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
+ if (*header != Internal::AllocTypeMalloc)
+ Internal::fastMallocMatchFailed(p);
+ void* result = realloc(header, n + sizeof(AllocAlignmentInteger));
+ if (!result)
+ return 0;
+
+ // This should not be needed because the value is already there:
+ // *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
+ result = static_cast<AllocAlignmentInteger*>(result) + 1;
+ return result;
+ } else {
+ return fastMalloc(n);
+ }
+#else
+ return realloc(p, n);
+#endif
+}
+
+void* fastRealloc(void* p, size_t n)
+{
+ ASSERT(!isForbidden());
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ void* result = tryFastRealloc(p, n);
+#else
+ void* result = realloc(p, n);
+#endif
+
+ if (!result)
+ CRASH();
+ return result;
+}
+
+void releaseFastMallocFreeMemory() { }
+
+FastMallocStatistics fastMallocStatistics()
+{
+ FastMallocStatistics statistics = { 0, 0, 0, 0 };
+ return statistics;
+}
+
+} // namespace WTF
+
+#if PLATFORM(DARWIN)
+// This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
+// It will never be used in this case, so its type and value are less interesting than its presence.
+extern "C" const int jscore_fastmalloc_introspection = 0;
+#endif
+
+#else // FORCE_SYSTEM_MALLOC
+
+#if HAVE(STDINT_H)
+#include <stdint.h>
+#elif HAVE(INTTYPES_H)
+#include <inttypes.h>
+#else
+#include <sys/types.h>
+#endif
+
+#include "AlwaysInline.h"
+#include "Assertions.h"
+#include "TCPackedCache.h"
+#include "TCPageMap.h"
+#include "TCSpinLock.h"
+#include "TCSystemAlloc.h"
+#include <algorithm>
+#include <errno.h>
+#include <limits>
+#include <new>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#if COMPILER(MSVC)
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include <windows.h>
+#endif
+
+#if WTF_CHANGES
+
+#if PLATFORM(DARWIN)
+#include "MallocZoneSupport.h"
+#include <wtf/HashSet.h>
+#endif
+
+#ifndef PRIuS
+#define PRIuS "zu"
+#endif
+
+// Calling pthread_getspecific through a global function pointer is faster than a normal
+// call to the function on Mac OS X, and it's used in performance-critical code. So we
+// use a function pointer. But that's not necessarily faster on other platforms, and we had
+// problems with this technique on Windows, so we'll do this only on Mac OS X.
+#if PLATFORM(DARWIN)
+static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
+#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
+#endif
+
+#define DEFINE_VARIABLE(type, name, value, meaning) \
+ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
+ type FLAGS_##name(value); \
+ char FLAGS_no##name; \
+ } \
+ using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
+
+#define DEFINE_int64(name, value, meaning) \
+ DEFINE_VARIABLE(int64_t, name, value, meaning)
+
+#define DEFINE_double(name, value, meaning) \
+ DEFINE_VARIABLE(double, name, value, meaning)
+
+namespace WTF {
+
+#define malloc fastMalloc
+#define calloc fastCalloc
+#define free fastFree
+#define realloc fastRealloc
+
+#define MESSAGE LOG_ERROR
+#define CHECK_CONDITION ASSERT
+
+#if PLATFORM(DARWIN)
+class Span;
+class TCMalloc_Central_FreeListPadded;
+class TCMalloc_PageHeap;
+class TCMalloc_ThreadCache;
+template <typename T> class PageHeapAllocator;
+
+class FastMallocZone {
+public:
+ static void init();
+
+ static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
+ static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
+ static boolean_t check(malloc_zone_t*) { return true; }
+ static void print(malloc_zone_t*, boolean_t) { }
+ static void log(malloc_zone_t*, void*) { }
+ static void forceLock(malloc_zone_t*) { }
+ static void forceUnlock(malloc_zone_t*) { }
+ static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }
+
+private:
+ FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
+ static size_t size(malloc_zone_t*, const void*);
+ static void* zoneMalloc(malloc_zone_t*, size_t);
+ static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
+ static void zoneFree(malloc_zone_t*, void*);
+ static void* zoneRealloc(malloc_zone_t*, void*, size_t);
+ static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
+ static void zoneDestroy(malloc_zone_t*) { }
+
+ malloc_zone_t m_zone;
+ TCMalloc_PageHeap* m_pageHeap;
+ TCMalloc_ThreadCache** m_threadHeaps;
+ TCMalloc_Central_FreeListPadded* m_centralCaches;
+ PageHeapAllocator<Span>* m_spanAllocator;
+ PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
+};
+
+#endif
+
+#endif
+
+#ifndef WTF_CHANGES
+// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
+// you're porting to a system where you really can't get a stacktrace.
+#ifdef NO_TCMALLOC_SAMPLES
+// We use #define so code compiles even if you #include stacktrace.h somehow.
+# define GetStackTrace(stack, depth, skip) (0)
+#else
+# include <google/stacktrace.h>
+#endif
+#endif
+
+// Even if we have support for thread-local storage in the compiler
+// and linker, the OS may not support it. We need to check that at
+// runtime. Right now, we have to keep a manual set of "bad" OSes.
+#if defined(HAVE_TLS)
+ static bool kernel_supports_tls = false; // be conservative
+ static inline bool KernelSupportsTLS() {
+ return kernel_supports_tls;
+ }
+# if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS
+ static void CheckIfKernelSupportsTLS() {
+ kernel_supports_tls = false;
+ }
+# else
+# include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too
+ static void CheckIfKernelSupportsTLS() {
+ struct utsname buf;
+ if (uname(&buf) != 0) { // should be impossible
+ MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
+ kernel_supports_tls = false;
+ } else if (strcasecmp(buf.sysname, "linux") == 0) {
+ // The linux case: the first kernel to support TLS was 2.6.0
+ if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x
+ kernel_supports_tls = false;
+ else if (buf.release[0] == '2' && buf.release[1] == '.' &&
+ buf.release[2] >= '0' && buf.release[2] < '6' &&
+ buf.release[3] == '.') // 2.0 - 2.5
+ kernel_supports_tls = false;
+ else
+ kernel_supports_tls = true;
+ } else { // some other kernel, we'll be optimistic
+ kernel_supports_tls = true;
+ }
+ // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
+ }
+# endif // HAVE_DECL_UNAME
+#endif // HAVE_TLS
+
+// __THROW is defined in glibc systems. It means, counter-intuitively,
+// "This function will never throw an exception." It's an optional
+// optimization tool, but we may need to use it to match glibc prototypes.
+#ifndef __THROW // I guess we're not on a glibc system
+# define __THROW // __THROW is just an optimization, so ok to make it ""
+#endif
+
+//-------------------------------------------------------------------
+// Configuration
+//-------------------------------------------------------------------
+
+// Not all possible combinations of the following parameters make
+// sense. In particular, if kMaxSize increases, you may have to
+// increase kNumClasses as well.
+static const size_t kPageShift = 12;
+static const size_t kPageSize = 1 << kPageShift;
+static const size_t kMaxSize = 8u * kPageSize;
+static const size_t kAlignShift = 3;
+static const size_t kAlignment = 1 << kAlignShift;
+static const size_t kNumClasses = 68;
+
+// Allocates a big block of memory for the pagemap once we reach more than
+// 128MB
+static const size_t kPageMapBigAllocationThreshold = 128 << 20;
+
+// Minimum number of pages to fetch from system at a time. Must be
+// significantly bigger than kBlockSize to amortize system-call
+// overhead, and also to reduce external fragmentation. Also, we
+// should keep this value big because various incarnations of Linux
+// have small limits on the number of mmap() regions per
+// address-space.
+static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
+
+// Number of objects to move between a per-thread list and a central
+// list in one shot. We want this to be not too small so we can
+// amortize the lock overhead for accessing the central list. Making
+// it too big may temporarily cause unnecessary memory wastage in the
+// per-thread free list until the scavenger cleans up the list.
+static int num_objects_to_move[kNumClasses];
+
+// Maximum length we allow a per-thread free-list to have before we
+// move objects from it into the corresponding central free-list. We
+// want this big to avoid locking the central free-list too often. It
+// should not hurt to make this list somewhat big because the
+// scavenging code will shrink it down when its contents are not in use.
+static const int kMaxFreeListLength = 256;
+
+// Lower and upper bounds on the per-thread cache sizes
+static const size_t kMinThreadCacheSize = kMaxSize * 2;
+static const size_t kMaxThreadCacheSize = 2 << 20;
+
+// Default bound on the total amount of thread caches
+static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
+
+// For all span-lengths < kMaxPages we keep an exact-size list.
+// REQUIRED: kMaxPages >= kMinSystemAlloc;
+static const size_t kMaxPages = kMinSystemAlloc;
+
+/* The smallest prime > 2^n */
+static int primes_list[] = {
+ // Small values might cause high rates of sampling
+ // and hence commented out.
+ // 2, 5, 11, 17, 37, 67, 131, 257,
+ // 521, 1031, 2053, 4099, 8209, 16411,
+ 32771, 65537, 131101, 262147, 524309, 1048583,
+ 2097169, 4194319, 8388617, 16777259, 33554467 };
+
+// Twice the approximate gap between sampling actions.
+// I.e., we take one sample approximately once every
+// tcmalloc_sample_parameter/2
+// bytes of allocation, i.e., ~ once every 128KB.
+// Must be a prime number.
+#ifdef NO_TCMALLOC_SAMPLES
+DEFINE_int64(tcmalloc_sample_parameter, 0,
+ "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
+static size_t sample_period = 0;
+#else
+DEFINE_int64(tcmalloc_sample_parameter, 262147,
+ "Twice the approximate gap between sampling actions."
+ " Must be a prime number. Otherwise will be rounded up to a "
+ " larger prime number");
+static size_t sample_period = 262147;
+#endif
+
+// Protects sample_period above
+static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
+
+// Parameters for controlling how fast memory is returned to the OS.
+
+DEFINE_double(tcmalloc_release_rate, 1,
+ "Rate at which we release unused memory to the system. "
+ "Zero means we never release memory back to the system. "
+ "Increase this flag to return memory faster; decrease it "
+ "to return memory slower. Reasonable rates are in the "
+ "range [0,10]");
+
+//-------------------------------------------------------------------
+// Mapping from size to size_class and vice versa
+//-------------------------------------------------------------------
+
+// Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
+// array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
+// So for these larger sizes we have an array indexed by ceil(size/128).
+//
+// We flatten both logical arrays into one physical array and use
+// arithmetic to compute an appropriate index. The constants used by
+// ClassIndex() were selected to make the flattening work.
+//
+// Examples:
+// Size Expression Index
+// -------------------------------------------------------
+// 0 (0 + 7) / 8 0
+// 1 (1 + 7) / 8 1
+// ...
+// 1024 (1024 + 7) / 8 128
+// 1025 (1025 + 127 + (120<<7)) / 128 129
+// ...
+// 32768 (32768 + 127 + (120<<7)) / 128 376
+static const size_t kMaxSmallSize = 1024;
+static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128
+static const int add_amount[2] = { 7, 127 + (120 << 7) };
+static unsigned char class_array[377];
+
+// Compute index of the class_array[] entry for a given size
+static inline int ClassIndex(size_t s) {
+ const int i = (s > kMaxSmallSize);
+ return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
+}
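+
+// Worked examples (matching the table above):
+// ClassIndex(1024) == (1024 + 7) >> 3 == 128
+// ClassIndex(1025) == (1025 + 127 + (120 << 7)) >> 7 == 129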
+
+// Mapping from size class to max size storable in that class
+static size_t class_to_size[kNumClasses];
+
+// Mapping from size class to number of pages to allocate at a time
+static size_t class_to_pages[kNumClasses];
+
+// TransferCache is used to cache transfers of num_objects_to_move[size_class]
+// back and forth between thread caches and the central cache for a given size
+// class.
+struct TCEntry {
+ void *head; // Head of chain of objects.
+ void *tail; // Tail of chain of objects.
+};
+// A central cache freelist can have anywhere from 0 to kNumTransferEntries
+// slots to put link list chains into. To keep memory usage bounded the total
+// number of TCEntries across size classes is fixed. Currently each size
+// class is initially given one TCEntry which also means that the maximum any
+// one class can have is kNumClasses.
+static const int kNumTransferEntries = kNumClasses;
+
+// Note: the following only works for "n"s that fit in 32-bits, but
+// that is fine since we only use it for small sizes.
+static inline int LgFloor(size_t n) {
+ int log = 0;
+ for (int i = 4; i >= 0; --i) {
+ int shift = (1 << i);
+ size_t x = n >> shift;
+ if (x != 0) {
+ n = x;
+ log += shift;
+ }
+ }
+ ASSERT(n == 1);
+ return log;
+}
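+// For example, LgFloor(36) == 5, since 2^5 = 32 <= 36 < 64.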
+
+// Some very basic linked list functions for dealing with using void * as
+// storage.
+
+static inline void *SLL_Next(void *t) {
+ return *(reinterpret_cast<void**>(t));
+}
+
+static inline void SLL_SetNext(void *t, void *n) {
+ *(reinterpret_cast<void**>(t)) = n;
+}
+
+static inline void SLL_Push(void **list, void *element) {
+ SLL_SetNext(element, *list);
+ *list = element;
+}
+
+static inline void *SLL_Pop(void **list) {
+ void *result = *list;
+ *list = SLL_Next(*list);
+ return result;
+}
+
+
+// Remove N elements from a linked list to which head points. head will be
+// modified to point to the new head. start and end will point to the first
+// and last nodes of the range. Note that end will point to NULL after this
+// function is called.
+static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
+ if (N == 0) {
+ *start = NULL;
+ *end = NULL;
+ return;
+ }
+
+ void *tmp = *head;
+ for (int i = 1; i < N; ++i) {
+ tmp = SLL_Next(tmp);
+ }
+
+ *start = *head;
+ *end = tmp;
+ *head = SLL_Next(tmp);
+ // Unlink range from list.
+ SLL_SetNext(tmp, NULL);
+}
+
+static inline void SLL_PushRange(void **head, void *start, void *end) {
+ if (!start) return;
+ SLL_SetNext(end, *head);
+ *head = start;
+}
+
+static inline size_t SLL_Size(void *head) {
+ int count = 0;
+ while (head) {
+ count++;
+ head = SLL_Next(head);
+ }
+ return count;
+}
+
+// Setup helper functions.
+
+static ALWAYS_INLINE size_t SizeClass(size_t size) {
+ return class_array[ClassIndex(size)];
+}
+
+// Get the byte-size for a specified class
+static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
+ return class_to_size[cl];
+}
+static int NumMoveSize(size_t size) {
+ if (size == 0) return 0;
+ // Use approx 64k transfers between thread and central caches.
+ int num = static_cast<int>(64.0 * 1024.0 / size);
+ if (num < 2) num = 2;
+ // Clamp well below kMaxFreeListLength to avoid ping pong between central
+ // and thread caches.
+ if (num > static_cast<int>(0.8 * kMaxFreeListLength))
+ num = static_cast<int>(0.8 * kMaxFreeListLength);
+
+ // Also, avoid bringing in too many objects into small object free
+ // lists. There are lots of such lists, and if we allow each one to
+ // fetch too many at a time, we end up having to scavenge too often
+ // (especially when there are lots of threads and each thread gets a
+ // small allowance for its thread cache).
+ //
+ // TODO: Make thread cache free list sizes dynamic so that we do not
+ // have to equally divide a fixed resource amongst lots of threads.
+ if (num > 32) num = 32;
+
+ return num;
+}
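+// For example: a 64-byte class gives 65536/64 == 1024, which the clamps
+// above cut down to 32; an 8192-byte class gives 65536/8192 == 8, which
+// survives both clamps (assuming 0.8 * kMaxFreeListLength >= 8).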
+
+// Initialize the mapping arrays
+static void InitSizeClasses() {
+ // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
+ if (ClassIndex(0) < 0) {
+ MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
+ CRASH();
+ }
+ if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
+ MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
+ CRASH();
+ }
+
+ // Compute the size classes we want to use
+ size_t sc = 1; // Next size class to assign
+ unsigned char alignshift = kAlignShift;
+ int last_lg = -1;
+ for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
+ int lg = LgFloor(size);
+ if (lg > last_lg) {
+ // Increase alignment every so often.
+ //
+ // Since we double the alignment every time size doubles and
+ // size >= 128, this means that space wasted due to alignment is
+ // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
+ // bytes, so the space wasted as a percentage starts falling for
+ // sizes > 2K.
+ if ((lg >= 7) && (alignshift < 8)) {
+ alignshift++;
+ }
+ last_lg = lg;
+ }
+
+ // Allocate enough pages so leftover is less than 1/8 of total.
+ // This bounds wasted space to at most 12.5%.
+ size_t psize = kPageSize;
+ while ((psize % size) > (psize >> 3)) {
+ psize += kPageSize;
+ }
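+ // For example, assuming 4K pages, a 3072-byte class leaves 1024 bytes over
+ // in one page (1024 > 4096/8) and 2048 over in two (2048 > 8192/8), so the
+ // loop settles on psize = 12288: three pages with zero leftover.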
+ const size_t my_pages = psize >> kPageShift;
+
+ if (sc > 1 && my_pages == class_to_pages[sc-1]) {
+ // See if we can merge this into the previous class without
+ // increasing the fragmentation of the previous class.
+ const size_t my_objects = (my_pages << kPageShift) / size;
+ const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
+ / class_to_size[sc-1];
+ if (my_objects == prev_objects) {
+ // Adjust last class to include this size
+ class_to_size[sc-1] = size;
+ continue;
+ }
+ }
+
+ // Add new class
+ class_to_pages[sc] = my_pages;
+ class_to_size[sc] = size;
+ sc++;
+ }
+ if (sc != kNumClasses) {
+ MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
+ sc, int(kNumClasses));
+ CRASH();
+ }
+
+ // Initialize the mapping arrays
+ int next_size = 0;
+ for (unsigned char c = 1; c < kNumClasses; c++) {
+ const size_t max_size_in_class = class_to_size[c];
+ for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
+ class_array[ClassIndex(s)] = c;
+ }
+ next_size = static_cast<int>(max_size_in_class + kAlignment);
+ }
+
+ // Double-check sizes just to be safe
+ for (size_t size = 0; size <= kMaxSize; size++) {
+ const size_t sc = SizeClass(size);
+ if (sc == 0) {
+ MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
+ CRASH();
+ }
+ if (sc > 1 && size <= class_to_size[sc-1]) {
+ MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
+ "\n", sc, size);
+ CRASH();
+ }
+ if (sc >= kNumClasses) {
+ MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
+ CRASH();
+ }
+ const size_t s = class_to_size[sc];
+ if (size > s) {
+ MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
+ CRASH();
+ }
+ if (s == 0) {
+ MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
+ CRASH();
+ }
+ }
+
+ // Initialize the num_objects_to_move array.
+ for (size_t cl = 1; cl < kNumClasses; ++cl) {
+ num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
+ }
+
+#ifndef WTF_CHANGES
+ if (false) {
+ // Dump class sizes and maximum external wastage per size class
+ for (size_t cl = 1; cl < kNumClasses; ++cl) {
+ const int alloc_size = class_to_pages[cl] << kPageShift;
+ const int alloc_objs = alloc_size / class_to_size[cl];
+ const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
+ const int max_waste = alloc_size - min_used;
+ MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
+ int(cl),
+ int(class_to_size[cl-1] + 1),
+ int(class_to_size[cl]),
+ int(class_to_pages[cl] << kPageShift),
+ max_waste * 100.0 / alloc_size
+ );
+ }
+ }
+#endif
+}
+
+// -------------------------------------------------------------------------
+// Simple allocator for objects of a specified type. External locking
+// is required before accessing one of these objects.
+// -------------------------------------------------------------------------
+
+// Metadata allocator -- keeps stats about how many bytes allocated
+static uint64_t metadata_system_bytes = 0;
+static void* MetaDataAlloc(size_t bytes) {
+ void* result = TCMalloc_SystemAlloc(bytes, 0);
+ if (result != NULL) {
+ metadata_system_bytes += bytes;
+ }
+ return result;
+}
+
+template <class T>
+class PageHeapAllocator {
+ private:
+ // How much to allocate from system at a time
+ static const size_t kAllocIncrement = 32 << 10;
+
+ // Aligned size of T
+ static const size_t kAlignedSize
+ = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
+
+ // Free area from which to carve new objects
+ char* free_area_;
+ size_t free_avail_;
+
+ // Linked list of all regions allocated by this allocator
+ void* allocated_regions_;
+
+ // Free list of already carved objects
+ void* free_list_;
+
+ // Number of allocated but unfreed objects
+ int inuse_;
+
+ public:
+ void Init() {
+ ASSERT(kAlignedSize <= kAllocIncrement);
+ inuse_ = 0;
+ allocated_regions_ = 0;
+ free_area_ = NULL;
+ free_avail_ = 0;
+ free_list_ = NULL;
+ }
+
+ T* New() {
+ // Consult free list
+ void* result;
+ if (free_list_ != NULL) {
+ result = free_list_;
+ free_list_ = *(reinterpret_cast<void**>(result));
+ } else {
+ if (free_avail_ < kAlignedSize) {
+ // Need more room
+ char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
+ if (!new_allocation)
+ CRASH();
+
+ *(void**)new_allocation = allocated_regions_;
+ allocated_regions_ = new_allocation;
+ free_area_ = new_allocation + kAlignedSize;
+ free_avail_ = kAllocIncrement - kAlignedSize;
+ }
+ result = free_area_;
+ free_area_ += kAlignedSize;
+ free_avail_ -= kAlignedSize;
+ }
+ inuse_++;
+ return reinterpret_cast<T*>(result);
+ }
+
+ void Delete(T* p) {
+ *(reinterpret_cast<void**>(p)) = free_list_;
+ free_list_ = p;
+ inuse_--;
+ }
+
+ int inuse() const { return inuse_; }
+
+#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
+ template <class Recorder>
+ void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
+ {
+ vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_);
+ while (adminAllocation) {
+ recorder.recordRegion(adminAllocation, kAllocIncrement);
+ adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation));
+ }
+ }
+#endif
+};
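+// A minimal sketch of the allocator's contract -- illustrative only, using a
+// hypothetical ExampleNode type; real callers (e.g. the span allocator
+// below) must also provide their own locking, as noted above.
+#if 0
+struct ExampleNode { void* payload; };
+static PageHeapAllocator<ExampleNode> example_allocator;
+static void PageHeapAllocatorExample() {
+  example_allocator.Init();                  // Must be called before New().
+  ExampleNode* n = example_allocator.New();  // Carved from a 32KB region.
+  example_allocator.Delete(n);               // Recycled onto the free list.
+  ASSERT(example_allocator.inuse() == 0);
+}
+#endif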
+
+// -------------------------------------------------------------------------
+// Span - a contiguous run of pages
+// -------------------------------------------------------------------------
+
+// Type that can hold a page number
+typedef uintptr_t PageID;
+
+// Type that can hold the length of a run of pages
+typedef uintptr_t Length;
+
+static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
+
+// Convert byte size into pages. This won't overflow, but may return
+// an unreasonably large value if bytes is huge enough.
+static inline Length pages(size_t bytes) {
+ return (bytes >> kPageShift) +
+ ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
+}
+
+// Convert a user size into the number of bytes that will actually be
+// allocated
+static size_t AllocationSize(size_t bytes) {
+ if (bytes > kMaxSize) {
+ // Large object: we allocate an integral number of pages
+ ASSERT(bytes <= (kMaxValidPages << kPageShift));
+ return pages(bytes) << kPageShift;
+ } else {
+ // Small object: find the size class to which it belongs
+ return ByteSizeForClass(SizeClass(bytes));
+ }
+}
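+// For example, assuming 4K pages and kMaxSize == 32768 (as the class_array
+// table above suggests): AllocationSize(40000) == pages(40000) << kPageShift
+// == 10 * 4096 == 40960, while a small request is rounded up to the byte
+// size of its size class.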
+
+// Information kept for a span (a contiguous run of pages).
+struct Span {
+ PageID start; // Starting page number
+ Length length; // Number of pages in span
+ Span* next; // Used when in linked list
+ Span* prev; // Used when in linked list
+ void* objects; // Linked list of free objects
+ unsigned int free : 1; // Is the span free
+#ifndef NO_TCMALLOC_SAMPLES
+ unsigned int sample : 1; // Sampled object?
+#endif
+ unsigned int sizeclass : 8; // Size-class for small objects (or 0)
+ unsigned int refcount : 11; // Number of non-free objects
+ bool decommitted : 1;
+
+#undef SPAN_HISTORY
+#ifdef SPAN_HISTORY
+ // For debugging, we can keep a log of events per span
+ int nexthistory;
+ char history[64];
+ int value[64];
+#endif
+};
+
+#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)
+
+#ifdef SPAN_HISTORY
+void Event(Span* span, char op, int v = 0) {
+ span->history[span->nexthistory] = op;
+ span->value[span->nexthistory] = v;
+ span->nexthistory++;
+ if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
+}
+#else
+#define Event(s,o,v) ((void) 0)
+#endif
+
+// Allocator/deallocator for spans
+static PageHeapAllocator<Span> span_allocator;
+static Span* NewSpan(PageID p, Length len) {
+ Span* result = span_allocator.New();
+ memset(result, 0, sizeof(*result));
+ result->start = p;
+ result->length = len;
+#ifdef SPAN_HISTORY
+ result->nexthistory = 0;
+#endif
+ return result;
+}
+
+static inline void DeleteSpan(Span* span) {
+#ifndef NDEBUG
+ // In debug mode, trash the contents of deleted Spans
+ memset(span, 0x3f, sizeof(*span));
+#endif
+ span_allocator.Delete(span);
+}
+
+// -------------------------------------------------------------------------
+// Doubly linked list of spans.
+// -------------------------------------------------------------------------
+
+static inline void DLL_Init(Span* list) {
+ list->next = list;
+ list->prev = list;
+}
+
+static inline void DLL_Remove(Span* span) {
+ span->prev->next = span->next;
+ span->next->prev = span->prev;
+ span->prev = NULL;
+ span->next = NULL;
+}
+
+static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
+ return list->next == list;
+}
+
+static int DLL_Length(const Span* list) {
+ int result = 0;
+ for (Span* s = list->next; s != list; s = s->next) {
+ result++;
+ }
+ return result;
+}
+
+#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
+static void DLL_Print(const char* label, const Span* list) {
+ MESSAGE("%-10s %p:", label, list);
+ for (const Span* s = list->next; s != list; s = s->next) {
+ MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
+ }
+ MESSAGE("\n");
+}
+#endif
+
+static inline void DLL_Prepend(Span* list, Span* span) {
+ ASSERT(span->next == NULL);
+ ASSERT(span->prev == NULL);
+ span->next = list->next;
+ span->prev = list;
+ list->next->prev = span;
+ list->next = span;
+}
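+// These lists are circular with a sentinel node: an empty list is one whose
+// sentinel points at itself. For example:
+//   Span list; DLL_Init(&list);  // list.next == list.prev == &list
+//   DLL_Prepend(&list, s);       // list.next == s, s->next == &list
+//   DLL_Remove(s);               // empty again; s's links reset to NULL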
+
+// -------------------------------------------------------------------------
+// Stack traces kept for sampled allocations
+// The following state is protected by pageheap_lock_.
+// -------------------------------------------------------------------------
+
+// size/depth are made the same size as a pointer so that some generic
+// code below can conveniently cast them back and forth to void*.
+static const int kMaxStackDepth = 31;
+struct StackTrace {
+ uintptr_t size; // Size of object
+ uintptr_t depth; // Number of PC values stored in array below
+ void* stack[kMaxStackDepth];
+};
+static PageHeapAllocator<StackTrace> stacktrace_allocator;
+static Span sampled_objects;
+
+// -------------------------------------------------------------------------
+// Map from page-id to per-page data
+// -------------------------------------------------------------------------
+
+// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
+// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
+// because sometimes the sizeclass is all the information we need.
+
+// Selector class -- general selector uses 3-level map
+template <int BITS> class MapSelector {
+ public:
+ typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
+ typedef PackedCache<BITS, uint64_t> CacheType;
+};
+
+#if defined(WTF_CHANGES)
+#if PLATFORM(X86_64)
+// On all known X86-64 platforms, the upper 16 bits are always unused and therefore
+// can be excluded from the PageMap key.
+// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
+
+static const size_t kBitsUnusedOn64Bit = 16;
+#else
+static const size_t kBitsUnusedOn64Bit = 0;
+#endif
+
+// A three-level map for 64-bit machines
+template <> class MapSelector<64> {
+ public:
+ typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
+ typedef PackedCache<64, uint64_t> CacheType;
+};
+#endif
+
+// A two-level map for 32-bit machines
+template <> class MapSelector<32> {
+ public:
+ typedef TCMalloc_PageMap2<32 - kPageShift> Type;
+ typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
+};
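+// For example, assuming kPageShift == 12 (4K pages): a 32-bit build gets
+// TCMalloc_PageMap2<20> (2^20 page ids) with a uint16_t-backed packed cache,
+// while an x86-64 build with the WTF_CHANGES specialization above gets
+// TCMalloc_PageMap3<36>.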
+
+// -------------------------------------------------------------------------
+// Page-level allocator
+// * Eager coalescing
+//
+// Heap for page-level allocation. We allow allocating and freeing
+// contiguous runs of pages (each such run is called a "span").
+// -------------------------------------------------------------------------
+
+class TCMalloc_PageHeap {
+ public:
+ void init();
+
+ // Allocate a run of "n" pages. Returns zero if out of memory.
+ Span* New(Length n);
+
+ // Delete the span "[p, p+n-1]".
+ // REQUIRES: span was returned by earlier call to New() and
+ // has not yet been deleted.
+ void Delete(Span* span);
+
+ // Mark an allocated span as being used for small objects of the
+ // specified size-class.
+ // REQUIRES: span was returned by an earlier call to New()
+ // and has not yet been deleted.
+ void RegisterSizeClass(Span* span, size_t sc);
+
+ // Split an allocated span into two spans: one of length "n" pages
+ // followed by another span of length "span->length - n" pages.
+ // Modifies "*span" to point to the first span of length "n" pages.
+ // Returns a pointer to the second span.
+ //
+ // REQUIRES: "0 < n < span->length"
+ // REQUIRES: !span->free
+ // REQUIRES: span->sizeclass == 0
+ Span* Split(Span* span, Length n);
+
+ // Return the descriptor for the specified page.
+ inline Span* GetDescriptor(PageID p) const {
+ return reinterpret_cast<Span*>(pagemap_.get(p));
+ }
+
+#ifdef WTF_CHANGES
+ inline Span* GetDescriptorEnsureSafe(PageID p)
+ {
+ pagemap_.Ensure(p, 1);
+ return GetDescriptor(p);
+ }
+
+ size_t ReturnedBytes() const;
+#endif
+
+ // Dump state to stderr
+#ifndef WTF_CHANGES
+ void Dump(TCMalloc_Printer* out);
+#endif
+
+ // Return number of bytes allocated from system
+ inline uint64_t SystemBytes() const { return system_bytes_; }
+
+ // Return number of free bytes in heap
+ uint64_t FreeBytes() const {
+ return (static_cast<uint64_t>(free_pages_) << kPageShift);
+ }
+
+ bool Check();
+ bool CheckList(Span* list, Length min_pages, Length max_pages);
+
+ // Release all pages on the free list for reuse by the OS:
+ void ReleaseFreePages();
+
+ // Return 0 if we have no information, or else the correct sizeclass for p.
+ // Reads and writes to pagemap_cache_ do not require locking.
+ // The entries are 64 bits on 64-bit hardware and 16 bits on
+ // 32-bit hardware, and we don't mind raciness as long as each read of
+ // an entry yields a valid entry, not a partially updated entry.
+ size_t GetSizeClassIfCached(PageID p) const {
+ return pagemap_cache_.GetOrDefault(p, 0);
+ }
+ void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
+
+ private:
+ // Pick the appropriate map and cache types based on pointer size
+ typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
+ typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
+ PageMap pagemap_;
+ mutable PageMapCache pagemap_cache_;
+
+ // We segregate spans of a given size into two circular linked
+ // lists: one for normal spans, and one for spans whose memory
+ // has been returned to the system.
+ struct SpanList {
+ Span normal;
+ Span returned;
+ };
+
+ // List of free spans of length >= kMaxPages
+ SpanList large_;
+
+ // Array mapping from span length to a doubly linked list of free spans
+ SpanList free_[kMaxPages];
+
+ // Number of pages kept in free lists
+ uintptr_t free_pages_;
+
+ // Bytes allocated from system
+ uint64_t system_bytes_;
+
+ bool GrowHeap(Length n);
+
+ // REQUIRES: span->length >= n
+ // Remove span from its free list, and move any leftover part of
+ // span into appropriate free lists. Also update "span" to have
+ // length exactly "n" and mark it as non-free so it can be returned
+ // to the client.
+ //
+ // "released" is true iff "span" was found on a "returned" list.
+ void Carve(Span* span, Length n, bool released);
+
+ void RecordSpan(Span* span) {
+ pagemap_.set(span->start, span);
+ if (span->length > 1) {
+ pagemap_.set(span->start + span->length - 1, span);
+ }
+ }
+
+ // Allocate a large span of length == n. If successful, returns a
+ // span of exactly the specified length. Else, returns NULL.
+ Span* AllocLarge(Length n);
+
+ // Incrementally release some memory to the system.
+ // IncrementalScavenge(n) is called whenever n pages are freed.
+ void IncrementalScavenge(Length n);
+
+ // Number of pages to deallocate before doing more scavenging
+ int64_t scavenge_counter_;
+
+ // Index of last free list we scavenged
+ size_t scavenge_index_;
+
+#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
+ friend class FastMallocZone;
+#endif
+};
+
+void TCMalloc_PageHeap::init()
+{
+ pagemap_.init(MetaDataAlloc);
+ pagemap_cache_ = PageMapCache(0);
+ free_pages_ = 0;
+ system_bytes_ = 0;
+ scavenge_counter_ = 0;
+ // Start scavenging at kMaxPages list
+ scavenge_index_ = kMaxPages-1;
+ COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
+ DLL_Init(&large_.normal);
+ DLL_Init(&large_.returned);
+ for (size_t i = 0; i < kMaxPages; i++) {
+ DLL_Init(&free_[i].normal);
+ DLL_Init(&free_[i].returned);
+ }
+}
+
+inline Span* TCMalloc_PageHeap::New(Length n) {
+ ASSERT(Check());
+ ASSERT(n > 0);
+
+ // Find first size >= n that has a non-empty list
+ for (Length s = n; s < kMaxPages; s++) {
+ Span* ll = NULL;
+ bool released = false;
+ if (!DLL_IsEmpty(&free_[s].normal)) {
+ // Found normal span
+ ll = &free_[s].normal;
+ } else if (!DLL_IsEmpty(&free_[s].returned)) {
+ // Found returned span; reallocate it
+ ll = &free_[s].returned;
+ released = true;
+ } else {
+ // Keep looking in larger classes
+ continue;
+ }
+
+ Span* result = ll->next;
+ Carve(result, n, released);
+ if (result->decommitted) {
+ TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
+ result->decommitted = false;
+ }
+ ASSERT(Check());
+ free_pages_ -= n;
+ return result;
+ }
+
+ Span* result = AllocLarge(n);
+ if (result != NULL) {
+ ASSERT_SPAN_COMMITTED(result);
+ return result;
+ }
+
+ // Grow the heap and try again
+ if (!GrowHeap(n)) {
+ ASSERT(Check());
+ return NULL;
+ }
+
+ return AllocLarge(n);
+}
+
+Span* TCMalloc_PageHeap::AllocLarge(Length n) {
+ // Find the best span (closest to n in size).
+ // The following loops implement address-ordered best-fit.
+ bool from_released = false;
+ Span *best = NULL;
+
+ // Search through normal list
+ for (Span* span = large_.normal.next;
+ span != &large_.normal;
+ span = span->next) {
+ if (span->length >= n) {
+ if ((best == NULL)
+ || (span->length < best->length)
+ || ((span->length == best->length) && (span->start < best->start))) {
+ best = span;
+ from_released = false;
+ }
+ }
+ }
+
+ // Search through released list in case it has a better fit
+ for (Span* span = large_.returned.next;
+ span != &large_.returned;
+ span = span->next) {
+ if (span->length >= n) {
+ if ((best == NULL)
+ || (span->length < best->length)
+ || ((span->length == best->length) && (span->start < best->start))) {
+ best = span;
+ from_released = true;
+ }
+ }
+ }
+
+ if (best != NULL) {
+ Carve(best, n, from_released);
+ if (best->decommitted) {
+ TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift));
+ best->decommitted = false;
+ }
+ ASSERT(Check());
+ free_pages_ -= n;
+ return best;
+ }
+ return NULL;
+}
+
+Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
+ ASSERT(0 < n);
+ ASSERT(n < span->length);
+ ASSERT(!span->free);
+ ASSERT(span->sizeclass == 0);
+ Event(span, 'T', n);
+
+ const Length extra = span->length - n;
+ Span* leftover = NewSpan(span->start + n, extra);
+ Event(leftover, 'U', extra);
+ RecordSpan(leftover);
+ pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
+ span->length = n;
+
+ return leftover;
+}
+
+static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source)
+{
+ destination->decommitted = source->decommitted;
+}
+
+inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
+ ASSERT(n > 0);
+ DLL_Remove(span);
+ span->free = 0;
+ Event(span, 'A', n);
+
+ const int extra = static_cast<int>(span->length - n);
+ ASSERT(extra >= 0);
+ if (extra > 0) {
+ Span* leftover = NewSpan(span->start + n, extra);
+ leftover->free = 1;
+ propagateDecommittedState(leftover, span);
+ Event(leftover, 'S', extra);
+ RecordSpan(leftover);
+
+ // Place leftover span on appropriate free list
+ SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
+ Span* dst = released ? &listpair->returned : &listpair->normal;
+ DLL_Prepend(dst, leftover);
+
+ span->length = n;
+ pagemap_.set(span->start + n - 1, span);
+ }
+}
+
+static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
+{
+ if (destination->decommitted && !other->decommitted) {
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
+ static_cast<size_t>(other->length << kPageShift));
+ } else if (other->decommitted && !destination->decommitted) {
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
+ static_cast<size_t>(destination->length << kPageShift));
+ destination->decommitted = true;
+ }
+}
+
+inline void TCMalloc_PageHeap::Delete(Span* span) {
+ ASSERT(Check());
+ ASSERT(!span->free);
+ ASSERT(span->length > 0);
+ ASSERT(GetDescriptor(span->start) == span);
+ ASSERT(GetDescriptor(span->start + span->length - 1) == span);
+ span->sizeclass = 0;
+#ifndef NO_TCMALLOC_SAMPLES
+ span->sample = 0;
+#endif
+
+ // Coalesce -- we guarantee that "p" != 0, so no bounds checking
+ // necessary. We do not bother resetting the stale pagemap
+ // entries for the pieces we are merging together because we only
+ // care about the pagemap entries for the boundaries.
+ const PageID p = span->start;
+ const Length n = span->length;
+ Span* prev = GetDescriptor(p-1);
+ if (prev != NULL && prev->free) {
+ // Merge preceding span into this span
+ ASSERT(prev->start + prev->length == p);
+ const Length len = prev->length;
+ mergeDecommittedStates(span, prev);
+ DLL_Remove(prev);
+ DeleteSpan(prev);
+ span->start -= len;
+ span->length += len;
+ pagemap_.set(span->start, span);
+ Event(span, 'L', len);
+ }
+ Span* next = GetDescriptor(p+n);
+ if (next != NULL && next->free) {
+ // Merge next span into this span
+ ASSERT(next->start == p+n);
+ const Length len = next->length;
+ mergeDecommittedStates(span, next);
+ DLL_Remove(next);
+ DeleteSpan(next);
+ span->length += len;
+ pagemap_.set(span->start + span->length - 1, span);
+ Event(span, 'R', len);
+ }
+
+ Event(span, 'D', span->length);
+ span->free = 1;
+ if (span->decommitted) {
+ if (span->length < kMaxPages)
+ DLL_Prepend(&free_[span->length].returned, span);
+ else
+ DLL_Prepend(&large_.returned, span);
+ } else {
+ if (span->length < kMaxPages)
+ DLL_Prepend(&free_[span->length].normal, span);
+ else
+ DLL_Prepend(&large_.normal, span);
+ }
+ free_pages_ += n;
+
+ IncrementalScavenge(n);
+ ASSERT(Check());
+}
+
+void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
+ // Fast path; not yet time to release memory
+ scavenge_counter_ -= n;
+ if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
+
+ // If there is nothing to release, wait for this many freed pages before
+ // scavenging again. With 4K pages, this comes to 1MB of memory.
+ static const size_t kDefaultReleaseDelay = 1 << 8;
+
+ // Find index of free list to scavenge
+ size_t index = scavenge_index_ + 1;
+ for (size_t i = 0; i < kMaxPages+1; i++) {
+ if (index > kMaxPages) index = 0;
+ SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
+ if (!DLL_IsEmpty(&slist->normal)) {
+ // Release the last span on the normal portion of this list
+ Span* s = slist->normal.prev;
+ DLL_Remove(s);
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
+ static_cast<size_t>(s->length << kPageShift));
+ s->decommitted = true;
+ DLL_Prepend(&slist->returned, s);
+
+ scavenge_counter_ = std::max<size_t>(64UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));
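+ // E.g., with kDefaultReleaseDelay == 256: a heap with few free pages waits
+ // the full ~256 freed pages before the next release, while one holding
+ // 64K+ free pages is clamped down to the floor of 64.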
+
+ if (index == kMaxPages && !DLL_IsEmpty(&slist->normal))
+ scavenge_index_ = index - 1;
+ else
+ scavenge_index_ = index;
+ return;
+ }
+ index++;
+ }
+
+ // Nothing to scavenge, delay for a while
+ scavenge_counter_ = kDefaultReleaseDelay;
+}
+
+void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
+ // Associate span object with all interior pages as well
+ ASSERT(!span->free);
+ ASSERT(GetDescriptor(span->start) == span);
+ ASSERT(GetDescriptor(span->start+span->length-1) == span);
+ Event(span, 'C', sc);
+ span->sizeclass = static_cast<unsigned int>(sc);
+ for (Length i = 1; i < span->length-1; i++) {
+ pagemap_.set(span->start+i, span);
+ }
+}
+
+#ifdef WTF_CHANGES
+size_t TCMalloc_PageHeap::ReturnedBytes() const {
+ size_t result = 0;
+ for (unsigned s = 0; s < kMaxPages; s++) {
+ const int r_length = DLL_Length(&free_[s].returned);
+ unsigned r_pages = s * r_length;
+ result += r_pages << kPageShift;
+ }
+
+ for (Span* s = large_.returned.next; s != &large_.returned; s = s->next)
+ result += s->length << kPageShift;
+ return result;
+}
+#endif
+
+#ifndef WTF_CHANGES
+static double PagesToMB(uint64_t pages) {
+ return (pages << kPageShift) / 1048576.0;
+}
+
+void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
+ int nonempty_sizes = 0;
+ for (int s = 0; s < kMaxPages; s++) {
+ if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
+ nonempty_sizes++;
+ }
+ }
+ out->printf("------------------------------------------------\n");
+ out->printf("PageHeap: %d sizes; %6.1f MB free\n",
+ nonempty_sizes, PagesToMB(free_pages_));
+ out->printf("------------------------------------------------\n");
+ uint64_t total_normal = 0;
+ uint64_t total_returned = 0;
+ for (int s = 0; s < kMaxPages; s++) {
+ const int n_length = DLL_Length(&free_[s].normal);
+ const int r_length = DLL_Length(&free_[s].returned);
+ if (n_length + r_length > 0) {
+ uint64_t n_pages = s * n_length;
+ uint64_t r_pages = s * r_length;
+ total_normal += n_pages;
+ total_returned += r_pages;
+ out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
+ "; unmapped: %6.1f MB; %6.1f MB cum\n",
+ s,
+ (n_length + r_length),
+ PagesToMB(n_pages + r_pages),
+ PagesToMB(total_normal + total_returned),
+ PagesToMB(r_pages),
+ PagesToMB(total_returned));
+ }
+ }
+
+ uint64_t n_pages = 0;
+ uint64_t r_pages = 0;
+ int n_spans = 0;
+ int r_spans = 0;
+ out->printf("Normal large spans:\n");
+ for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
+ out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
+ s->length, PagesToMB(s->length));
+ n_pages += s->length;
+ n_spans++;
+ }
+ out->printf("Unmapped large spans:\n");
+ for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
+ out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
+ s->length, PagesToMB(s->length));
+ r_pages += s->length;
+ r_spans++;
+ }
+ total_normal += n_pages;
+ total_returned += r_pages;
+ out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
+ "; unmapped: %6.1f MB; %6.1f MB cum\n",
+ (n_spans + r_spans),
+ PagesToMB(n_pages + r_pages),
+ PagesToMB(total_normal + total_returned),
+ PagesToMB(r_pages),
+ PagesToMB(total_returned));
+}
+#endif
+
+bool TCMalloc_PageHeap::GrowHeap(Length n) {
+ ASSERT(kMaxPages >= kMinSystemAlloc);
+ if (n > kMaxValidPages) return false;
+ Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
+ size_t actual_size;
+ void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
+ if (ptr == NULL) {
+ if (n < ask) {
+ // Try growing just "n" pages
+ ask = n;
+ ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
+ }
+ if (ptr == NULL) return false;
+ }
+ ask = actual_size >> kPageShift;
+
+ uint64_t old_system_bytes = system_bytes_;
+ system_bytes_ += (ask << kPageShift);
+ const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
+ ASSERT(p > 0);
+
+ // If we already have a lot of pages allocated, just preallocate a bunch of
+ // memory for the page map. This prevents fragmentation by pagemap metadata
+ // when a program keeps allocating and freeing large blocks.
+
+ if (old_system_bytes < kPageMapBigAllocationThreshold
+ && system_bytes_ >= kPageMapBigAllocationThreshold) {
+ pagemap_.PreallocateMoreMemory();
+ }
+
+ // Make sure pagemap_ has entries for all of the new pages.
+ // Plus ensure one before and one after so coalescing code
+ // does not need bounds-checking.
+ if (pagemap_.Ensure(p-1, ask+2)) {
+ // Pretend the new area is allocated and then Delete() it to
+ // cause any necessary coalescing to occur.
+ //
+ // We do not adjust free_pages_ here since Delete() will do it for us.
+ Span* span = NewSpan(p, ask);
+ RecordSpan(span);
+ Delete(span);
+ ASSERT(Check());
+ return true;
+ } else {
+ // We could not allocate memory within "pagemap_"
+ // TODO: Once we can return memory to the system, return the new span
+ return false;
+ }
+}
+
+bool TCMalloc_PageHeap::Check() {
+ ASSERT(free_[0].normal.next == &free_[0].normal);
+ ASSERT(free_[0].returned.next == &free_[0].returned);
+ CheckList(&large_.normal, kMaxPages, 1000000000);
+ CheckList(&large_.returned, kMaxPages, 1000000000);
+ for (Length s = 1; s < kMaxPages; s++) {
+ CheckList(&free_[s].normal, s, s);
+ CheckList(&free_[s].returned, s, s);
+ }
+ return true;
+}
+
+#if ASSERT_DISABLED
+bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {
+ return true;
+}
+#else
+bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
+ for (Span* s = list->next; s != list; s = s->next) {
+ CHECK_CONDITION(s->free);
+ CHECK_CONDITION(s->length >= min_pages);
+ CHECK_CONDITION(s->length <= max_pages);
+ CHECK_CONDITION(GetDescriptor(s->start) == s);
+ CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
+ }
+ return true;
+}
+#endif
+
+static void ReleaseFreeList(Span* list, Span* returned) {
+ // Walk backwards through list so that when we push these
+ // spans on the "returned" list, we preserve the order.
+ while (!DLL_IsEmpty(list)) {
+ Span* s = list->prev;
+ DLL_Remove(s);
+ DLL_Prepend(returned, s);
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
+ static_cast<size_t>(s->length << kPageShift));
+ }
+}
+
+void TCMalloc_PageHeap::ReleaseFreePages() {
+ for (Length s = 0; s < kMaxPages; s++) {
+ ReleaseFreeList(&free_[s].normal, &free_[s].returned);
+ }
+ ReleaseFreeList(&large_.normal, &large_.returned);
+ ASSERT(Check());
+}
+
+//-------------------------------------------------------------------
+// Free list
+//-------------------------------------------------------------------
+
+class TCMalloc_ThreadCache_FreeList {
+ private:
+ void* list_; // Linked list of nodes
+ uint16_t length_; // Current length
+ uint16_t lowater_; // Low water mark for list length
+
+ public:
+ void Init() {
+ list_ = NULL;
+ length_ = 0;
+ lowater_ = 0;
+ }
+
+ // Return current length of list
+ int length() const {
+ return length_;
+ }
+
+ // Is list empty?
+ bool empty() const {
+ return list_ == NULL;
+ }
+
+ // Low-water mark management
+ int lowwatermark() const { return lowater_; }
+ void clear_lowwatermark() { lowater_ = length_; }
+
+ ALWAYS_INLINE void Push(void* ptr) {
+ SLL_Push(&list_, ptr);
+ length_++;
+ }
+
+ void PushRange(int N, void *start, void *end) {
+ SLL_PushRange(&list_, start, end);
+ length_ = length_ + static_cast<uint16_t>(N);
+ }
+
+ void PopRange(int N, void **start, void **end) {
+ SLL_PopRange(&list_, N, start, end);
+ ASSERT(length_ >= N);
+ length_ = length_ - static_cast<uint16_t>(N);
+ if (length_ < lowater_) lowater_ = length_;
+ }
+
+ ALWAYS_INLINE void* Pop() {
+ ASSERT(list_ != NULL);
+ length_--;
+ if (length_ < lowater_) lowater_ = length_;
+ return SLL_Pop(&list_);
+ }
+
+#ifdef WTF_CHANGES
+ template <class Finder, class Reader>
+ void enumerateFreeObjects(Finder& finder, const Reader& reader)
+ {
+ for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
+ finder.visit(nextObject);
+ }
+#endif
+};
+
+//-------------------------------------------------------------------
+// Data kept per thread
+//-------------------------------------------------------------------
+
+class TCMalloc_ThreadCache {
+ private:
+ typedef TCMalloc_ThreadCache_FreeList FreeList;
+#if COMPILER(MSVC)
+ typedef DWORD ThreadIdentifier;
+#else
+ typedef pthread_t ThreadIdentifier;
+#endif
+
+ size_t size_; // Combined size of data
+ ThreadIdentifier tid_; // Which thread owns it
+ bool in_setspecific_; // Called pthread_setspecific?
+ FreeList list_[kNumClasses]; // Array indexed by size-class
+
+ // We sample allocations, biased by the size of the allocation
+ uint32_t rnd_; // Cheap random number generator
+ size_t bytes_until_sample_; // Bytes until we sample next
+
+ // Allocate a new heap. REQUIRES: pageheap_lock is held.
+ static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);
+
+ // Use only as pthread thread-specific destructor function.
+ static void DestroyThreadCache(void* ptr);
+ public:
+ // All ThreadCache objects are kept in a linked list (for stats collection)
+ TCMalloc_ThreadCache* next_;
+ TCMalloc_ThreadCache* prev_;
+
+ void Init(ThreadIdentifier tid);
+ void Cleanup();
+
+ // Accessors (mostly just for printing stats)
+ int freelist_length(size_t cl) const { return list_[cl].length(); }
+
+ // Total byte size in cache
+ size_t Size() const { return size_; }
+
+ void* Allocate(size_t size);
+ void Deallocate(void* ptr, size_t size_class);
+
+ void FetchFromCentralCache(size_t cl, size_t allocationSize);
+ void ReleaseToCentralCache(size_t cl, int N);
+ void Scavenge();
+ void Print() const;
+
+ // Record allocation of "k" bytes. Return true iff allocation
+ // should be sampled
+ bool SampleAllocation(size_t k);
+
+ // Pick next sampling point
+ void PickNextSample(size_t k);
+
+ static void InitModule();
+ static void InitTSD();
+ static TCMalloc_ThreadCache* GetThreadHeap();
+ static TCMalloc_ThreadCache* GetCache();
+ static TCMalloc_ThreadCache* GetCacheIfPresent();
+ static TCMalloc_ThreadCache* CreateCacheIfNecessary();
+ static void DeleteCache(TCMalloc_ThreadCache* heap);
+ static void BecomeIdle();
+ static void RecomputeThreadCacheSize();
+
+#ifdef WTF_CHANGES
+ template <class Finder, class Reader>
+ void enumerateFreeObjects(Finder& finder, const Reader& reader)
+ {
+ for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
+ list_[sizeClass].enumerateFreeObjects(finder, reader);
+ }
+#endif
+};
+
+//-------------------------------------------------------------------
+// Data kept per size-class in central cache
+//-------------------------------------------------------------------
+
+class TCMalloc_Central_FreeList {
+ public:
+ void Init(size_t cl);
+
+ // These methods all do internal locking.
+
+ // Insert the specified range into the central freelist. N is the number of
+ // elements in the range.
+ void InsertRange(void *start, void *end, int N);
+
+ // Removes up to *N elements; stores the actual number fetched back in *N.
+ void RemoveRange(void **start, void **end, int *N);
+
+ // Returns the number of free objects in cache.
+ size_t length() {
+ SpinLockHolder h(&lock_);
+ return counter_;
+ }
+
+ // Returns the number of free objects in the transfer cache.
+ int tc_length() {
+ SpinLockHolder h(&lock_);
+ return used_slots_ * num_objects_to_move[size_class_];
+ }
+
+#ifdef WTF_CHANGES
+ template <class Finder, class Reader>
+ void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
+ {
+ for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0))
+ ASSERT(!span->objects);
+
+ ASSERT(!nonempty_.objects);
+ static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);
+
+ Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
+ Span* remoteSpan = nonempty_.next;
+
+ for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) {
+ for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
+ finder.visit(nextObject);
+ }
+ }
+#endif
+
+ private:
+ // REQUIRES: lock_ is held
+ // Remove object from cache and return.
+ // Return NULL if no free entries in cache.
+ void* FetchFromSpans();
+
+ // REQUIRES: lock_ is held
+ // Remove object from cache and return. Fetches
+ // from pageheap if cache is empty. Only returns
+ // NULL on allocation failure.
+ void* FetchFromSpansSafe();
+
+ // REQUIRES: lock_ is held
+ // Release a linked list of objects to spans.
+ // May temporarily release lock_.
+ void ReleaseListToSpans(void *start);
+
+ // REQUIRES: lock_ is held
+ // Release an object to spans.
+ // May temporarily release lock_.
+ void ReleaseToSpans(void* object);
+
+ // REQUIRES: lock_ is held
+ // Populate cache by fetching from the page heap.
+ // May temporarily release lock_.
+ void Populate();
+
+ // REQUIRES: lock_ is held.
+ // Tries to make room for a TCEntry. If the cache is full it will try to
+ // expand it at the cost of some other cache size. Returns false if there
+ // is no space.
+ bool MakeCacheSpace();
+
+ // REQUIRES: lock_ for locked_size_class is held.
+ // Picks a "random" size class to steal a TCEntry slot from. In reality it
+ // just iterates over the sizeclasses but does so without taking a lock.
+ // Returns true on success.
+ // May temporarily lock a "random" size class.
+ static bool EvictRandomSizeClass(size_t locked_size_class, bool force);
+
+ // REQUIRES: lock_ is *not* held.
+ // Tries to shrink the cache. If force is true it will release objects to
+ // spans if that allows it to shrink the cache. Returns false if it failed
+ // to shrink the cache. Decrements cache_size_ on success.
+ // May temporarily take lock_. If it takes lock_, the locked_size_class
+ // lock is released first, to keep the thread from holding two size class
+ // locks concurrently, which could lead to a deadlock.
+ bool ShrinkCache(int locked_size_class, bool force);
+
+ // This lock protects all the data members. used_slots_ and cache_size_
+ // may be looked at without holding the lock.
+ SpinLock lock_;
+
+ // We keep linked lists of empty and non-empty spans.
+ size_t size_class_; // My size class
+ Span empty_; // Dummy header for list of empty spans
+ Span nonempty_; // Dummy header for list of non-empty spans
+ size_t counter_; // Number of free objects in cache entry
+
+ // Here we reserve space for TCEntry cache slots. Since one size class can
+ // end up getting all the TCEntry quota in the system, we just preallocate
+ // a sufficient number of entries here.
+ TCEntry tc_slots_[kNumTransferEntries];
+
+ // Number of currently used cached entries in tc_slots_. This variable is
+ // updated under a lock but can be read without one.
+ int32_t used_slots_;
+ // The current number of slots for this size class. This is an
+ // adaptive value that is increased if there is lots of traffic
+ // on a given size class.
+ int32_t cache_size_;
+};
+
+// Pad each CentralCache object to multiple of 64 bytes
+class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
+ private:
+ char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
+};
+
+//-------------------------------------------------------------------
+// Global variables
+//-------------------------------------------------------------------
+
+// Central cache -- a collection of free-lists, one per size-class.
+// We have a separate lock per free-list to reduce contention.
+static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
+
+// Page-level allocator
+static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
+static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
+static bool phinited = false;
+
+// Avoid extra level of indirection by making "pageheap" be just an alias
+// of pageheap_memory.
+typedef union {
+ void* m_memory;
+ TCMalloc_PageHeap* m_pageHeap;
+} PageHeapUnion;
+
+static inline TCMalloc_PageHeap* getPageHeap()
+{
+ PageHeapUnion u = { &pageheap_memory[0] };
+ return u.m_pageHeap;
+}
+
+#define pageheap getPageHeap()
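+// (Going through the union, rather than reinterpret_cast'ing
+// pageheap_memory directly, confines the type punning to one place and
+// avoids casting between unrelated pointer types.)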
+
+// If TLS is available, we also store a copy
+// of the per-thread object in a __thread variable
+// since __thread variables are faster to read
+// than pthread_getspecific(). We still need
+// pthread_setspecific() because __thread
+// variables provide no way to run cleanup
+// code when a thread is destroyed.
+#ifdef HAVE_TLS
+static __thread TCMalloc_ThreadCache *threadlocal_heap;
+#endif
+// Thread-specific key. Initialization here is somewhat tricky
+// because some Linux startup code invokes malloc() before it
+// is in a good enough state to handle pthread_key_create().
+// Therefore, we use TSD keys only after tsd_inited is set to true.
+// Until then, we use a slow path to get the heap object.
+static bool tsd_inited = false;
+static pthread_key_t heap_key;
+#if COMPILER(MSVC)
+DWORD tlsIndex = TLS_OUT_OF_INDEXES;
+#endif
+
+static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
+{
+ // Still do pthread_setspecific when using MSVC fast TLS, to
+ // benefit from the delete callback.
+ pthread_setspecific(heap_key, heap);
+#if COMPILER(MSVC)
+ TlsSetValue(tlsIndex, heap);
+#endif
+}
+
+// Allocator for thread heaps
+static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
+
+// Linked list of heap objects. Protected by pageheap_lock.
+static TCMalloc_ThreadCache* thread_heaps = NULL;
+static int thread_heap_count = 0;
+
+// Overall thread cache size. Protected by pageheap_lock.
+static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
+
+// Global per-thread cache size. Writes are protected by
+// pageheap_lock. Reads are done without any locking, which should be
+// fine as long as size_t can be written atomically and we don't place
+// invariants between this variable and other pieces of state.
+static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
+
+//-------------------------------------------------------------------
+// Central cache implementation
+//-------------------------------------------------------------------
+
+void TCMalloc_Central_FreeList::Init(size_t cl) {
+ lock_.Init();
+ size_class_ = cl;
+ DLL_Init(&empty_);
+ DLL_Init(&nonempty_);
+ counter_ = 0;
+
+ cache_size_ = 1;
+ used_slots_ = 0;
+ ASSERT(cache_size_ <= kNumTransferEntries);
+}
+
+void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
+ while (start) {
+ void *next = SLL_Next(start);
+ ReleaseToSpans(start);
+ start = next;
+ }
+}
+
+ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
+ const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
+ Span* span = pageheap->GetDescriptor(p);
+ ASSERT(span != NULL);
+ ASSERT(span->refcount > 0);
+
+ // If span is empty, move it to non-empty list
+ if (span->objects == NULL) {
+ DLL_Remove(span);
+ DLL_Prepend(&nonempty_, span);
+ Event(span, 'N', 0);
+ }
+
+ // The following check is expensive, so it is disabled by default
+ if (false) {
+ // Check that object does not occur in list
+ int got = 0;
+ for (void* p = span->objects; p != NULL; p = *((void**) p)) {
+ ASSERT(p != object);
+ got++;
+ }
+ ASSERT(got + span->refcount ==
+ (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
+ }
+
+ counter_++;
+ span->refcount--;
+ if (span->refcount == 0) {
+ Event(span, '#', 0);
+ counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
+ DLL_Remove(span);
+
+ // Release central list lock while operating on pageheap
+ lock_.Unlock();
+ {
+ SpinLockHolder h(&pageheap_lock);
+ pageheap->Delete(span);
+ }
+ lock_.Lock();
+ } else {
+ *(reinterpret_cast<void**>(object)) = span->objects;
+ span->objects = object;
+ }
+}
+
+ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
+ size_t locked_size_class, bool force) {
+ static int race_counter = 0;
+ int t = race_counter++; // Updated without a lock, but who cares.
+ if (t >= static_cast<int>(kNumClasses)) {
+ while (t >= static_cast<int>(kNumClasses)) {
+ t -= kNumClasses;
+ }
+ race_counter = t;
+ }
+ ASSERT(t >= 0);
+ ASSERT(t < static_cast<int>(kNumClasses));
+ if (t == static_cast<int>(locked_size_class)) return false;
+ return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
+}
+
+bool TCMalloc_Central_FreeList::MakeCacheSpace() {
+ // Is there room in the cache?
+ if (used_slots_ < cache_size_) return true;
+ // Can we expand this cache?
+ if (cache_size_ == kNumTransferEntries) return false;
+ // Ok, we'll try to grab an entry from some other size class.
+ if (EvictRandomSizeClass(size_class_, false) ||
+ EvictRandomSizeClass(size_class_, true)) {
+ // Succeeded in evicting, we're going to make our cache larger.
+ cache_size_++;
+ return true;
+ }
+ return false;
+}
+
+
+namespace {
+class LockInverter {
+ private:
+ SpinLock *held_, *temp_;
+ public:
+ inline explicit LockInverter(SpinLock* held, SpinLock *temp)
+ : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
+ inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
+};
+}
+
+bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
+ // Start with a quick check without taking a lock.
+ if (cache_size_ == 0) return false;
+ // We don't evict from a full cache unless we are 'forcing'.
+ if (force == false && used_slots_ == cache_size_) return false;
+
+ // Grab lock, but first release the other lock held by this thread. We use
+ // the lock inverter to ensure that we never hold two size class locks
+ // concurrently. Holding two could create a deadlock because there is no
+ // well-defined nesting order.
+ LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
+ ASSERT(used_slots_ <= cache_size_);
+ ASSERT(0 <= cache_size_);
+ if (cache_size_ == 0) return false;
+ if (used_slots_ == cache_size_) {
+ if (force == false) return false;
+ // ReleaseListToSpans releases the lock, so we have to make all the
+ // updates to the central list before calling it.
+ cache_size_--;
+ used_slots_--;
+ ReleaseListToSpans(tc_slots_[used_slots_].head);
+ return true;
+ }
+ cache_size_--;
+ return true;
+}
+
+void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
+ SpinLockHolder h(&lock_);
+ if (N == num_objects_to_move[size_class_] &&
+ MakeCacheSpace()) {
+ int slot = used_slots_++;
+ ASSERT(slot >= 0);
+ ASSERT(slot < kNumTransferEntries);
+ TCEntry *entry = &tc_slots_[slot];
+ entry->head = start;
+ entry->tail = end;
+ return;
+ }
+ ReleaseListToSpans(start);
+}
+
+void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
+ int num = *N;
+ ASSERT(num > 0);
+
+ SpinLockHolder h(&lock_);
+ if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
+ int slot = --used_slots_;
+ ASSERT(slot >= 0);
+ TCEntry *entry = &tc_slots_[slot];
+ *start = entry->head;
+ *end = entry->tail;
+ return;
+ }
+
+ // TODO: Prefetch multiple TCEntries?
+ void *tail = FetchFromSpansSafe();
+ if (!tail) {
+ // We are completely out of memory.
+ *start = *end = NULL;
+ *N = 0;
+ return;
+ }
+
+ SLL_SetNext(tail, NULL);
+ void *head = tail;
+ int count = 1;
+ while (count < num) {
+ void *t = FetchFromSpans();
+ if (!t) break;
+ SLL_Push(&head, t);
+ count++;
+ }
+ *start = head;
+ *end = tail;
+ *N = count;
+}
+
+
+void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
+ void *t = FetchFromSpans();
+ if (!t) {
+ Populate();
+ t = FetchFromSpans();
+ }
+ return t;
+}
+
+void* TCMalloc_Central_FreeList::FetchFromSpans() {
+ if (DLL_IsEmpty(&nonempty_)) return NULL;
+ Span* span = nonempty_.next;
+
+ ASSERT(span->objects != NULL);
+ ASSERT_SPAN_COMMITTED(span);
+ span->refcount++;
+ void* result = span->objects;
+ span->objects = *(reinterpret_cast<void**>(result));
+ if (span->objects == NULL) {
+ // Move to empty list
+ DLL_Remove(span);
+ DLL_Prepend(&empty_, span);
+ Event(span, 'E', 0);
+ }
+ counter_--;
+ return result;
+}
+
+// Fetch memory from the system and add to the central cache freelist.
+ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
+ // Release central list lock while operating on pageheap
+ lock_.Unlock();
+ const size_t npages = class_to_pages[size_class_];
+
+ Span* span;
+ {
+ SpinLockHolder h(&pageheap_lock);
+ span = pageheap->New(npages);
+ if (span) pageheap->RegisterSizeClass(span, size_class_);
+ }
+ if (span == NULL) {
+ MESSAGE("allocation failed: %d\n", errno);
+ lock_.Lock();
+ return;
+ }
+ ASSERT_SPAN_COMMITTED(span);
+ ASSERT(span->length == npages);
+ // Cache sizeclass info eagerly. Locking is not necessary.
+ // (Instead of being eager, we could just replace any stale info
+ // about this span, but that seems to be no better in practice.)
+ for (size_t i = 0; i < npages; i++) {
+ pageheap->CacheSizeClass(span->start + i, size_class_);
+ }
+
+ // Split the block into pieces and add to the free-list
+ // TODO: coloring of objects to avoid cache conflicts?
+ void** tail = &span->objects;
+ char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
+ char* limit = ptr + (npages << kPageShift);
+ const size_t size = ByteSizeForClass(size_class_);
+ int num = 0;
+ char* nptr;
+ while ((nptr = ptr + size) <= limit) {
+ *tail = ptr;
+ tail = reinterpret_cast<void**>(ptr);
+ ptr = nptr;
+ num++;
+ }
+ ASSERT(ptr <= limit);
+ *tail = NULL;
+ span->refcount = 0; // No sub-object in use yet
+
+ // Add span to list of non-empty spans
+ lock_.Lock();
+ DLL_Prepend(&nonempty_, span);
+ counter_ += num;
+}
+
+//-------------------------------------------------------------------
+// TCMalloc_ThreadCache implementation
+//-------------------------------------------------------------------
+
+inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
+ if (bytes_until_sample_ < k) {
+ PickNextSample(k);
+ return true;
+ } else {
+ bytes_until_sample_ -= k;
+ return false;
+ }
+}
+
+void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
+ size_ = 0;
+ next_ = NULL;
+ prev_ = NULL;
+ tid_ = tid;
+ in_setspecific_ = false;
+ for (size_t cl = 0; cl < kNumClasses; ++cl) {
+ list_[cl].Init();
+ }
+
+ // Initialize RNG -- run it for a bit to get to good values
+ bytes_until_sample_ = 0;
+ rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
+ for (int i = 0; i < 100; i++) {
+ PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
+ }
+}
+
+void TCMalloc_ThreadCache::Cleanup() {
+ // Put unused memory back into central cache
+ for (size_t cl = 0; cl < kNumClasses; ++cl) {
+ if (list_[cl].length() > 0) {
+ ReleaseToCentralCache(cl, list_[cl].length());
+ }
+ }
+}
+
+ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
+ ASSERT(size <= kMaxSize);
+ const size_t cl = SizeClass(size);
+ FreeList* list = &list_[cl];
+ size_t allocationSize = ByteSizeForClass(cl);
+ if (list->empty()) {
+ FetchFromCentralCache(cl, allocationSize);
+ if (list->empty()) return NULL;
+ }
+ size_ -= allocationSize;
+ return list->Pop();
+}
+
+inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
+ size_ += ByteSizeForClass(cl);
+ FreeList* list = &list_[cl];
+ list->Push(ptr);
+ // If enough data is free, put back into central cache
+ if (list->length() > kMaxFreeListLength) {
+ ReleaseToCentralCache(cl, num_objects_to_move[cl]);
+ }
+ if (size_ >= per_thread_cache_size) Scavenge();
+}
+
+// Remove some objects of class "cl" from central cache and add to thread heap
+ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
+ int fetch_count = num_objects_to_move[cl];
+ void *start, *end;
+ central_cache[cl].RemoveRange(&start, &end, &fetch_count);
+ list_[cl].PushRange(fetch_count, start, end);
+ size_ += allocationSize * fetch_count;
+}
+
+// Remove some objects of class "cl" from thread heap and add to central cache
+inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
+ ASSERT(N > 0);
+ FreeList* src = &list_[cl];
+ if (N > src->length()) N = src->length();
+ size_ -= N*ByteSizeForClass(cl);
+
+ // We return prepackaged chains of the correct size to the central cache.
+ // TODO: Use the same format internally in the thread caches?
+ int batch_size = num_objects_to_move[cl];
+ while (N > batch_size) {
+ void *tail, *head;
+ src->PopRange(batch_size, &head, &tail);
+ central_cache[cl].InsertRange(head, tail, batch_size);
+ N -= batch_size;
+ }
+ void *tail, *head;
+ src->PopRange(N, &head, &tail);
+ central_cache[cl].InsertRange(head, tail, N);
+}
+
+// Release idle memory to the central cache
+inline void TCMalloc_ThreadCache::Scavenge() {
+ // If the low-water mark for the free list is L, it means we would
+ // not have had to allocate anything from the central cache even if
+ // we had reduced the free list size by L. We aim to get closer to
+ // that situation by dropping L/2 nodes from the free list. This
+ // may not release much memory, but if so we will call scavenge again
+ // pretty soon and the low-water marks will be high on that call.
+ //int64 start = CycleClock::Now();
+
+ for (size_t cl = 0; cl < kNumClasses; cl++) {
+ FreeList* list = &list_[cl];
+ const int lowmark = list->lowwatermark();
+ if (lowmark > 0) {
+ const int drop = (lowmark > 1) ? lowmark/2 : 1;
+ ReleaseToCentralCache(cl, drop);
+ }
+ list->clear_lowwatermark();
+ }
+
+ //int64 finish = CycleClock::Now();
+ //CycleTimer ct;
+ //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
+}
+
+void TCMalloc_ThreadCache::PickNextSample(size_t k) {
+ // Make next "random" number
+ // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
+ static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
+ uint32_t r = rnd_;
+ rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
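+ // When the sign bit of r is set, the arithmetic shift yields all ones and
+ // the feedback taps (bits 22, 2, 1, 0) are XORed in: one step of a 32-bit
+ // Galois LFSR, which visits all 2^32 - 1 nonzero states before repeating.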
+
+ // Next point is "rnd_ % (sample_period)". I.e., average
+ // increment is "sample_period/2".
+ const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
+ static int last_flag_value = -1;
+
+ if (flag_value != last_flag_value) {
+ SpinLockHolder h(&sample_period_lock);
+ int i;
+ for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
+ if (primes_list[i] >= flag_value) {
+ break;
+ }
+ }
+ sample_period = primes_list[i];
+ last_flag_value = flag_value;
+ }
+
+ bytes_until_sample_ += rnd_ % sample_period;
+
+ if (k > (static_cast<size_t>(-1) >> 2)) {
+ // If the user has asked for a huge allocation then it is possible
+ // for the code below to loop infinitely. Just return (note that
+ // this throws off the sampling accuracy somewhat, but a user who
+ // is allocating more than 1G of memory at a time can live with a
+ // minor inaccuracy in profiling of small allocations, and also
+ // would rather not wait for the loop below to terminate).
+ return;
+ }
+
+ while (bytes_until_sample_ < k) {
+ // Increase bytes_until_sample_ by enough average sampling periods
+ // (sample_period >> 1) to allow us to sample past the current
+ // allocation.
+ bytes_until_sample_ += (sample_period >> 1);
+ }
+
+ bytes_until_sample_ -= k;
+}
+
+void TCMalloc_ThreadCache::InitModule() {
+ // There is a slight potential race here because of double-checked
+ // locking idiom. However, as long as the program does a small
+ // allocation before switching to multi-threaded mode, we will be
+ // fine. We increase the chances of doing such a small allocation
+ // by doing one in the constructor of the module_enter_exit_hook
+ // object declared below.
+ SpinLockHolder h(&pageheap_lock);
+ if (!phinited) {
+#ifdef WTF_CHANGES
+ InitTSD();
+#endif
+ InitSizeClasses();
+ threadheap_allocator.Init();
+ span_allocator.Init();
+ span_allocator.New(); // Reduce cache conflicts
+ span_allocator.New(); // Reduce cache conflicts
+ stacktrace_allocator.Init();
+ DLL_Init(&sampled_objects);
+ for (size_t i = 0; i < kNumClasses; ++i) {
+ central_cache[i].Init(i);
+ }
+ pageheap->init();
+ phinited = 1;
+#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
+ FastMallocZone::init();
+#endif
+ }
+}
+
+inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
+ // Create the heap and add it to the linked list
+ TCMalloc_ThreadCache *heap = threadheap_allocator.New();
+ heap->Init(tid);
+ heap->next_ = thread_heaps;
+ heap->prev_ = NULL;
+ if (thread_heaps != NULL) thread_heaps->prev_ = heap;
+ thread_heaps = heap;
+ thread_heap_count++;
+ RecomputeThreadCacheSize();
+ return heap;
+}
+
+inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
+#ifdef HAVE_TLS
+ // __thread is faster, but only when the kernel supports it
+ if (KernelSupportsTLS())
+ return threadlocal_heap;
+#elif COMPILER(MSVC)
+ return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
+#else
+ return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
+#endif
+}
+
+inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
+ TCMalloc_ThreadCache* ptr = NULL;
+ if (!tsd_inited) {
+ InitModule();
+ } else {
+ ptr = GetThreadHeap();
+ }
+ if (ptr == NULL) ptr = CreateCacheIfNecessary();
+ return ptr;
+}
+
+// In deletion paths, we do not try to create a thread-cache. This is
+// because we may be in the thread destruction code and may have
+// already cleaned up the cache for this thread.
+inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
+ if (!tsd_inited) return NULL;
+ void* const p = GetThreadHeap();
+ return reinterpret_cast<TCMalloc_ThreadCache*>(p);
+}
+
+void TCMalloc_ThreadCache::InitTSD() {
+ ASSERT(!tsd_inited);
+ pthread_key_create(&heap_key, DestroyThreadCache);
+#if COMPILER(MSVC)
+ tlsIndex = TlsAlloc();
+#endif
+ tsd_inited = true;
+
+#if !COMPILER(MSVC)
+ // We may have used a fake pthread_t for the main thread. Fix it.
+ pthread_t zero;
+ memset(&zero, 0, sizeof(zero));
+#endif
+#ifndef WTF_CHANGES
+ SpinLockHolder h(&pageheap_lock);
+#else
+ ASSERT(pageheap_lock.IsHeld());
+#endif
+ for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
+#if COMPILER(MSVC)
+ if (h->tid_ == 0) {
+ h->tid_ = GetCurrentThreadId();
+ }
+#else
+ if (pthread_equal(h->tid_, zero)) {
+ h->tid_ = pthread_self();
+ }
+#endif
+ }
+}
+
+TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
+ // Initialize per-thread data if necessary
+ TCMalloc_ThreadCache* heap = NULL;
+ {
+ SpinLockHolder h(&pageheap_lock);
+
+#if COMPILER(MSVC)
+ DWORD me;
+ if (!tsd_inited) {
+ me = 0;
+ } else {
+ me = GetCurrentThreadId();
+ }
+#else
+ // Early on in glibc's life, we cannot even call pthread_self()
+ pthread_t me;
+ if (!tsd_inited) {
+ memset(&me, 0, sizeof(me));
+ } else {
+ me = pthread_self();
+ }
+#endif
+
+ // This may be a recursive malloc call from pthread_setspecific()
+ // In that case, the heap for this thread has already been created
+ // and added to the linked list. So we search for that first.
+ for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
+#if COMPILER(MSVC)
+ if (h->tid_ == me) {
+#else
+ if (pthread_equal(h->tid_, me)) {
+#endif
+ heap = h;
+ break;
+ }
+ }
+
+ if (heap == NULL) heap = NewHeap(me);
+ }
+
+ // We call pthread_setspecific() outside the lock because it may
+ // call malloc() recursively. The recursive call will never get
+ // here again because it will find the already allocated heap in the
+ // linked list of heaps.
+ if (!heap->in_setspecific_ && tsd_inited) {
+ heap->in_setspecific_ = true;
+ setThreadHeap(heap);
+ }
+ return heap;
+}
+
+void TCMalloc_ThreadCache::BecomeIdle() {
+ if (!tsd_inited) return; // No caches yet
+ TCMalloc_ThreadCache* heap = GetThreadHeap();
+ if (heap == NULL) return; // No thread cache to remove
+ if (heap->in_setspecific_) return; // Do not disturb the active caller
+
+ heap->in_setspecific_ = true;
+ pthread_setspecific(heap_key, NULL);
+#ifdef HAVE_TLS
+ // Also update the copy in __thread
+ threadlocal_heap = NULL;
+#endif
+ heap->in_setspecific_ = false;
+ if (GetThreadHeap() == heap) {
+ // Somehow heap got reinstated by a recursive call to malloc
+ // from pthread_setspecific. We give up in this case.
+ return;
+ }
+
+ // We can now get rid of the heap
+ DeleteCache(heap);
+}
+
+void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
+ // Note that "ptr" cannot be NULL since pthread promises not
+ // to invoke the destructor on NULL values, but for safety,
+ // we check anyway.
+ if (ptr == NULL) return;
+#ifdef HAVE_TLS
+ // Prevent fast path of GetThreadHeap() from returning heap.
+ threadlocal_heap = NULL;
+#endif
+ DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
+}
+
+void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
+ // Remove all memory from heap
+ heap->Cleanup();
+
+ // Remove from linked list
+ SpinLockHolder h(&pageheap_lock);
+ if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
+ if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
+ if (thread_heaps == heap) thread_heaps = heap->next_;
+ thread_heap_count--;
+ RecomputeThreadCacheSize();
+
+ threadheap_allocator.Delete(heap);
+}
+
+void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
+ // Divide available space across threads
+ int n = thread_heap_count > 0 ? thread_heap_count : 1;
+ size_t space = overall_thread_cache_size / n;
+
+ // Limit to allowed range
+ if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
+ if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
+
+ per_thread_cache_size = space;
+}
+
+void TCMalloc_ThreadCache::Print() const {
+ for (size_t cl = 0; cl < kNumClasses; ++cl) {
+ MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
+ ByteSizeForClass(cl),
+ list_[cl].length(),
+ list_[cl].lowwatermark());
+ }
+}
+
+// Extract interesting stats
+struct TCMallocStats {
+ uint64_t system_bytes; // Bytes alloced from system
+ uint64_t thread_bytes; // Bytes in thread caches
+ uint64_t central_bytes; // Bytes in central cache
+ uint64_t transfer_bytes; // Bytes in central transfer cache
+ uint64_t pageheap_bytes; // Bytes in page heap
+ uint64_t metadata_bytes; // Bytes alloced for metadata
+};
+
+#ifndef WTF_CHANGES
+// Get stats into "r". Also get per-size-class counts if class_count != NULL
+static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
+ r->central_bytes = 0;
+ r->transfer_bytes = 0;
+ for (int cl = 0; cl < kNumClasses; ++cl) {
+ const int length = central_cache[cl].length();
+ const int tc_length = central_cache[cl].tc_length();
+ r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
+ r->transfer_bytes +=
+ static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
+ if (class_count) class_count[cl] = length + tc_length;
+ }
+
+ // Add stats from per-thread heaps
+ r->thread_bytes = 0;
+ { // scope
+ SpinLockHolder h(&pageheap_lock);
+ for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
+ r->thread_bytes += h->Size();
+ if (class_count) {
+ for (size_t cl = 0; cl < kNumClasses; ++cl) {
+ class_count[cl] += h->freelist_length(cl);
+ }
+ }
+ }
+ }
+
+ { //scope
+ SpinLockHolder h(&pageheap_lock);
+ r->system_bytes = pageheap->SystemBytes();
+ r->metadata_bytes = metadata_system_bytes;
+ r->pageheap_bytes = pageheap->FreeBytes();
+ }
+}
+#endif
+
+#ifndef WTF_CHANGES
+// Write stats to "out"
+static void DumpStats(TCMalloc_Printer* out, int level) {
+ TCMallocStats stats;
+ uint64_t class_count[kNumClasses];
+ ExtractStats(&stats, (level >= 2 ? class_count : NULL));
+
+ if (level >= 2) {
+ out->printf("------------------------------------------------\n");
+ uint64_t cumulative = 0;
+ for (int cl = 0; cl < kNumClasses; ++cl) {
+ if (class_count[cl] > 0) {
+ uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
+ cumulative += class_bytes;
+ out->printf("class %3d [ %8" PRIuS " bytes ] : "
+ "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
+ cl, ByteSizeForClass(cl),
+ class_count[cl],
+ class_bytes / 1048576.0,
+ cumulative / 1048576.0);
+ }
+ }
+
+ SpinLockHolder h(&pageheap_lock);
+ pageheap->Dump(out);
+ }
+
+ const uint64_t bytes_in_use = stats.system_bytes
+ - stats.pageheap_bytes
+ - stats.central_bytes
+ - stats.transfer_bytes
+ - stats.thread_bytes;
+
+ out->printf("------------------------------------------------\n"
+ "MALLOC: %12" PRIu64 " Heap size\n"
+ "MALLOC: %12" PRIu64 " Bytes in use by application\n"
+ "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
+ "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
+ "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
+ "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
+ "MALLOC: %12" PRIu64 " Spans in use\n"
+ "MALLOC: %12" PRIu64 " Thread heaps in use\n"
+ "MALLOC: %12" PRIu64 " Metadata allocated\n"
+ "------------------------------------------------\n",
+ stats.system_bytes,
+ bytes_in_use,
+ stats.pageheap_bytes,
+ stats.central_bytes,
+ stats.transfer_bytes,
+ stats.thread_bytes,
+ uint64_t(span_allocator.inuse()),
+ uint64_t(threadheap_allocator.inuse()),
+ stats.metadata_bytes);
+}
+
+static void PrintStats(int level) {
+ const int kBufferSize = 16 << 10;
+ char* buffer = new char[kBufferSize];
+ TCMalloc_Printer printer(buffer, kBufferSize);
+ DumpStats(&printer, level);
+ write(STDERR_FILENO, buffer, strlen(buffer));
+ delete[] buffer;
+}
+
+static void** DumpStackTraces() {
+ // Count how much space we need
+ int needed_slots = 0;
+ {
+ SpinLockHolder h(&pageheap_lock);
+ for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
+ StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
+ needed_slots += 3 + stack->depth;
+ }
+ needed_slots += 100; // Slop in case sample grows
+ needed_slots += needed_slots/8; // An extra 12.5% slop
+ }
+
+ void** result = new void*[needed_slots];
+ if (result == NULL) {
+ MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
+ needed_slots);
+ return NULL;
+ }
+
+ SpinLockHolder h(&pageheap_lock);
+ int used_slots = 0;
+ for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
+ ASSERT(used_slots < needed_slots); // Need to leave room for terminator
+ StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
+ if (used_slots + 3 + stack->depth >= needed_slots) {
+ // No more room
+ break;
+ }
+
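+ // Each record is laid out as [1, allocation size, stack depth,
+ // PC_1 .. PC_depth]; the list is terminated by a single 0 slot.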
+ result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
+ result[used_slots+1] = reinterpret_cast<void*>(stack->size);
+ result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
+ for (int d = 0; d < stack->depth; d++) {
+ result[used_slots+3+d] = stack->stack[d];
+ }
+ used_slots += 3 + stack->depth;
+ }
+ result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
+ return result;
+}
+#endif
+
+#ifndef WTF_CHANGES
+
+// TCMalloc's support for extra malloc interfaces
+class TCMallocImplementation : public MallocExtension {
+ public:
+ virtual void GetStats(char* buffer, int buffer_length) {
+ ASSERT(buffer_length > 0);
+ TCMalloc_Printer printer(buffer, buffer_length);
+
+ // Print level one stats unless lots of space is available
+ if (buffer_length < 10000) {
+ DumpStats(&printer, 1);
+ } else {
+ DumpStats(&printer, 2);
+ }
+ }
+
+ virtual void** ReadStackTraces() {
+ return DumpStackTraces();
+ }
+
+ virtual bool GetNumericProperty(const char* name, size_t* value) {
+ ASSERT(name != NULL);
+
+ if (strcmp(name, "generic.current_allocated_bytes") == 0) {
+ TCMallocStats stats;
+ ExtractStats(&stats, NULL);
+ *value = stats.system_bytes
+ - stats.thread_bytes
+ - stats.central_bytes
+ - stats.pageheap_bytes;
+ return true;
+ }
+
+ if (strcmp(name, "generic.heap_size") == 0) {
+ TCMallocStats stats;
+ ExtractStats(&stats, NULL);
+ *value = stats.system_bytes;
+ return true;
+ }
+
+ if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
+ // We assume that bytes in the page heap are not fragmented too
+ // badly, and are therefore available for allocation.
+ SpinLockHolder l(&pageheap_lock);
+ *value = pageheap->FreeBytes();
+ return true;
+ }
+
+ if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
+ SpinLockHolder l(&pageheap_lock);
+ *value = overall_thread_cache_size;
+ return true;
+ }
+
+ if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
+ TCMallocStats stats;
+ ExtractStats(&stats, NULL);
+ *value = stats.thread_bytes;
+ return true;
+ }
+
+ return false;
+ }
+
+ virtual bool SetNumericProperty(const char* name, size_t value) {
+ ASSERT(name != NULL);
+
+ if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
+ // Clip the value to a reasonable range
+ if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
+ if (value > (1<<30)) value = (1<<30); // Limit to 1GB
+
+ SpinLockHolder l(&pageheap_lock);
+ overall_thread_cache_size = static_cast<size_t>(value);
+ TCMalloc_ThreadCache::RecomputeThreadCacheSize();
+ return true;
+ }
+
+ return false;
+ }
+
+ virtual void MarkThreadIdle() {
+ TCMalloc_ThreadCache::BecomeIdle();
+ }
+
+ virtual void ReleaseFreeMemory() {
+ SpinLockHolder h(&pageheap_lock);
+ pageheap->ReleaseFreePages();
+ }
+};
+#endif
+
+// The constructor allocates an object to ensure that initialization
+// runs before main(), and therefore we do not have a chance to become
+// multi-threaded before initialization. We also create the TSD key
+// here. Presumably by the time this constructor runs, glibc is in
+// good enough shape to handle pthread_key_create().
+//
+// The constructor also takes the opportunity to tell STL to use
+// tcmalloc. We want to do this early, before static constructors
+// run, so that all user STL allocations go through tcmalloc (which
+// works really well for STL).
+//
+// The destructor prints stats when the program exits.
+class TCMallocGuard {
+ public:
+
+ TCMallocGuard() {
+#ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
+ // Check whether the kernel also supports TLS (needs to happen at runtime)
+ CheckIfKernelSupportsTLS();
+#endif
+#ifndef WTF_CHANGES
+#ifdef WIN32 // patch the windows VirtualAlloc, etc.
+ PatchWindowsFunctions(); // defined in windows/patch_functions.cc
+#endif
+#endif
+ free(malloc(1));
+ TCMalloc_ThreadCache::InitTSD();
+ free(malloc(1));
+#ifndef WTF_CHANGES
+ MallocExtension::Register(new TCMallocImplementation);
+#endif
+ }
+
+#ifndef WTF_CHANGES
+ ~TCMallocGuard() {
+ const char* env = getenv("MALLOCSTATS");
+ if (env != NULL) {
+ int level = atoi(env);
+ if (level < 1) level = 1;
+ PrintStats(level);
+ }
+#ifdef WIN32
+ UnpatchWindowsFunctions();
+#endif
+ }
+#endif
+};
+
+#ifndef WTF_CHANGES
+static TCMallocGuard module_enter_exit_hook;
+#endif
+
+
+//-------------------------------------------------------------------
+// Helpers for the exported routines below
+//-------------------------------------------------------------------
+
+#ifndef WTF_CHANGES
+
+static Span* DoSampledAllocation(size_t size) {
+
+ // Grab the stack trace outside the heap lock
+ StackTrace tmp;
+ tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
+ tmp.size = size;
+
+ SpinLockHolder h(&pageheap_lock);
+ // Allocate span
+ Span *span = pageheap->New(pages(size == 0 ? 1 : size));
+ if (span == NULL) {
+ return NULL;
+ }
+
+ // Allocate stack trace
+ StackTrace *stack = stacktrace_allocator.New();
+ if (stack == NULL) {
+ // Sampling failed because of lack of memory
+ return span;
+ }
+
+ *stack = tmp;
+ span->sample = 1;
+ span->objects = stack;
+ DLL_Prepend(&sampled_objects, span);
+
+ return span;
+}
+#endif
+
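+// Sanity check: the per-page size-class cache must either have no entry
+// for ptr's page (0) or agree with the size class recorded in its span.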
+static inline bool CheckCachedSizeClass(void *ptr) {
+ PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
+ size_t cached_value = pageheap->GetSizeClassIfCached(p);
+ return cached_value == 0 ||
+ cached_value == pageheap->GetDescriptor(p)->sizeclass;
+}
+
+static inline void* CheckedMallocResult(void *result)
+{
+ ASSERT(result == 0 || CheckCachedSizeClass(result));
+ return result;
+}
+
+static inline void* SpanToMallocResult(Span *span) {
+ ASSERT_SPAN_COMMITTED(span);
+ pageheap->CacheSizeClass(span->start, 0);
+ return
+ CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
+}
+
+#ifdef WTF_CHANGES
+template <bool crashOnFailure>
+#endif
+static ALWAYS_INLINE void* do_malloc(size_t size) {
+ void* ret = NULL;
+
+#ifdef WTF_CHANGES
+ ASSERT(!isForbidden());
+#endif
+
+ // The following call forces module initialization
+ TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
+#ifndef WTF_CHANGES
+ if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
+ Span* span = DoSampledAllocation(size);
+ if (span != NULL) {
+ ret = SpanToMallocResult(span);
+ }
+ } else
+#endif
+ if (size > kMaxSize) {
+ // Use page-level allocator
+ SpinLockHolder h(&pageheap_lock);
+ Span* span = pageheap->New(pages(size));
+ if (span != NULL) {
+ ret = SpanToMallocResult(span);
+ }
+ } else {
+ // The common case, and also the simplest. This just pops the
+ // size-appropriate freelist, after replenishing it if it's empty.
+ ret = CheckedMallocResult(heap->Allocate(size));
+ }
+ if (!ret) {
+#ifdef WTF_CHANGES
+ if (crashOnFailure) // This branch should be optimized out by the compiler.
+ CRASH();
+#else
+ errno = ENOMEM;
+#endif
+ }
+ return ret;
+}
+
+static ALWAYS_INLINE void do_free(void* ptr) {
+ if (ptr == NULL) return;
+ ASSERT(pageheap != NULL); // Should not call free() before malloc()
+ const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
+ Span* span = NULL;
+ size_t cl = pageheap->GetSizeClassIfCached(p);
+
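+ // cl == 0 here means either that the size class is not cached yet or that
+ // this is a large (page-level) allocation; the span descriptor disambiguates.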
+ if (cl == 0) {
+ span = pageheap->GetDescriptor(p);
+ cl = span->sizeclass;
+ pageheap->CacheSizeClass(p, cl);
+ }
+ if (cl != 0) {
+#ifndef NO_TCMALLOC_SAMPLES
+ ASSERT(!pageheap->GetDescriptor(p)->sample);
+#endif
+ TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
+ if (heap != NULL) {
+ heap->Deallocate(ptr, cl);
+ } else {
+ // Delete directly into central cache
+ SLL_SetNext(ptr, NULL);
+ central_cache[cl].InsertRange(ptr, ptr, 1);
+ }
+ } else {
+ SpinLockHolder h(&pageheap_lock);
+ ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
+ ASSERT(span != NULL && span->start == p);
+#ifndef NO_TCMALLOC_SAMPLES
+ if (span->sample) {
+ DLL_Remove(span);
+ stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
+ span->objects = NULL;
+ }
+#endif
+ pageheap->Delete(span);
+ }
+}
+
+#ifndef WTF_CHANGES
+// For use by exported routines below that want specific alignments
+//
+// Note: this code can be slow, and can significantly fragment memory.
+// The expectation is that memalign/posix_memalign/valloc/pvalloc will
+// not be invoked very often. This requirement simplifies our
+// implementation and allows us to tune for expected allocation
+// patterns.
+static void* do_memalign(size_t align, size_t size) {
+ ASSERT((align & (align - 1)) == 0);
+ ASSERT(align > 0);
+ if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
+
+ // Allocate at least one byte to avoid boundary conditions below
+ if (size == 0) size = 1;
+
+ if (size <= kMaxSize && align < kPageSize) {
+ // Search through acceptable size classes looking for one with
+ // enough alignment. This depends on the fact that
+ // InitSizeClasses() currently produces several size classes that
+ // are aligned at powers of two. We will waste time and space if
+ // we miss in the size class array, but that is deemed acceptable
+ // since memalign() should be used rarely.
+ size_t cl = SizeClass(size);
+ while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
+ cl++;
+ }
+ if (cl < kNumClasses) {
+ TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
+ return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
+ }
+ }
+
+ // We will allocate directly from the page heap
+ SpinLockHolder h(&pageheap_lock);
+
+ if (align <= kPageSize) {
+ // Any page-level allocation will be fine
+ // TODO: We could put the rest of this page in the appropriate
+ // TODO: cache but it does not seem worth it.
+ Span* span = pageheap->New(pages(size));
+ return span == NULL ? NULL : SpanToMallocResult(span);
+ }
+
+ // Allocate extra pages and carve off an aligned portion
+ const Length alloc = pages(size + align);
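+ // Asking for pages(size + align) guarantees that, wherever the span starts,
+ // it contains an aligned address with at least pages(size) pages after it.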
+ Span* span = pageheap->New(alloc);
+ if (span == NULL) return NULL;
+
+ // Skip starting portion so that we end up aligned
+ Length skip = 0;
+ while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
+ skip++;
+ }
+ ASSERT(skip < alloc);
+ if (skip > 0) {
+ Span* rest = pageheap->Split(span, skip);
+ pageheap->Delete(span);
+ span = rest;
+ }
+
+ // Skip trailing portion that we do not need to return
+ const Length needed = pages(size);
+ ASSERT(span->length >= needed);
+ if (span->length > needed) {
+ Span* trailer = pageheap->Split(span, needed);
+ pageheap->Delete(trailer);
+ }
+ return SpanToMallocResult(span);
+}
+#endif
+
+// Helpers for use by exported routines below:
+
+#ifndef WTF_CHANGES
+static inline void do_malloc_stats() {
+ PrintStats(1);
+}
+#endif
+
+static inline int do_mallopt(int, int) {
+ return 1; // Indicates error
+}
+
+#ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
+static inline struct mallinfo do_mallinfo() {
+ TCMallocStats stats;
+ ExtractStats(&stats, NULL);
+
+ // Just some of the fields are filled in.
+ struct mallinfo info;
+ memset(&info, 0, sizeof(info));
+
+ // Unfortunately, the struct contains "int" field, so some of the
+ // size values will be truncated.
+ info.arena = static_cast<int>(stats.system_bytes);
+ info.fsmblks = static_cast<int>(stats.thread_bytes
+ + stats.central_bytes
+ + stats.transfer_bytes);
+ info.fordblks = static_cast<int>(stats.pageheap_bytes);
+ info.uordblks = static_cast<int>(stats.system_bytes
+ - stats.thread_bytes
+ - stats.central_bytes
+ - stats.transfer_bytes
+ - stats.pageheap_bytes);
+
+ return info;
+}
+#endif
+
+//-------------------------------------------------------------------
+// Exported routines
+//-------------------------------------------------------------------
+
+// CAVEAT: The code structure below ensures that MallocHook methods are always
+// called from the stack frame of the invoked allocation function.
+// heap-checker.cc depends on this to start a stack trace from
+// the call to the (de)allocation function.
+
+#ifndef WTF_CHANGES
+extern "C"
+#else
+#define do_malloc do_malloc<crashOnFailure>
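+// With WTF_CHANGES the entry points below are compiled as templates; this
+// macro forwards each function's crashOnFailure argument into do_malloc.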
+
+template <bool crashOnFailure>
+void* malloc(size_t);
+
+void* fastMalloc(size_t size)
+{
+ return malloc<true>(size);
+}
+
+void* tryFastMalloc(size_t size)
+{
+ return malloc<false>(size);
+}
+
+template <bool crashOnFailure>
+ALWAYS_INLINE
+#endif
+void* malloc(size_t size) {
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= size) // If overflow would occur...
+ return 0;
+ size += sizeof(AllocAlignmentInteger);
+ void* result = do_malloc(size);
+ if (!result)
+ return 0;
+
+ *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
+ result = static_cast<AllocAlignmentInteger*>(result) + 1;
+#else
+ void* result = do_malloc(size);
+#endif
+
+#ifndef WTF_CHANGES
+ MallocHook::InvokeNewHook(result, size);
+#endif
+ return result;
+}
+
+#ifndef WTF_CHANGES
+extern "C"
+#endif
+void free(void* ptr) {
+#ifndef WTF_CHANGES
+ MallocHook::InvokeDeleteHook(ptr);
+#endif
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (!ptr)
+ return;
+
+ AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(ptr);
+ if (*header != Internal::AllocTypeMalloc)
+ Internal::fastMallocMatchFailed(ptr);
+ do_free(header);
+#else
+ do_free(ptr);
+#endif
+}
+
+#ifndef WTF_CHANGES
+extern "C"
+#else
+template <bool crashOnFailure>
+void* calloc(size_t, size_t);
+
+void* fastCalloc(size_t n, size_t elem_size)
+{
+ return calloc<true>(n, elem_size);
+}
+
+void* tryFastCalloc(size_t n, size_t elem_size)
+{
+ return calloc<false>(n, elem_size);
+}
+
+template <bool crashOnFailure>
+ALWAYS_INLINE
+#endif
+void* calloc(size_t n, size_t elem_size) {
+ size_t totalBytes = n * elem_size;
+
+ // Protect against overflow
+ if (n > 1 && elem_size && (totalBytes / elem_size) != n)
+ return 0;
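+ // (The n > 1 test skips the division in the common single-element case;
+ // a product that wrapped around cannot divide back to exactly n.)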
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes) // If overflow would occur...
+ return 0;
+
+ totalBytes += sizeof(AllocAlignmentInteger);
+ void* result = do_malloc(totalBytes);
+ if (!result)
+ return 0;
+
+ memset(result, 0, totalBytes);
+ *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
+ result = static_cast<AllocAlignmentInteger*>(result) + 1;
+#else
+ void* result = do_malloc(totalBytes);
+ if (result != NULL) {
+ memset(result, 0, totalBytes);
+ }
+#endif
+
+#ifndef WTF_CHANGES
+ MallocHook::InvokeNewHook(result, totalBytes);
+#endif
+ return result;
+}
+
+// Since cfree isn't used anywhere, we don't compile it in.
+#ifndef WTF_CHANGES
+#ifndef WTF_CHANGES
+extern "C"
+#endif
+void cfree(void* ptr) {
+#ifndef WTF_CHANGES
+ MallocHook::InvokeDeleteHook(ptr);
+#endif
+ do_free(ptr);
+}
+#endif
+
+#ifndef WTF_CHANGES
+extern "C"
+#else
+template <bool crashOnFailure>
+void* realloc(void*, size_t);
+
+void* fastRealloc(void* old_ptr, size_t new_size)
+{
+ return realloc<true>(old_ptr, new_size);
+}
+
+void* tryFastRealloc(void* old_ptr, size_t new_size)
+{
+ return realloc<false>(old_ptr, new_size);
+}
+
+template <bool crashOnFailure>
+ALWAYS_INLINE
+#endif
+void* realloc(void* old_ptr, size_t new_size) {
+ if (old_ptr == NULL) {
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ void* result = malloc(new_size);
+#else
+ void* result = do_malloc(new_size);
+#ifndef WTF_CHANGES
+ MallocHook::InvokeNewHook(result, new_size);
+#endif
+#endif
+ return result;
+ }
+ if (new_size == 0) {
+#ifndef WTF_CHANGES
+ MallocHook::InvokeDeleteHook(old_ptr);
+#endif
+ free(old_ptr);
+ return NULL;
+ }
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= new_size) // If overflow would occur...
+ return 0;
+ new_size += sizeof(AllocAlignmentInteger);
+ AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(old_ptr);
+ if (*header != Internal::AllocTypeMalloc)
+ Internal::fastMallocMatchFailed(old_ptr);
+ old_ptr = header;
+#endif
+
+ // Get the size of the old entry
+ const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
+ size_t cl = pageheap->GetSizeClassIfCached(p);
+ Span *span = NULL;
+ size_t old_size;
+ if (cl == 0) {
+ span = pageheap->GetDescriptor(p);
+ cl = span->sizeclass;
+ pageheap->CacheSizeClass(p, cl);
+ }
+ if (cl != 0) {
+ old_size = ByteSizeForClass(cl);
+ } else {
+ ASSERT(span != NULL);
+ old_size = span->length << kPageShift;
+ }
+
+ // Reallocate if the new size is larger than the old size,
+ // or if the new size is significantly smaller than the old size.
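+ // AllocationSize(new_size) is the rounded-up number of bytes the allocator
+ // would actually hand out for new_size; if even that is below old_size,
+ // shrinking moves the object to a smaller size class and reclaims memory.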
+ if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
+ // Need to reallocate
+ void* new_ptr = do_malloc(new_size);
+ if (new_ptr == NULL) {
+ return NULL;
+ }
+#ifndef WTF_CHANGES
+ MallocHook::InvokeNewHook(new_ptr, new_size);
+#endif
+ memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
+#ifndef WTF_CHANGES
+ MallocHook::InvokeDeleteHook(old_ptr);
+#endif
+ // We could use a variant of do_free() that leverages the fact
+ // that we already know the sizeclass of old_ptr. The benefit
+ // would be small, so don't bother.
+ do_free(old_ptr);
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ new_ptr = static_cast<AllocAlignmentInteger*>(new_ptr) + 1;
+#endif
+ return new_ptr;
+ } else {
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+ old_ptr = static_cast<AllocAlignmentInteger*>(old_ptr) + 1; // Set old_ptr back to the user pointer.
+#endif
+ return old_ptr;
+ }
+}
+
+#ifdef WTF_CHANGES
+#undef do_malloc
+#else
+
+static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
+
+static inline void* cpp_alloc(size_t size, bool nothrow) {
+ for (;;) {
+ void* p = do_malloc(size);
+#ifdef PREANSINEW
+ return p;
+#else
+ if (p == NULL) { // allocation failed
+ // Get the current new handler. NB: this function is not
+ // thread-safe. We make a feeble stab at making it so here, but
+ // this lock only protects against tcmalloc interfering with
+ // itself, not with other libraries calling set_new_handler.
+ std::new_handler nh;
+ {
+ SpinLockHolder h(&set_new_handler_lock);
+ nh = std::set_new_handler(0);
+ (void) std::set_new_handler(nh);
+ }
+ // If no new_handler is established, the allocation failed.
+ if (!nh) {
+ if (nothrow) return 0;
+ throw std::bad_alloc();
+ }
+ // Otherwise, try the new_handler. If it returns, retry the
+ // allocation. If it throws std::bad_alloc, fail the allocation.
+ // if it throws something else, don't interfere.
+ try {
+ (*nh)();
+ } catch (const std::bad_alloc&) {
+ if (!nothrow) throw;
+ return p;
+ }
+ } else { // allocation success
+ return p;
+ }
+#endif
+ }
+}
+
+void* operator new(size_t size) {
+ void* p = cpp_alloc(size, false);
+ // We keep this next instruction out of cpp_alloc for a reason: when
+ // it's in, and new just calls cpp_alloc, the optimizer may fold the
+ // new call into cpp_alloc, which messes up our whole section-based
+ // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
+ // isn't the last thing this fn calls, and prevents the folding.
+ MallocHook::InvokeNewHook(p, size);
+ return p;
+}
+
+void* operator new(size_t size, const std::nothrow_t&) __THROW {
+ void* p = cpp_alloc(size, true);
+ MallocHook::InvokeNewHook(p, size);
+ return p;
+}
+
+void operator delete(void* p) __THROW {
+ MallocHook::InvokeDeleteHook(p);
+ do_free(p);
+}
+
+void operator delete(void* p, const std::nothrow_t&) __THROW {
+ MallocHook::InvokeDeleteHook(p);
+ do_free(p);
+}
+
+void* operator new[](size_t size) {
+ void* p = cpp_alloc(size, false);
+ // We keep this next instruction out of cpp_alloc for a reason: when
+ // it's in, and new just calls cpp_alloc, the optimizer may fold the
+ // new call into cpp_alloc, which messes up our whole section-based
+ // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
+ // isn't the last thing this fn calls, and prevents the folding.
+ MallocHook::InvokeNewHook(p, size);
+ return p;
+}
+
+void* operator new[](size_t size, const std::nothrow_t&) __THROW {
+ void* p = cpp_alloc(size, true);
+ MallocHook::InvokeNewHook(p, size);
+ return p;
+}
+
+void operator delete[](void* p) __THROW {
+ MallocHook::InvokeDeleteHook(p);
+ do_free(p);
+}
+
+void operator delete[](void* p, const std::nothrow_t&) __THROW {
+ MallocHook::InvokeDeleteHook(p);
+ do_free(p);
+}
+
+extern "C" void* memalign(size_t align, size_t size) __THROW {
+ void* result = do_memalign(align, size);
+ MallocHook::InvokeNewHook(result, size);
+ return result;
+}
+
+extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
+ __THROW {
+ if (((align % sizeof(void*)) != 0) ||
+ ((align & (align - 1)) != 0) ||
+ (align == 0)) {
+ return EINVAL;
+ }
+
+ void* result = do_memalign(align, size);
+ MallocHook::InvokeNewHook(result, size);
+ if (result == NULL) {
+ return ENOMEM;
+ } else {
+ *result_ptr = result;
+ return 0;
+ }
+}
+
+static size_t pagesize = 0;
+
+extern "C" void* valloc(size_t size) __THROW {
+ // Allocate page-aligned object of length >= size bytes
+ if (pagesize == 0) pagesize = getpagesize();
+ void* result = do_memalign(pagesize, size);
+ MallocHook::InvokeNewHook(result, size);
+ return result;
+}
+
+extern "C" void* pvalloc(size_t size) __THROW {
+ // Round up size to a multiple of pagesize
+ if (pagesize == 0) pagesize = getpagesize();
+ size = (size + pagesize - 1) & ~(pagesize - 1);
+ void* result = do_memalign(pagesize, size);
+ MallocHook::InvokeNewHook(result, size);
+ return result;
+}
+
+extern "C" void malloc_stats(void) {
+ do_malloc_stats();
+}
+
+extern "C" int mallopt(int cmd, int value) {
+ return do_mallopt(cmd, value);
+}
+
+#ifdef HAVE_STRUCT_MALLINFO
+extern "C" struct mallinfo mallinfo(void) {
+ return do_mallinfo();
+}
+#endif
+
+//-------------------------------------------------------------------
+// Some library routines on RedHat 9 allocate memory using malloc()
+// and free it using __libc_free() (or vice-versa). Since we provide
+// our own implementations of malloc/free, we need to make sure that
+// the __libc_XXX variants (defined as part of glibc) also point to
+// the same implementations.
+//-------------------------------------------------------------------
+
+#if defined(__GLIBC__)
+extern "C" {
+#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
+ // Potentially faster variants that use the gcc alias extension.
+ // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
+# define ALIAS(x) __attribute__ ((weak, alias (x)))
+ void* __libc_malloc(size_t size) ALIAS("malloc");
+ void __libc_free(void* ptr) ALIAS("free");
+ void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc");
+ void* __libc_calloc(size_t n, size_t size) ALIAS("calloc");
+ void __libc_cfree(void* ptr) ALIAS("cfree");
+ void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
+ void* __libc_valloc(size_t size) ALIAS("valloc");
+ void* __libc_pvalloc(size_t size) ALIAS("pvalloc");
+ int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
+# undef ALIAS
+# else /* not __GNUC__ */
+ // Portable wrappers
+ void* __libc_malloc(size_t size) { return malloc(size); }
+ void __libc_free(void* ptr) { free(ptr); }
+ void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
+ void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
+ void __libc_cfree(void* ptr) { cfree(ptr); }
+ void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
+ void* __libc_valloc(size_t size) { return valloc(size); }
+ void* __libc_pvalloc(size_t size) { return pvalloc(size); }
+ int __posix_memalign(void** r, size_t a, size_t s) {
+ return posix_memalign(r, a, s);
+ }
+# endif /* __GNUC__ */
+}
+#endif /* __GLIBC__ */
+
+// Override __libc_memalign in libc on linux boxes specially.
+// They have a bug in libc that causes them to (very rarely) allocate
+// with __libc_memalign() yet deallocate with free() and the
+// definitions above don't catch it.
+// This function is an exception to the rule of calling MallocHook method
+// from the stack frame of the allocation function;
+// heap-checker handles this special case explicitly.
+static void *MemalignOverride(size_t align, size_t size, const void *caller)
+ __THROW {
+ void* result = do_memalign(align, size);
+ MallocHook::InvokeNewHook(result, size);
+ return result;
+}
+void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
+
+#endif
+
+#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
+
+class FreeObjectFinder {
+ const RemoteMemoryReader& m_reader;
+ HashSet<void*> m_freeObjects;
+
+public:
+ FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }
+
+ void visit(void* ptr) { m_freeObjects.add(ptr); }
+ bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
+ bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
+ size_t freeObjectCount() const { return m_freeObjects.size(); }
+
+ void findFreeObjects(TCMalloc_ThreadCache* threadCache)
+ {
+ for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
+ threadCache->enumerateFreeObjects(*this, m_reader);
+ }
+
+ void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
+ {
+ for (unsigned i = 0; i < numSizes; i++)
+ centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
+ }
+};
+
+class PageMapFreeObjectFinder {
+ const RemoteMemoryReader& m_reader;
+ FreeObjectFinder& m_freeObjectFinder;
+
+public:
+ PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
+ : m_reader(reader)
+ , m_freeObjectFinder(freeObjectFinder)
+ { }
+
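+ // visit() is called for each non-null page map entry; returning the span's
+ // length in pages lets the page map walk skip ahead so each span is seen once.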
+ int visit(void* ptr) const
+ {
+ if (!ptr)
+ return 1;
+
+ Span* span = m_reader(reinterpret_cast<Span*>(ptr));
+ if (span->free) {
+ void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
+ m_freeObjectFinder.visit(ptr);
+ } else if (span->sizeclass) {
+ // Walk the free list of the small-object span, keeping track of each object seen
+ for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
+ m_freeObjectFinder.visit(nextObject);
+ }
+ return span->length;
+ }
+};
+
+class PageMapMemoryUsageRecorder {
+ task_t m_task;
+ void* m_context;
+ unsigned m_typeMask;
+ vm_range_recorder_t* m_recorder;
+ const RemoteMemoryReader& m_reader;
+ const FreeObjectFinder& m_freeObjectFinder;
+
+ HashSet<void*> m_seenPointers;
+ Vector<Span*> m_coalescedSpans;
+
+public:
+ PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
+ : m_task(task)
+ , m_context(context)
+ , m_typeMask(typeMask)
+ , m_recorder(recorder)
+ , m_reader(reader)
+ , m_freeObjectFinder(freeObjectFinder)
+ { }
+
+ ~PageMapMemoryUsageRecorder()
+ {
+ ASSERT(!m_coalescedSpans.size());
+ }
+
+ void recordPendingRegions()
+ {
+ Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
+ vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
+ ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);
+
+ // Mark the memory region the spans represent as a candidate for containing pointers
+ if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
+ (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);
+
+ if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
+ m_coalescedSpans.clear();
+ return;
+ }
+
+ Vector<vm_range_t, 1024> allocatedPointers;
+ for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
+ Span *theSpan = m_coalescedSpans[i];
+ if (theSpan->free)
+ continue;
+
+ vm_address_t spanStartAddress = theSpan->start << kPageShift;
+ vm_size_t spanSizeInBytes = theSpan->length * kPageSize;
+
+ if (!theSpan->sizeclass) {
+ // If it's an allocated large object span, mark it as in use
+ if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
+ allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
+ } else {
+ const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);
+
+ // Mark each allocated small object within the span as in use
+ const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
+ for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
+ if (!m_freeObjectFinder.isFreeObject(object))
+ allocatedPointers.append((vm_range_t){object, objectSize});
+ }
+ }
+ }
+
+ (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());
+
+ m_coalescedSpans.clear();
+ }
+
+ int visit(void* ptr)
+ {
+ if (!ptr)
+ return 1;
+
+ Span* span = m_reader(reinterpret_cast<Span*>(ptr));
+ if (!span->start)
+ return 1;
+
+ if (m_seenPointers.contains(ptr))
+ return span->length;
+ m_seenPointers.add(ptr);
+
+ if (!m_coalescedSpans.size()) {
+ m_coalescedSpans.append(span);
+ return span->length;
+ }
+
+ Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
+ vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
+ vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;
+
+ // If the new span is adjacent to the previous span, do nothing for now.
+ vm_address_t spanStartAddress = span->start << kPageShift;
+ if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
+ m_coalescedSpans.append(span);
+ return span->length;
+ }
+
+ // New span is not adjacent to previous span, so record the spans coalesced so far.
+ recordPendingRegions();
+ m_coalescedSpans.append(span);
+
+ return span->length;
+ }
+};
+
+class AdminRegionRecorder {
+ task_t m_task;
+ void* m_context;
+ unsigned m_typeMask;
+ vm_range_recorder_t* m_recorder;
+ const RemoteMemoryReader& m_reader;
+
+ Vector<vm_range_t, 1024> m_pendingRegions;
+
+public:
+ AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader)
+ : m_task(task)
+ , m_context(context)
+ , m_typeMask(typeMask)
+ , m_recorder(recorder)
+ , m_reader(reader)
+ { }
+
+ void recordRegion(vm_address_t ptr, size_t size)
+ {
+ if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
+ m_pendingRegions.append((vm_range_t){ ptr, size });
+ }
+
+ void visit(void *ptr, size_t size)
+ {
+ recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
+ }
+
+ void recordPendingRegions()
+ {
+ if (m_pendingRegions.size()) {
+ (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
+ m_pendingRegions.clear();
+ }
+ }
+
+ ~AdminRegionRecorder()
+ {
+ ASSERT(!m_pendingRegions.size());
+ }
+};
+
+kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
+{
+ RemoteMemoryReader memoryReader(task, reader);
+
+ InitSizeClasses();
+
+ FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
+ TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
+ TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
+ TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);
+
+ TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);
+
+ FreeObjectFinder finder(memoryReader);
+ finder.findFreeObjects(threadHeaps);
+ finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);
+
+ TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
+ PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
+ pageMap->visitValues(pageMapFinder, memoryReader);
+
+ PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
+ pageMap->visitValues(usageRecorder, memoryReader);
+ usageRecorder.recordPendingRegions();
+
+ AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader);
+ pageMap->visitAllocations(adminRegionRecorder, memoryReader);
+
+ PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
+ PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);
+
+ spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
+ pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
+
+ adminRegionRecorder.recordPendingRegions();
+
+ return 0;
+}
+
+size_t FastMallocZone::size(malloc_zone_t*, const void*)
+{
+ return 0;
+}
+
+void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
+{
+ return 0;
+}
+
+void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
+{
+ return 0;
+}
+
+void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
+{
+ // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
+ // is not in this zone. When this happens, the pointer being freed was not allocated by any
+ // zone so we need to print a useful error for the application developer.
+ malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
+}
+
+void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
+{
+ return 0;
+}
+
+
+#undef malloc
+#undef free
+#undef realloc
+#undef calloc
+
+extern "C" {
+malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
+ &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
+
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !PLATFORM(IPHONE)
+ , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
+#endif
+
+ };
+}
+
+FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
+ : m_pageHeap(pageHeap)
+ , m_threadHeaps(threadHeaps)
+ , m_centralCaches(centralCaches)
+ , m_spanAllocator(spanAllocator)
+ , m_pageHeapAllocator(pageHeapAllocator)
+{
+ memset(&m_zone, 0, sizeof(m_zone));
+ m_zone.version = 4;
+ m_zone.zone_name = "JavaScriptCore FastMalloc";
+ m_zone.size = &FastMallocZone::size;
+ m_zone.malloc = &FastMallocZone::zoneMalloc;
+ m_zone.calloc = &FastMallocZone::zoneCalloc;
+ m_zone.realloc = &FastMallocZone::zoneRealloc;
+ m_zone.free = &FastMallocZone::zoneFree;
+ m_zone.valloc = &FastMallocZone::zoneValloc;
+ m_zone.destroy = &FastMallocZone::zoneDestroy;
+ m_zone.introspect = &jscore_fastmalloc_introspection;
+ malloc_zone_register(&m_zone);
+}
+
+
+void FastMallocZone::init()
+{
+ static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
+}
+
+#endif
+
+#if WTF_CHANGES
+void releaseFastMallocFreeMemory()
+{
+ // Flush free pages in the current thread cache back to the page heap.
+ // The low-water-mark mechanism in Scavenge() prevents a full return on the
+ // first pass; the second pass flushes everything that remains.
+ if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
+ threadCache->Scavenge();
+ threadCache->Scavenge();
+ }
+
+ SpinLockHolder h(&pageheap_lock);
+ pageheap->ReleaseFreePages();
+}
+
+FastMallocStatistics fastMallocStatistics()
+{
+ FastMallocStatistics statistics;
+ {
+ SpinLockHolder lockHolder(&pageheap_lock);
+ statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes());
+ statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes());
+ statistics.returnedSize = pageheap->ReturnedBytes();
+ statistics.freeSizeInCaches = 0;
+ for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
+ statistics.freeSizeInCaches += threadCache->Size();
+ }
+ for (unsigned cl = 0; cl < kNumClasses; ++cl) {
+ const int length = central_cache[cl].length();
+ const int tc_length = central_cache[cl].tc_length();
+ statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length);
+ }
+ return statistics;
+}
+
+} // namespace WTF
+#endif
+
+#endif // FORCE_SYSTEM_MALLOC
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.h
new file mode 100644
index 0000000..61ebe32
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/FastMalloc.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_FastMalloc_h
+#define WTF_FastMalloc_h
+
+#include "Platform.h"
+#include <stdlib.h>
+#include <new>
+
+namespace WTF {
+
+ // These functions call CRASH() if an allocation fails.
+ void* fastMalloc(size_t n);
+ void* fastZeroedMalloc(size_t n);
+ void* fastCalloc(size_t n_elements, size_t element_size);
+ void* fastRealloc(void* p, size_t n);
+
+ // These functions return NULL if an allocation fails.
+ void* tryFastMalloc(size_t n);
+ void* tryFastZeroedMalloc(size_t n);
+ void* tryFastCalloc(size_t n_elements, size_t element_size);
+ void* tryFastRealloc(void* p, size_t n);
+
+ void fastFree(void* p);
+
+#ifndef NDEBUG
+ void fastMallocForbid();
+ void fastMallocAllow();
+#endif
+
+ void releaseFastMallocFreeMemory();
+
+ struct FastMallocStatistics {
+ size_t heapSize;
+ size_t freeSizeInHeap;
+ size_t freeSizeInCaches;
+ size_t returnedSize;
+ };
+ FastMallocStatistics fastMallocStatistics();
+
+ // This defines a type which holds an unsigned integer and is the same
+ // size as the minimally aligned memory allocation.
+ typedef unsigned long long AllocAlignmentInteger;
+
+ namespace Internal {
+ enum AllocType { // Start with an unusual number instead of zero, because zero is common.
+ AllocTypeMalloc = 0x375d6750, // Encompasses fastMalloc, fastZeroedMalloc, fastCalloc, fastRealloc.
+ AllocTypeClassNew, // Encompasses class operator new from FastAllocBase.
+ AllocTypeClassNewArray, // Encompasses class operator new[] from FastAllocBase.
+ AllocTypeFastNew, // Encompasses fastNew.
+ AllocTypeFastNewArray, // Encompasses fastNewArray.
+ AllocTypeNew, // Encompasses global operator new.
+ AllocTypeNewArray // Encompasses global operator new[].
+ };
+ }
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+
+ // Malloc validation is a scheme whereby a tag is attached to an
+ // allocation which identifies how it was originally allocated.
+ // This allows us to verify that the freeing operation matches the
+ // allocation operation. If memory is allocated with operator new[]
+ // but freed with free or delete, this system would detect that.
+ // In the implementation here, the tag is an integer prepended to
+ // the allocation memory which is assigned one of the AllocType
+ // enumeration values. An alternative implementation of this
+ // scheme could store the tag somewhere else or ignore it.
+ // Users of FastMalloc don't need to know or care how this tagging
+ // is implemented.
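+ //
+ // With this implementation a tagged allocation looks like:
+ //
+ //   [ AllocAlignmentInteger tag ][ bytes returned to the caller ... ]
+ //   ^ block start as seen by the  ^ pointer handed back by
+ //     underlying allocator          fastMalloc and friends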
+
+ namespace Internal {
+
+ // Return the AllocType tag associated with the allocated block p.
+ inline AllocType fastMallocMatchValidationType(const void* p)
+ {
+ const AllocAlignmentInteger* type = static_cast<const AllocAlignmentInteger*>(p) - 1;
+ return static_cast<AllocType>(*type);
+ }
+
+ // Return the address of the AllocType tag associated with the allocated block p.
+ inline AllocAlignmentInteger* fastMallocMatchValidationValue(void* p)
+ {
+ return reinterpret_cast<AllocAlignmentInteger*>(static_cast<char*>(p) - sizeof(AllocAlignmentInteger));
+ }
+
+ // Set the AllocType tag to be associated with the allocated block p.
+ inline void setFastMallocMatchValidationType(void* p, AllocType allocType)
+ {
+ AllocAlignmentInteger* type = static_cast<AllocAlignmentInteger*>(p) - 1;
+ *type = static_cast<AllocAlignmentInteger>(allocType);
+ }
+
+ // Handle a detected alloc/free mismatch. By default this calls CRASH().
+ void fastMallocMatchFailed(void* p);
+
+ } // namespace Internal
+
+ // This is a higher level function which is used by FastMalloc-using code.
+ inline void fastMallocMatchValidateMalloc(void* p, Internal::AllocType allocType)
+ {
+ if (!p)
+ return;
+
+ Internal::setFastMallocMatchValidationType(p, allocType);
+ }
+
+ // This is a higher level function which is used by FastMalloc-using code.
+ inline void fastMallocMatchValidateFree(void* p, Internal::AllocType allocType)
+ {
+ if (!p)
+ return;
+
+ if (Internal::fastMallocMatchValidationType(p) != allocType)
+ Internal::fastMallocMatchFailed(p);
+ Internal::setFastMallocMatchValidationType(p, Internal::AllocTypeMalloc); // Set it to this so that fastFree thinks it's OK.
+ }
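+
+ // Illustrative pairing, in the style of a class-specific operator new and
+ // delete (names here are for exposition only):
+ //   void* p = fastMalloc(size);
+ //   fastMallocMatchValidateMalloc(p, Internal::AllocTypeClassNew);
+ //   ...
+ //   fastMallocMatchValidateFree(p, Internal::AllocTypeClassNew);
+ //   fastFree(p);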
+
+#else
+
+ inline void fastMallocMatchValidateMalloc(void*, Internal::AllocType)
+ {
+ }
+
+ inline void fastMallocMatchValidateFree(void*, Internal::AllocType)
+ {
+ }
+
+#endif
+
+} // namespace WTF
+
+using WTF::fastMalloc;
+using WTF::fastZeroedMalloc;
+using WTF::fastCalloc;
+using WTF::fastRealloc;
+using WTF::tryFastMalloc;
+using WTF::tryFastZeroedMalloc;
+using WTF::tryFastCalloc;
+using WTF::tryFastRealloc;
+using WTF::fastFree;
+
+#ifndef NDEBUG
+using WTF::fastMallocForbid;
+using WTF::fastMallocAllow;
+#endif
+
+#if COMPILER(GCC) && PLATFORM(DARWIN)
+#define WTF_PRIVATE_INLINE __private_extern__ inline __attribute__((always_inline))
+#elif COMPILER(GCC)
+#define WTF_PRIVATE_INLINE inline __attribute__((always_inline))
+#elif COMPILER(MSVC)
+#define WTF_PRIVATE_INLINE __forceinline
+#else
+#define WTF_PRIVATE_INLINE inline
+#endif
+
+#ifndef _CRTDBG_MAP_ALLOC
+
+#if !defined(USE_SYSTEM_MALLOC) || !(USE_SYSTEM_MALLOC)
+WTF_PRIVATE_INLINE void* operator new(size_t s) { return fastMalloc(s); }
+WTF_PRIVATE_INLINE void operator delete(void* p) { fastFree(p); }
+WTF_PRIVATE_INLINE void* operator new[](size_t s) { return fastMalloc(s); }
+WTF_PRIVATE_INLINE void operator delete[](void* p) { fastFree(p); }
+
+#if PLATFORM(WINCE)
+WTF_PRIVATE_INLINE void* operator new(size_t s, const std::nothrow_t&) throw() { return fastMalloc(s); }
+WTF_PRIVATE_INLINE void operator delete(void* p, const std::nothrow_t&) throw() { fastFree(p); }
+WTF_PRIVATE_INLINE void* operator new[](size_t s, const std::nothrow_t&) throw() { return fastMalloc(s); }
+WTF_PRIVATE_INLINE void operator delete[](void* p, const std::nothrow_t&) throw() { fastFree(p); }
+#endif
+#endif
+
+#endif // _CRTDBG_MAP_ALLOC
+
+#endif /* WTF_FastMalloc_h */
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Forward.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Forward.h
new file mode 100644
index 0000000..67dc3be
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Forward.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2006 Apple Computer, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_Forward_h
+#define WTF_Forward_h
+
+#include <stddef.h>
+
+namespace WTF {
+ template<typename T> class ListRefPtr;
+ template<typename T> class OwnArrayPtr;
+ template<typename T> class OwnPtr;
+ template<typename T> class PassRefPtr;
+ template<typename T> class RefPtr;
+ template<typename T, size_t inlineCapacity> class Vector;
+}
+
+using WTF::ListRefPtr;
+using WTF::OwnArrayPtr;
+using WTF::OwnPtr;
+using WTF::PassRefPtr;
+using WTF::RefPtr;
+using WTF::Vector;
+
+#endif // WTF_Forward_h
+
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/GOwnPtr.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/GOwnPtr.cpp
new file mode 100644
index 0000000..432885f
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/GOwnPtr.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2008 Collabora Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "GOwnPtr.h"
+
+namespace WTF {
+
+template <> void freeOwnedGPtr<GError>(GError* ptr)
+{
+ if (ptr)
+ g_error_free(ptr);
+}
+
+template <> void freeOwnedGPtr<GList>(GList* ptr)
+{
+ g_list_free(ptr);
+}
+
+template <> void freeOwnedGPtr<GCond>(GCond* ptr)
+{
+ if (ptr)
+ g_cond_free(ptr);
+}
+
+template <> void freeOwnedGPtr<GMutex>(GMutex* ptr)
+{
+ if (ptr)
+ g_mutex_free(ptr);
+}
+
+template <> void freeOwnedGPtr<GPatternSpec>(GPatternSpec* ptr)
+{
+ if (ptr)
+ g_pattern_spec_free(ptr);
+}
+
+template <> void freeOwnedGPtr<GDir>(GDir* ptr)
+{
+ if (ptr)
+ g_dir_close(ptr);
+}
+
+template <> void freeOwnedGPtr<GHashTable>(GHashTable* ptr)
+{
+ if (ptr)
+ g_hash_table_unref(ptr);
+}
+
+} // namespace WTF
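
GOwnPtr.cpp supplies full specializations of freeOwnedGPtr for glib types whose cleanup routine is not plain g_free. A standalone sketch of the same dispatch technique, using hypothetical FakeBuffer/FakeFile types in place of the glib ones:

    #include <cstdio>
    #include <cstdlib>

    struct FakeBuffer { void* data; };
    struct FakeFile   { std::FILE* fp; };

    // Generic fallback, like the g_free() default in GOwnPtr.h.
    template<typename T> void freeOwned(T* ptr) { std::free(ptr); }

    // Specialization for a type with its own teardown,
    // like GDir/g_dir_close above.
    template<> void freeOwned<FakeFile>(FakeFile* f)
    {
        if (f) {
            if (f->fp)
                std::fclose(f->fp);
            std::free(f);
        }
    }

    int main()
    {
        FakeBuffer* b = static_cast<FakeBuffer*>(std::malloc(sizeof(FakeBuffer)));
        freeOwned(b); // generic path: plain free()

        FakeFile* f = static_cast<FakeFile*>(std::malloc(sizeof(FakeFile)));
        f->fp = std::tmpfile();
        freeOwned(f); // specialized path: fclose, then free
    }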
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/GOwnPtr.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/GOwnPtr.h
new file mode 100644
index 0000000..4993348
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/GOwnPtr.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2006, 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Collabora Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef GOwnPtr_h
+#define GOwnPtr_h
+
+#include <algorithm>
+#include <glib.h>
+#include <wtf/Assertions.h>
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+ template <typename T> inline void freeOwnedGPtr(T* ptr) { g_free(reinterpret_cast<void*>(ptr)); }
+ template<> void freeOwnedGPtr<GError>(GError*);
+ template<> void freeOwnedGPtr<GList>(GList*);
+ template<> void freeOwnedGPtr<GCond>(GCond*);
+ template<> void freeOwnedGPtr<GMutex>(GMutex*);
+ template<> void freeOwnedGPtr<GPatternSpec>(GPatternSpec*);
+ template<> void freeOwnedGPtr<GDir>(GDir*);
+ template<> void freeOwnedGPtr<GHashTable>(GHashTable*);
+
+ template <typename T> class GOwnPtr : public Noncopyable {
+ public:
+ explicit GOwnPtr(T* ptr = 0) : m_ptr(ptr) { }
+ ~GOwnPtr() { freeOwnedGPtr(m_ptr); }
+
+ T* get() const { return m_ptr; }
+ T* release() { T* ptr = m_ptr; m_ptr = 0; return ptr; }
+ T*& outPtr() { ASSERT(!m_ptr); return m_ptr; }
+
+ void set(T* ptr) { ASSERT(!ptr || m_ptr != ptr); freeOwnedGPtr(m_ptr); m_ptr = ptr; }
+ void clear() { freeOwnedGPtr(m_ptr); m_ptr = 0; }
+
+ T& operator*() const { ASSERT(m_ptr); return *m_ptr; }
+ T* operator->() const { ASSERT(m_ptr); return m_ptr; }
+
+ bool operator!() const { return !m_ptr; }
+
+ // This conversion operator allows implicit conversion to bool but not to other integer types.
+ typedef T* GOwnPtr::*UnspecifiedBoolType;
+ operator UnspecifiedBoolType() const { return m_ptr ? &GOwnPtr::m_ptr : 0; }
+
+ void swap(GOwnPtr& o) { std::swap(m_ptr, o.m_ptr); }
+
+ private:
+ T* m_ptr;
+ };
+
+ template <typename T> inline void swap(GOwnPtr<T>& a, GOwnPtr<T>& b) { a.swap(b); }
+
+ template <typename T, typename U> inline bool operator==(const GOwnPtr<T>& a, U* b)
+ {
+ return a.get() == b;
+ }
+
+ template <typename T, typename U> inline bool operator==(T* a, const GOwnPtr<U>& b)
+ {
+ return a == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const GOwnPtr<T>& a, U* b)
+ {
+ return a.get() != b;
+ }
+
+ template <typename T, typename U> inline bool operator!=(T* a, const GOwnPtr<U>& b)
+ {
+ return a != b.get();
+ }
+
+    template <typename T> inline T* getPtr(const GOwnPtr<T>& p)
+ {
+ return p.get();
+ }
+
+} // namespace WTF
+
+using WTF::GOwnPtr;
+
+#endif // GOwnPtr_h
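
The outPtr() accessor is shaped for glib's out-parameter convention. A sketch of typical use (assumes a glib build environment; g_file_get_contents fills a gchar* and a GError* through double pointers):

    #include <glib.h>
    #include <wtf/GOwnPtr.h>

    bool dumpFileSize(const char* path)
    {
        GOwnPtr<GError> error; // starts null
        GOwnPtr<char> contents;
        gsize length = 0;

        // outPtr() asserts the pointer is still null, then hands the
        // callee a T*& to fill in.
        if (!g_file_get_contents(path, &contents.outPtr(), &length, &error.outPtr())) {
            g_printerr("read failed: %s\n", error->message);
            return false; // error is released by ~GOwnPtr via g_error_free
        }
        g_print("%lu bytes read\n", (unsigned long)length);
        return true;      // contents is released via the g_free default
    }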
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/GetPtr.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/GetPtr.h
new file mode 100644
index 0000000..25a0e6d
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/GetPtr.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2006 Apple Computer, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_GetPtr_h
+#define WTF_GetPtr_h
+
+namespace WTF {
+
+ template <typename T> inline T* getPtr(T* p)
+ {
+ return p;
+ }
+
+} // namespace WTF
+
+#endif // WTF_GetPtr_h
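
getPtr gives generic code a single spelling for "give me the raw pointer"; smart pointers supply their own overloads (GOwnPtr.h above declares one). A minimal sketch with a hypothetical MiniPtr standing in for the WTF smart pointers:

    #include <cstdio>

    template<typename T> inline T* getPtr(T* p) { return p; } // as in GetPtr.h

    template<typename T> class MiniPtr {
    public:
        explicit MiniPtr(T* p) : m_p(p) { }
        T* get() const { return m_p; }
    private:
        T* m_p;
    };

    // The overload a smart pointer supplies so generic code works unchanged.
    template<typename T> inline T* getPtr(const MiniPtr<T>& p) { return p.get(); }

    template<typename P> void describe(const P& p)
    {
        std::printf("points at %p\n", static_cast<void*>(getPtr(p)));
    }

    int main()
    {
        int n = 1;
        MiniPtr<int> smart(&n);
        describe(&n);    // raw pointer path
        describe(smart); // smart pointer path
    }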
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashCountedSet.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashCountedSet.h
new file mode 100644
index 0000000..1a422d8
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashCountedSet.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2005, 2006, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_HashCountedSet_h
+#define WTF_HashCountedSet_h
+
+#include "Assertions.h"
+#include "FastAllocBase.h"
+#include "HashMap.h"
+#include "Vector.h"
+
+namespace WTF {
+
+ template<typename Value, typename HashFunctions = typename DefaultHash<Value>::Hash,
+ typename Traits = HashTraits<Value> > class HashCountedSet : public FastAllocBase {
+ private:
+ typedef HashMap<Value, unsigned, HashFunctions, Traits> ImplType;
+ public:
+ typedef Value ValueType;
+ typedef typename ImplType::iterator iterator;
+ typedef typename ImplType::const_iterator const_iterator;
+
+ HashCountedSet() {}
+
+ int size() const;
+ int capacity() const;
+ bool isEmpty() const;
+
+ // iterators iterate over pairs of values and counts
+ iterator begin();
+ iterator end();
+ const_iterator begin() const;
+ const_iterator end() const;
+
+ iterator find(const ValueType& value);
+ const_iterator find(const ValueType& value) const;
+ bool contains(const ValueType& value) const;
+ unsigned count(const ValueType& value) const;
+
+ // increases the count if an equal value is already present
+        // the return value is a pair of an iterator to the new value's location,
+        // and a bool that is true if a new entry was added
+ std::pair<iterator, bool> add(const ValueType &value);
+
+ // reduces the count of the value, and removes it if count
+ // goes down to zero
+ void remove(const ValueType& value);
+ void remove(iterator it);
+
+ void clear();
+
+ private:
+ ImplType m_impl;
+ };
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline int HashCountedSet<Value, HashFunctions, Traits>::size() const
+ {
+ return m_impl.size();
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline int HashCountedSet<Value, HashFunctions, Traits>::capacity() const
+ {
+ return m_impl.capacity();
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline bool HashCountedSet<Value, HashFunctions, Traits>::isEmpty() const
+ {
+ return size() == 0;
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline typename HashCountedSet<Value, HashFunctions, Traits>::iterator HashCountedSet<Value, HashFunctions, Traits>::begin()
+ {
+ return m_impl.begin();
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline typename HashCountedSet<Value, HashFunctions, Traits>::iterator HashCountedSet<Value, HashFunctions, Traits>::end()
+ {
+ return m_impl.end();
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline typename HashCountedSet<Value, HashFunctions, Traits>::const_iterator HashCountedSet<Value, HashFunctions, Traits>::begin() const
+ {
+ return m_impl.begin();
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline typename HashCountedSet<Value, HashFunctions, Traits>::const_iterator HashCountedSet<Value, HashFunctions, Traits>::end() const
+ {
+ return m_impl.end();
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline typename HashCountedSet<Value, HashFunctions, Traits>::iterator HashCountedSet<Value, HashFunctions, Traits>::find(const ValueType& value)
+ {
+ return m_impl.find(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline typename HashCountedSet<Value, HashFunctions, Traits>::const_iterator HashCountedSet<Value, HashFunctions, Traits>::find(const ValueType& value) const
+ {
+ return m_impl.find(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline bool HashCountedSet<Value, HashFunctions, Traits>::contains(const ValueType& value) const
+ {
+ return m_impl.contains(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline unsigned HashCountedSet<Value, HashFunctions, Traits>::count(const ValueType& value) const
+ {
+ return m_impl.get(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline std::pair<typename HashCountedSet<Value, HashFunctions, Traits>::iterator, bool> HashCountedSet<Value, HashFunctions, Traits>::add(const ValueType &value)
+ {
+ pair<iterator, bool> result = m_impl.add(value, 0);
+ ++result.first->second;
+ return result;
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline void HashCountedSet<Value, HashFunctions, Traits>::remove(const ValueType& value)
+ {
+ remove(find(value));
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline void HashCountedSet<Value, HashFunctions, Traits>::remove(iterator it)
+ {
+ if (it == end())
+ return;
+
+ unsigned oldVal = it->second;
+ ASSERT(oldVal != 0);
+ unsigned newVal = oldVal - 1;
+ if (newVal == 0)
+ m_impl.remove(it);
+ else
+ it->second = newVal;
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline void HashCountedSet<Value, HashFunctions, Traits>::clear()
+ {
+ m_impl.clear();
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits, typename VectorType>
+ inline void copyToVector(const HashCountedSet<Value, HashFunctions, Traits>& collection, VectorType& vector)
+ {
+ typedef typename HashCountedSet<Value, HashFunctions, Traits>::const_iterator iterator;
+
+ vector.resize(collection.size());
+
+ iterator it = collection.begin();
+ iterator end = collection.end();
+ for (unsigned i = 0; it != end; ++it, ++i)
+ vector[i] = *it;
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ inline void copyToVector(const HashCountedSet<Value, HashFunctions, Traits>& collection, Vector<Value>& vector)
+ {
+ typedef typename HashCountedSet<Value, HashFunctions, Traits>::const_iterator iterator;
+
+ vector.resize(collection.size());
+
+ iterator it = collection.begin();
+ iterator end = collection.end();
+ for (unsigned i = 0; it != end; ++it, ++i)
+ vector[i] = (*it).first;
+ }
+
+
+} // namespace WTF
+
+using WTF::HashCountedSet;
+
+#endif /* WTF_HashCountedSet_h */
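
A short sketch of the counting semantics defined above (assumes a WTF build environment; ASSERT comes in through Assertions.h, which this header already includes):

    #include <wtf/HashCountedSet.h>

    void example()
    {
        HashCountedSet<int> set;

        set.add(5);              // count(5) == 1; returns {iterator, true}
        set.add(5);              // count(5) == 2; returns {iterator, false}
        ASSERT(set.count(5) == 2);
        ASSERT(set.size() == 1); // one distinct value

        set.remove(5);           // count drops to 1; the value stays
        ASSERT(set.contains(5));
        set.remove(5);           // count hits 0; the value is removed
        ASSERT(!set.contains(5));
    }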
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashFunctions.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashFunctions.h
new file mode 100644
index 0000000..13afb72
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashFunctions.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2005, 2006, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_HashFunctions_h
+#define WTF_HashFunctions_h
+
+#include "RefPtr.h"
+#include <stdint.h>
+
+namespace WTF {
+
+ template<size_t size> struct IntTypes;
+ template<> struct IntTypes<1> { typedef int8_t SignedType; typedef uint8_t UnsignedType; };
+ template<> struct IntTypes<2> { typedef int16_t SignedType; typedef uint16_t UnsignedType; };
+ template<> struct IntTypes<4> { typedef int32_t SignedType; typedef uint32_t UnsignedType; };
+ template<> struct IntTypes<8> { typedef int64_t SignedType; typedef uint64_t UnsignedType; };
+
+ // integer hash function
+
+ // Thomas Wang's 32 Bit Mix Function: http://www.cris.com/~Ttwang/tech/inthash.htm
+ inline unsigned intHash(uint8_t key8)
+ {
+ unsigned key = key8;
+ key += ~(key << 15);
+ key ^= (key >> 10);
+ key += (key << 3);
+ key ^= (key >> 6);
+ key += ~(key << 11);
+ key ^= (key >> 16);
+ return key;
+ }
+
+ // Thomas Wang's 32 Bit Mix Function: http://www.cris.com/~Ttwang/tech/inthash.htm
+ inline unsigned intHash(uint16_t key16)
+ {
+ unsigned key = key16;
+ key += ~(key << 15);
+ key ^= (key >> 10);
+ key += (key << 3);
+ key ^= (key >> 6);
+ key += ~(key << 11);
+ key ^= (key >> 16);
+ return key;
+ }
+
+ // Thomas Wang's 32 Bit Mix Function: http://www.cris.com/~Ttwang/tech/inthash.htm
+ inline unsigned intHash(uint32_t key)
+ {
+ key += ~(key << 15);
+ key ^= (key >> 10);
+ key += (key << 3);
+ key ^= (key >> 6);
+ key += ~(key << 11);
+ key ^= (key >> 16);
+ return key;
+ }
+
+ // Thomas Wang's 64 bit Mix Function: http://www.cris.com/~Ttwang/tech/inthash.htm
+ inline unsigned intHash(uint64_t key)
+ {
+ key += ~(key << 32);
+ key ^= (key >> 22);
+ key += ~(key << 13);
+ key ^= (key >> 8);
+ key += (key << 3);
+ key ^= (key >> 15);
+ key += ~(key << 27);
+ key ^= (key >> 31);
+ return static_cast<unsigned>(key);
+ }
+
+ template<typename T> struct IntHash {
+ static unsigned hash(T key) { return intHash(static_cast<typename IntTypes<sizeof(T)>::UnsignedType>(key)); }
+ static bool equal(T a, T b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+ };
+
+ template<typename T> struct FloatHash {
+ static unsigned hash(T key)
+ {
+ union {
+ T key;
+ typename IntTypes<sizeof(T)>::UnsignedType bits;
+ } u;
+ u.key = key;
+ return intHash(u.bits);
+ }
+ static bool equal(T a, T b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+ };
+
+ // pointer identity hash function
+
+ template<typename T> struct PtrHash {
+ static unsigned hash(T key)
+ {
+#if COMPILER(MSVC)
+#pragma warning(push)
+#pragma warning(disable: 4244) // work around what seems to be a bug in MSVC's conversion warnings
+#endif
+ return IntHash<uintptr_t>::hash(reinterpret_cast<uintptr_t>(key));
+#if COMPILER(MSVC)
+#pragma warning(pop)
+#endif
+ }
+ static bool equal(T a, T b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+ };
+ template<typename P> struct PtrHash<RefPtr<P> > : PtrHash<P*> {
+ using PtrHash<P*>::hash;
+ static unsigned hash(const RefPtr<P>& key) { return hash(key.get()); }
+ using PtrHash<P*>::equal;
+ static bool equal(const RefPtr<P>& a, const RefPtr<P>& b) { return a == b; }
+ static bool equal(P* a, const RefPtr<P>& b) { return a == b; }
+ static bool equal(const RefPtr<P>& a, P* b) { return a == b; }
+ };
+
+ // default hash function for each type
+
+ template<typename T> struct DefaultHash;
+
+ template<typename T, typename U> struct PairHash {
+ static unsigned hash(const std::pair<T, U>& p)
+ {
+ return intHash((static_cast<uint64_t>(DefaultHash<T>::Hash::hash(p.first)) << 32 | DefaultHash<U>::Hash::hash(p.second)));
+ }
+ static bool equal(const std::pair<T, U>& a, const std::pair<T, U>& b)
+ {
+ return DefaultHash<T>::Hash::equal(a.first, b.first) && DefaultHash<U>::Hash::equal(a.second, b.second);
+ }
+ static const bool safeToCompareToEmptyOrDeleted = DefaultHash<T>::Hash::safeToCompareToEmptyOrDeleted
+ && DefaultHash<U>::Hash::safeToCompareToEmptyOrDeleted;
+ };
+
+ // make IntHash the default hash function for many integer types
+
+ template<> struct DefaultHash<short> { typedef IntHash<unsigned> Hash; };
+ template<> struct DefaultHash<unsigned short> { typedef IntHash<unsigned> Hash; };
+ template<> struct DefaultHash<int> { typedef IntHash<unsigned> Hash; };
+ template<> struct DefaultHash<unsigned> { typedef IntHash<unsigned> Hash; };
+ template<> struct DefaultHash<long> { typedef IntHash<unsigned long> Hash; };
+ template<> struct DefaultHash<unsigned long> { typedef IntHash<unsigned long> Hash; };
+ template<> struct DefaultHash<long long> { typedef IntHash<unsigned long long> Hash; };
+ template<> struct DefaultHash<unsigned long long> { typedef IntHash<unsigned long long> Hash; };
+
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ template<> struct DefaultHash<wchar_t> { typedef IntHash<wchar_t> Hash; };
+#endif
+
+ template<> struct DefaultHash<float> { typedef FloatHash<float> Hash; };
+ template<> struct DefaultHash<double> { typedef FloatHash<double> Hash; };
+
+ // make PtrHash the default hash function for pointer types that don't specialize
+
+ template<typename P> struct DefaultHash<P*> { typedef PtrHash<P*> Hash; };
+ template<typename P> struct DefaultHash<RefPtr<P> > { typedef PtrHash<RefPtr<P> > Hash; };
+
+ template<typename T, typename U> struct DefaultHash<std::pair<T, U> > { typedef PairHash<T, U> Hash; };
+
+ // Golden ratio - arbitrary start value to avoid mapping all 0's to all 0's
+ static const unsigned stringHashingStartValue = 0x9e3779b9U;
+
+} // namespace WTF
+
+using WTF::DefaultHash;
+using WTF::IntHash;
+using WTF::PtrHash;
+
+#endif // WTF_HashFunctions_h
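
A standalone restatement of the 32-bit mix above, showing why it is a good fit for power-of-two table sizes: nearby integer keys end up in unrelated buckets once the hash is masked down to the table capacity.

    #include <cstdio>
    #include <stdint.h>

    static unsigned mix32(uint32_t key) // same steps as intHash(uint32_t)
    {
        key += ~(key << 15);
        key ^= (key >> 10);
        key += (key << 3);
        key ^= (key >> 6);
        key += ~(key << 11);
        key ^= (key >> 16);
        return key;
    }

    int main()
    {
        // Sequential keys scatter across the 8 buckets of a small table
        // (masking with capacity - 1, as an open-addressed table does).
        for (uint32_t k = 0; k < 4; ++k)
            std::printf("key %u -> hash 0x%08x -> bucket %u\n",
                        k, mix32(k), mix32(k) & 7);
    }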
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashIterators.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashIterators.h
new file mode 100644
index 0000000..682c83b
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashIterators.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_HashIterators_h
+#define WTF_HashIterators_h
+
+namespace WTF {
+
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstKeysIterator;
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstValuesIterator;
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableKeysIterator;
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableValuesIterator;
+
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > {
+ private:
+ typedef std::pair<KeyType, MappedType> ValueType;
+ public:
+ typedef HashTableConstKeysIterator<HashTableType, KeyType, MappedType> Keys;
+ typedef HashTableConstValuesIterator<HashTableType, KeyType, MappedType> Values;
+
+ HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {}
+
+ const ValueType* get() const { return (const ValueType*)m_impl.get(); }
+ const ValueType& operator*() const { return *get(); }
+ const ValueType* operator->() const { return get(); }
+
+ HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; }
+ // postfix ++ intentionally omitted
+
+ Keys keys() { return Keys(*this); }
+ Values values() { return Values(*this); }
+
+ typename HashTableType::const_iterator m_impl;
+ };
+
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > {
+ private:
+ typedef std::pair<KeyType, MappedType> ValueType;
+ public:
+ typedef HashTableKeysIterator<HashTableType, KeyType, MappedType> Keys;
+ typedef HashTableValuesIterator<HashTableType, KeyType, MappedType> Values;
+
+ HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {}
+
+ ValueType* get() const { return (ValueType*)m_impl.get(); }
+ ValueType& operator*() const { return *get(); }
+ ValueType* operator->() const { return get(); }
+
+ HashTableIteratorAdapter& operator++() { ++m_impl; return *this; }
+ // postfix ++ intentionally omitted
+
+ operator HashTableConstIteratorAdapter<HashTableType, ValueType>() {
+ typename HashTableType::const_iterator i = m_impl;
+ return i;
+ }
+
+ Keys keys() { return Keys(*this); }
+ Values values() { return Values(*this); }
+
+ typename HashTableType::iterator m_impl;
+ };
+
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstKeysIterator {
+ private:
+ typedef HashTableConstIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > ConstIterator;
+
+ public:
+ HashTableConstKeysIterator(const ConstIterator& impl) : m_impl(impl) {}
+
+ const KeyType* get() const { return &(m_impl.get()->first); }
+ const KeyType& operator*() const { return *get(); }
+ const KeyType* operator->() const { return get(); }
+
+ HashTableConstKeysIterator& operator++() { ++m_impl; return *this; }
+ // postfix ++ intentionally omitted
+
+ ConstIterator m_impl;
+ };
+
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstValuesIterator {
+ private:
+ typedef HashTableConstIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > ConstIterator;
+
+ public:
+ HashTableConstValuesIterator(const ConstIterator& impl) : m_impl(impl) {}
+
+ const MappedType* get() const { return &(m_impl.get()->second); }
+ const MappedType& operator*() const { return *get(); }
+ const MappedType* operator->() const { return get(); }
+
+ HashTableConstValuesIterator& operator++() { ++m_impl; return *this; }
+ // postfix ++ intentionally omitted
+
+ ConstIterator m_impl;
+ };
+
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableKeysIterator {
+ private:
+ typedef HashTableIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > Iterator;
+ typedef HashTableConstIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > ConstIterator;
+
+ public:
+ HashTableKeysIterator(const Iterator& impl) : m_impl(impl) {}
+
+ KeyType* get() const { return &(m_impl.get()->first); }
+ KeyType& operator*() const { return *get(); }
+ KeyType* operator->() const { return get(); }
+
+ HashTableKeysIterator& operator++() { ++m_impl; return *this; }
+ // postfix ++ intentionally omitted
+
+ operator HashTableConstKeysIterator<HashTableType, KeyType, MappedType>() {
+ ConstIterator i = m_impl;
+ return i;
+ }
+
+ Iterator m_impl;
+ };
+
+ template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableValuesIterator {
+ private:
+ typedef HashTableIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > Iterator;
+ typedef HashTableConstIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > ConstIterator;
+
+ public:
+ HashTableValuesIterator(const Iterator& impl) : m_impl(impl) {}
+
+ MappedType* get() const { return &(m_impl.get()->second); }
+ MappedType& operator*() const { return *get(); }
+ MappedType* operator->() const { return get(); }
+
+ HashTableValuesIterator& operator++() { ++m_impl; return *this; }
+ // postfix ++ intentionally omitted
+
+ operator HashTableConstValuesIterator<HashTableType, KeyType, MappedType>() {
+ ConstIterator i = m_impl;
+ return i;
+ }
+
+ Iterator m_impl;
+ };
+
+ template<typename T, typename U, typename V>
+ inline bool operator==(const HashTableConstKeysIterator<T, U, V>& a, const HashTableConstKeysIterator<T, U, V>& b)
+ {
+ return a.m_impl == b.m_impl;
+ }
+
+ template<typename T, typename U, typename V>
+ inline bool operator!=(const HashTableConstKeysIterator<T, U, V>& a, const HashTableConstKeysIterator<T, U, V>& b)
+ {
+ return a.m_impl != b.m_impl;
+ }
+
+ template<typename T, typename U, typename V>
+ inline bool operator==(const HashTableConstValuesIterator<T, U, V>& a, const HashTableConstValuesIterator<T, U, V>& b)
+ {
+ return a.m_impl == b.m_impl;
+ }
+
+ template<typename T, typename U, typename V>
+ inline bool operator!=(const HashTableConstValuesIterator<T, U, V>& a, const HashTableConstValuesIterator<T, U, V>& b)
+ {
+ return a.m_impl != b.m_impl;
+ }
+
+ template<typename T, typename U, typename V>
+ inline bool operator==(const HashTableKeysIterator<T, U, V>& a, const HashTableKeysIterator<T, U, V>& b)
+ {
+ return a.m_impl == b.m_impl;
+ }
+
+ template<typename T, typename U, typename V>
+ inline bool operator!=(const HashTableKeysIterator<T, U, V>& a, const HashTableKeysIterator<T, U, V>& b)
+ {
+ return a.m_impl != b.m_impl;
+ }
+
+ template<typename T, typename U, typename V>
+ inline bool operator==(const HashTableValuesIterator<T, U, V>& a, const HashTableValuesIterator<T, U, V>& b)
+ {
+ return a.m_impl == b.m_impl;
+ }
+
+ template<typename T, typename U, typename V>
+ inline bool operator!=(const HashTableValuesIterator<T, U, V>& a, const HashTableValuesIterator<T, U, V>& b)
+ {
+ return a.m_impl != b.m_impl;
+ }
+
+
+} // namespace WTF
+
+#endif // WTF_HashIterators_h
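
These adapters let callers walk only the keys or only the values of a map, which is exactly how copyKeysToVector/copyValuesToVector in HashMap.h use them. A usage sketch (assumes a WTF build environment):

    #include <wtf/HashMap.h>

    void example(const HashMap<int, double>& map)
    {
        typedef HashMap<int, double>::const_iterator::Values ValuesIterator;
        typedef HashMap<int, double>::const_iterator::Keys KeysIterator;

        double sum = 0;
        for (ValuesIterator it = map.begin().values(), end = map.end().values(); it != end; ++it)
            sum += *it;           // *it is the mapped double, not a pair

        int largestKey = 0;
        for (KeysIterator it = map.begin().keys(), end = map.end().keys(); it != end; ++it)
            if (*it > largestKey)
                largestKey = *it; // *it is the key alone
        (void)sum;
        (void)largestKey;
    }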
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashMap.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashMap.h
new file mode 100644
index 0000000..8ff9170
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashMap.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_HashMap_h
+#define WTF_HashMap_h
+
+#include "HashTable.h"
+
+namespace WTF {
+
+ template<typename PairType> struct PairFirstExtractor;
+
+ template<typename KeyArg, typename MappedArg, typename HashArg = typename DefaultHash<KeyArg>::Hash,
+ typename KeyTraitsArg = HashTraits<KeyArg>, typename MappedTraitsArg = HashTraits<MappedArg> >
+ class HashMap : public FastAllocBase {
+ private:
+ typedef KeyTraitsArg KeyTraits;
+ typedef MappedTraitsArg MappedTraits;
+ typedef PairHashTraits<KeyTraits, MappedTraits> ValueTraits;
+
+ public:
+ typedef typename KeyTraits::TraitType KeyType;
+ typedef typename MappedTraits::TraitType MappedType;
+ typedef typename ValueTraits::TraitType ValueType;
+
+ private:
+ typedef HashArg HashFunctions;
+
+ typedef HashTable<KeyType, ValueType, PairFirstExtractor<ValueType>,
+ HashFunctions, ValueTraits, KeyTraits> HashTableType;
+
+ public:
+ typedef HashTableIteratorAdapter<HashTableType, ValueType> iterator;
+ typedef HashTableConstIteratorAdapter<HashTableType, ValueType> const_iterator;
+
+ void swap(HashMap&);
+
+ int size() const;
+ int capacity() const;
+ bool isEmpty() const;
+
+ // iterators iterate over pairs of keys and values
+ iterator begin();
+ iterator end();
+ const_iterator begin() const;
+ const_iterator end() const;
+
+ iterator find(const KeyType&);
+ const_iterator find(const KeyType&) const;
+ bool contains(const KeyType&) const;
+ MappedType get(const KeyType&) const;
+
+ // replaces value but not key if key is already present
+ // return value is a pair of the iterator to the key location,
+ // and a boolean that's true if a new value was actually added
+ pair<iterator, bool> set(const KeyType&, const MappedType&);
+
+ // does nothing if key is already present
+ // return value is a pair of the iterator to the key location,
+ // and a boolean that's true if a new value was actually added
+ pair<iterator, bool> add(const KeyType&, const MappedType&);
+
+ void remove(const KeyType&);
+ void remove(iterator);
+ void clear();
+
+ MappedType take(const KeyType&); // efficient combination of get with remove
+
+ private:
+ pair<iterator, bool> inlineAdd(const KeyType&, const MappedType&);
+
+ HashTableType m_impl;
+ };
+
+ template<typename PairType> struct PairFirstExtractor {
+ static const typename PairType::first_type& extract(const PairType& p) { return p.first; }
+ };
+
+ template<typename ValueType, typename ValueTraits, typename HashFunctions>
+ struct HashMapTranslator {
+ typedef typename ValueType::first_type KeyType;
+ typedef typename ValueType::second_type MappedType;
+
+ static unsigned hash(const KeyType& key) { return HashFunctions::hash(key); }
+ static bool equal(const KeyType& a, const KeyType& b) { return HashFunctions::equal(a, b); }
+ static void translate(ValueType& location, const KeyType& key, const MappedType& mapped)
+ {
+ location.first = key;
+ location.second = mapped;
+ }
+ };
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void HashMap<T, U, V, W, X>::swap(HashMap& other)
+ {
+ m_impl.swap(other.m_impl);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline int HashMap<T, U, V, W, X>::size() const
+ {
+ return m_impl.size();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline int HashMap<T, U, V, W, X>::capacity() const
+ {
+ return m_impl.capacity();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline bool HashMap<T, U, V, W, X>::isEmpty() const
+ {
+ return m_impl.isEmpty();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename HashMap<T, U, V, W, X>::iterator HashMap<T, U, V, W, X>::begin()
+ {
+ return m_impl.begin();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename HashMap<T, U, V, W, X>::iterator HashMap<T, U, V, W, X>::end()
+ {
+ return m_impl.end();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename HashMap<T, U, V, W, X>::const_iterator HashMap<T, U, V, W, X>::begin() const
+ {
+ return m_impl.begin();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename HashMap<T, U, V, W, X>::const_iterator HashMap<T, U, V, W, X>::end() const
+ {
+ return m_impl.end();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename HashMap<T, U, V, W, X>::iterator HashMap<T, U, V, W, X>::find(const KeyType& key)
+ {
+ return m_impl.find(key);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename HashMap<T, U, V, W, X>::const_iterator HashMap<T, U, V, W, X>::find(const KeyType& key) const
+ {
+ return m_impl.find(key);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline bool HashMap<T, U, V, W, X>::contains(const KeyType& key) const
+ {
+ return m_impl.contains(key);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline pair<typename HashMap<T, U, V, W, X>::iterator, bool>
+ HashMap<T, U, V, W, X>::inlineAdd(const KeyType& key, const MappedType& mapped)
+ {
+ typedef HashMapTranslator<ValueType, ValueTraits, HashFunctions> TranslatorType;
+ pair<typename HashTableType::iterator, bool> p = m_impl.template add<KeyType, MappedType, TranslatorType>(key, mapped);
+ typename HashMap<T, U, V, W, X>::iterator temp = p.first;
+ return make_pair<typename HashMap<T, U, V, W, X>::iterator, bool>(temp, p.second);
+// return m_impl.template add<KeyType, MappedType, TranslatorType>(key, mapped);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ pair<typename HashMap<T, U, V, W, X>::iterator, bool>
+ HashMap<T, U, V, W, X>::set(const KeyType& key, const MappedType& mapped)
+ {
+ pair<iterator, bool> result = inlineAdd(key, mapped);
+ if (!result.second) {
+ // add call above didn't change anything, so set the mapped value
+ result.first->second = mapped;
+ }
+ return result;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ pair<typename HashMap<T, U, V, W, X>::iterator, bool>
+ HashMap<T, U, V, W, X>::add(const KeyType& key, const MappedType& mapped)
+ {
+ return inlineAdd(key, mapped);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename MappedTraits>
+ typename HashMap<T, U, V, W, MappedTraits>::MappedType
+ HashMap<T, U, V, W, MappedTraits>::get(const KeyType& key) const
+ {
+ ValueType* entry = const_cast<HashTableType&>(m_impl).lookup(key);
+ if (!entry)
+ return MappedTraits::emptyValue();
+ return entry->second;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void HashMap<T, U, V, W, X>::remove(iterator it)
+ {
+ if (it.m_impl == m_impl.end())
+ return;
+ m_impl.checkTableConsistency();
+ m_impl.removeWithoutEntryConsistencyCheck(it.m_impl);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void HashMap<T, U, V, W, X>::remove(const KeyType& key)
+ {
+ remove(find(key));
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void HashMap<T, U, V, W, X>::clear()
+ {
+ m_impl.clear();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename MappedTraits>
+ typename HashMap<T, U, V, W, MappedTraits>::MappedType
+ HashMap<T, U, V, W, MappedTraits>::take(const KeyType& key)
+ {
+ // This can probably be made more efficient to avoid ref/deref churn.
+ iterator it = find(key);
+ if (it == end())
+ return MappedTraits::emptyValue();
+ typename HashMap<T, U, V, W, MappedTraits>::MappedType result = it->second;
+ remove(it);
+ return result;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ bool operator==(const HashMap<T, U, V, W, X>& a, const HashMap<T, U, V, W, X>& b)
+ {
+ if (a.size() != b.size())
+ return false;
+
+ typedef typename HashMap<T, U, V, W, X>::const_iterator const_iterator;
+
+ const_iterator end = a.end();
+ const_iterator notFound = b.end();
+ for (const_iterator it = a.begin(); it != end; ++it) {
+ const_iterator bPos = b.find(it->first);
+ if (bPos == notFound || it->second != bPos->second)
+ return false;
+ }
+
+ return true;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline bool operator!=(const HashMap<T, U, V, W, X>& a, const HashMap<T, U, V, W, X>& b)
+ {
+ return !(a == b);
+ }
+
+ template<typename MappedType, typename HashTableType>
+ void deleteAllPairSeconds(HashTableType& collection)
+ {
+ typedef typename HashTableType::const_iterator iterator;
+ iterator end = collection.end();
+ for (iterator it = collection.begin(); it != end; ++it)
+ delete it->second;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void deleteAllValues(const HashMap<T, U, V, W, X>& collection)
+ {
+ deleteAllPairSeconds<typename HashMap<T, U, V, W, X>::MappedType>(collection);
+ }
+
+ template<typename KeyType, typename HashTableType>
+ void deleteAllPairFirsts(HashTableType& collection)
+ {
+ typedef typename HashTableType::const_iterator iterator;
+ iterator end = collection.end();
+ for (iterator it = collection.begin(); it != end; ++it)
+ delete it->first;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void deleteAllKeys(const HashMap<T, U, V, W, X>& collection)
+ {
+ deleteAllPairFirsts<typename HashMap<T, U, V, W, X>::KeyType>(collection);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X, typename Y>
+ inline void copyKeysToVector(const HashMap<T, U, V, W, X>& collection, Y& vector)
+ {
+ typedef typename HashMap<T, U, V, W, X>::const_iterator::Keys iterator;
+
+ vector.resize(collection.size());
+
+ iterator it = collection.begin().keys();
+ iterator end = collection.end().keys();
+ for (unsigned i = 0; it != end; ++it, ++i)
+ vector[i] = *it;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X, typename Y>
+ inline void copyValuesToVector(const HashMap<T, U, V, W, X>& collection, Y& vector)
+ {
+ typedef typename HashMap<T, U, V, W, X>::const_iterator::Values iterator;
+
+ vector.resize(collection.size());
+
+ iterator it = collection.begin().values();
+ iterator end = collection.end().values();
+ for (unsigned i = 0; it != end; ++it, ++i)
+ vector[i] = *it;
+ }
+
+} // namespace WTF
+
+using WTF::HashMap;
+
+#include "RefPtrHashMap.h"
+
+#endif /* WTF_HashMap_h */
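
A short sketch of the set()/add()/take() semantics documented in the class above (assumes a WTF build environment):

    #include <wtf/HashMap.h>

    void example()
    {
        HashMap<int, int> map;

        map.add(1, 10);          // inserts; the returned bool is true
        map.add(1, 99);          // key present: no change; bool is false
        ASSERT(map.get(1) == 10);

        map.set(1, 99);          // key present: the value is overwritten
        ASSERT(map.get(1) == 99);

        int taken = map.take(1); // get combined with remove
        ASSERT(taken == 99);
        ASSERT(!map.contains(1));
        ASSERT(map.get(1) == 0); // a missing key yields the empty value
    }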
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashSet.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashSet.h
new file mode 100644
index 0000000..ec809e5
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashSet.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_HashSet_h
+#define WTF_HashSet_h
+
+#include "FastAllocBase.h"
+#include "HashTable.h"
+
+namespace WTF {
+
+ template<typename Value, typename HashFunctions, typename Traits> class HashSet;
+ template<typename Value, typename HashFunctions, typename Traits>
+ void deleteAllValues(const HashSet<Value, HashFunctions, Traits>&);
+
+ template<typename T> struct IdentityExtractor;
+
+ template<typename ValueArg, typename HashArg = typename DefaultHash<ValueArg>::Hash,
+ typename TraitsArg = HashTraits<ValueArg> > class HashSet : public FastAllocBase {
+ private:
+ typedef HashArg HashFunctions;
+ typedef TraitsArg ValueTraits;
+
+ public:
+ typedef typename ValueTraits::TraitType ValueType;
+
+ private:
+ typedef HashTable<ValueType, ValueType, IdentityExtractor<ValueType>,
+ HashFunctions, ValueTraits, ValueTraits> HashTableType;
+
+ public:
+ typedef HashTableIteratorAdapter<HashTableType, ValueType> iterator;
+ typedef HashTableConstIteratorAdapter<HashTableType, ValueType> const_iterator;
+
+ void swap(HashSet&);
+
+ int size() const;
+ int capacity() const;
+ bool isEmpty() const;
+
+ iterator begin();
+ iterator end();
+ const_iterator begin() const;
+ const_iterator end() const;
+
+ iterator find(const ValueType&);
+ const_iterator find(const ValueType&) const;
+ bool contains(const ValueType&) const;
+
+ // An alternate version of find() that finds the object by hashing and comparing
+ // with some other type, to avoid the cost of type conversion. HashTranslator
+ // must have the following function members:
+ // static unsigned hash(const T&);
+ // static bool equal(const ValueType&, const T&);
+ template<typename T, typename HashTranslator> iterator find(const T&);
+ template<typename T, typename HashTranslator> const_iterator find(const T&) const;
+ template<typename T, typename HashTranslator> bool contains(const T&) const;
+
+        // The return value is a pair of an iterator to the new value's location,
+        // and a bool that is true if a new entry was added.
+ pair<iterator, bool> add(const ValueType&);
+
+ // An alternate version of add() that finds the object by hashing and comparing
+ // with some other type, to avoid the cost of type conversion if the object is already
+ // in the table. HashTranslator must have the following methods:
+ // static unsigned hash(const T&);
+ // static bool equal(const ValueType&, const T&);
+        //   static void translate(ValueType&, const T&, unsigned hashCode);
+ template<typename T, typename HashTranslator> pair<iterator, bool> add(const T&);
+
+ void remove(const ValueType&);
+ void remove(iterator);
+ void clear();
+
+ private:
+ friend void deleteAllValues<>(const HashSet&);
+
+ HashTableType m_impl;
+ };
+
+ template<typename T> struct IdentityExtractor {
+ static const T& extract(const T& t) { return t; }
+ };
+
+ template<typename ValueType, typename ValueTraits, typename T, typename Translator>
+ struct HashSetTranslatorAdapter {
+ static unsigned hash(const T& key) { return Translator::hash(key); }
+ static bool equal(const ValueType& a, const T& b) { return Translator::equal(a, b); }
+ static void translate(ValueType& location, const T& key, const T&, unsigned hashCode)
+ {
+ Translator::translate(location, key, hashCode);
+ }
+ };
+
+ template<typename T, typename U, typename V>
+ inline void HashSet<T, U, V>::swap(HashSet& other)
+ {
+ m_impl.swap(other.m_impl);
+ }
+
+ template<typename T, typename U, typename V>
+ inline int HashSet<T, U, V>::size() const
+ {
+ return m_impl.size();
+ }
+
+ template<typename T, typename U, typename V>
+ inline int HashSet<T, U, V>::capacity() const
+ {
+ return m_impl.capacity();
+ }
+
+ template<typename T, typename U, typename V>
+ inline bool HashSet<T, U, V>::isEmpty() const
+ {
+ return m_impl.isEmpty();
+ }
+
+ template<typename T, typename U, typename V>
+ inline typename HashSet<T, U, V>::iterator HashSet<T, U, V>::begin()
+ {
+ return m_impl.begin();
+ }
+
+ template<typename T, typename U, typename V>
+ inline typename HashSet<T, U, V>::iterator HashSet<T, U, V>::end()
+ {
+ return m_impl.end();
+ }
+
+ template<typename T, typename U, typename V>
+ inline typename HashSet<T, U, V>::const_iterator HashSet<T, U, V>::begin() const
+ {
+ return m_impl.begin();
+ }
+
+ template<typename T, typename U, typename V>
+ inline typename HashSet<T, U, V>::const_iterator HashSet<T, U, V>::end() const
+ {
+ return m_impl.end();
+ }
+
+ template<typename T, typename U, typename V>
+ inline typename HashSet<T, U, V>::iterator HashSet<T, U, V>::find(const ValueType& value)
+ {
+ return m_impl.find(value);
+ }
+
+ template<typename T, typename U, typename V>
+ inline typename HashSet<T, U, V>::const_iterator HashSet<T, U, V>::find(const ValueType& value) const
+ {
+ return m_impl.find(value);
+ }
+
+ template<typename T, typename U, typename V>
+ inline bool HashSet<T, U, V>::contains(const ValueType& value) const
+ {
+ return m_impl.contains(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename T, typename HashTranslator>
+ typename HashSet<Value, HashFunctions, Traits>::iterator
+ inline HashSet<Value, HashFunctions, Traits>::find(const T& value)
+ {
+ typedef HashSetTranslatorAdapter<ValueType, ValueTraits, T, HashTranslator> Adapter;
+ return m_impl.template find<T, Adapter>(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename T, typename HashTranslator>
+ typename HashSet<Value, HashFunctions, Traits>::const_iterator
+ inline HashSet<Value, HashFunctions, Traits>::find(const T& value) const
+ {
+ typedef HashSetTranslatorAdapter<ValueType, ValueTraits, T, HashTranslator> Adapter;
+ return m_impl.template find<T, Adapter>(value);
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename T, typename HashTranslator>
+ inline bool HashSet<Value, HashFunctions, Traits>::contains(const T& value) const
+ {
+ typedef HashSetTranslatorAdapter<ValueType, ValueTraits, T, HashTranslator> Adapter;
+ return m_impl.template contains<T, Adapter>(value);
+ }
+
+ template<typename T, typename U, typename V>
+ pair<typename HashSet<T, U, V>::iterator, bool> HashSet<T, U, V>::add(const ValueType& value)
+ {
+ pair<typename HashTable<T, T, IdentityExtractor<T>, U, V, V>::iterator, bool> p = m_impl.add(value);
+ typename HashSet<T, U, V>::iterator temp = p.first;
+ pair<typename HashSet<T, U, V>::iterator, bool> p2 = make_pair<typename HashSet<T, U, V>::iterator, bool>(temp, p.second);
+ // p2.first = p.first;
+ // p2.second = p.second;
+ return p2;
+ }
+
+ template<typename Value, typename HashFunctions, typename Traits>
+ template<typename T, typename HashTranslator>
+ pair<typename HashSet<Value, HashFunctions, Traits>::iterator, bool>
+ HashSet<Value, HashFunctions, Traits>::add(const T& value)
+ {
+ typedef HashSetTranslatorAdapter<ValueType, ValueTraits, T, HashTranslator> Adapter;
+ pair<typename HashTableType::iterator, bool> p = m_impl.template addPassingHashCode<T, T, Adapter>(value, value);
+ return make_pair<iterator, bool>(p.first, p.second);
+ }
+
+ template<typename T, typename U, typename V>
+ inline void HashSet<T, U, V>::remove(iterator it)
+ {
+ if (it.m_impl == m_impl.end())
+ return;
+ m_impl.checkTableConsistency();
+ m_impl.removeWithoutEntryConsistencyCheck(it.m_impl);
+ }
+
+ template<typename T, typename U, typename V>
+ inline void HashSet<T, U, V>::remove(const ValueType& value)
+ {
+ remove(find(value));
+ }
+
+ template<typename T, typename U, typename V>
+ inline void HashSet<T, U, V>::clear()
+ {
+ m_impl.clear();
+ }
+
+ template<typename ValueType, typename HashTableType>
+ void deleteAllValues(HashTableType& collection)
+ {
+ typedef typename HashTableType::const_iterator iterator;
+ iterator end = collection.end();
+ for (iterator it = collection.begin(); it != end; ++it)
+ delete *it;
+ }
+
+ template<typename T, typename U, typename V>
+ inline void deleteAllValues(const HashSet<T, U, V>& collection)
+ {
+ deleteAllValues<typename HashSet<T, U, V>::ValueType>(collection.m_impl);
+ }
+
+ template<typename T, typename U, typename V, typename W>
+ inline void copyToVector(const HashSet<T, U, V>& collection, W& vector)
+ {
+ typedef typename HashSet<T, U, V>::const_iterator iterator;
+
+ vector.resize(collection.size());
+
+ iterator it = collection.begin();
+ iterator end = collection.end();
+ for (unsigned i = 0; it != end; ++it, ++i)
+ vector[i] = *it;
+ }
+
+} // namespace WTF
+
+using WTF::HashSet;
+
+#endif /* WTF_HashSet_h */
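
A short sketch of HashSet membership and the pair returned by add() (assumes a WTF build environment):

    #include <wtf/HashSet.h>

    void example()
    {
        HashSet<int> set;

        pair<HashSet<int>::iterator, bool> result = set.add(3);
        ASSERT(result.second);      // newly inserted
        ASSERT(*result.first == 3); // iterator points at the stored value

        result = set.add(3);
        ASSERT(!result.second);     // already present; nothing added

        ASSERT(set.contains(3));
        set.remove(3);
        ASSERT(set.isEmpty());
    }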
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTable.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTable.cpp
new file mode 100644
index 0000000..71d3f86
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTable.cpp
@@ -0,0 +1,69 @@
+/*
+ Copyright (C) 2005 Apple Inc. All rights reserved.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public License
+ along with this library; see the file COPYING.LIB. If not, write to
+ the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA.
+*/
+
+#include "config.h"
+#include "HashTable.h"
+
+namespace WTF {
+
+#if DUMP_HASHTABLE_STATS
+
+int HashTableStats::numAccesses;
+int HashTableStats::numCollisions;
+int HashTableStats::collisionGraph[4096];
+int HashTableStats::maxCollisions;
+int HashTableStats::numRehashes;
+int HashTableStats::numRemoves;
+int HashTableStats::numReinserts;
+
+static HashTableStats logger;
+
+static Mutex& hashTableStatsMutex()
+{
+ AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
+ return mutex;
+}
+
+HashTableStats::~HashTableStats()
+{
+ // Don't lock hashTableStatsMutex here because it can cause deadlocks at shutdown
+ // if any thread was killed while holding the mutex.
+ printf("\nWTF::HashTable statistics\n\n");
+ printf("%d accesses\n", numAccesses);
+ printf("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
+ printf("longest collision chain: %d\n", maxCollisions);
+ for (int i = 1; i <= maxCollisions; i++) {
+ printf(" %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
+ }
+ printf("%d rehashes\n", numRehashes);
+ printf("%d reinserts\n", numReinserts);
+}
+
+void HashTableStats::recordCollisionAtCount(int count)
+{
+ MutexLocker lock(hashTableStatsMutex());
+ if (count > maxCollisions)
+ maxCollisions = count;
+ numCollisions++;
+ collisionGraph[count]++;
+}
+
+#endif
+
+} // namespace WTF
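
The statistics above are printed by the destructor of the file-static logger object, so the report appears automatically during static teardown at process exit. A minimal standalone sketch of that trick:

    #include <cstdio>

    struct Stats {
        static int lookups; // bumped by the code being measured
        ~Stats() { std::printf("%d lookups total\n", lookups); }
    };

    int Stats::lookups;
    static Stats logger; // destroyed after main() returns, printing the report

    int main()
    {
        for (int i = 0; i < 5; ++i)
            ++Stats::lookups;
        return 0; // "5 lookups total" is printed during static destruction
    }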
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTable.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTable.h
new file mode 100644
index 0000000..3b283f8
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTable.h
@@ -0,0 +1,1158 @@
+/*
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 David Levin <levin@chromium.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_HashTable_h
+#define WTF_HashTable_h
+
+#include "FastMalloc.h"
+#include "HashTraits.h"
+#include <wtf/Assertions.h>
+#include <wtf/Threading.h>
+
+namespace WTF {
+
+#define DUMP_HASHTABLE_STATS 0
+#define CHECK_HASHTABLE_CONSISTENCY 0
+
+#ifdef NDEBUG
+#define CHECK_HASHTABLE_ITERATORS 0
+#define CHECK_HASHTABLE_USE_AFTER_DESTRUCTION 0
+#else
+#define CHECK_HASHTABLE_ITERATORS 1
+#define CHECK_HASHTABLE_USE_AFTER_DESTRUCTION 1
+#endif
+
+#if DUMP_HASHTABLE_STATS
+
+ struct HashTableStats {
+ ~HashTableStats();
+ // All of the variables are accessed in ~HashTableStats when the static struct is destroyed.
+
+ // The following variables are all atomically incremented when modified.
+ static int numAccesses;
+ static int numRehashes;
+ static int numRemoves;
+ static int numReinserts;
+
+ // The following variables are only modified in the recordCollisionAtCount method within a mutex.
+ static int maxCollisions;
+ static int numCollisions;
+ static int collisionGraph[4096];
+
+ static void recordCollisionAtCount(int count);
+ };
+
+#endif
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ class HashTable;
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ class HashTableIterator;
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ class HashTableConstIterator;
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void addIterator(const HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*,
+ HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*);
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void removeIterator(HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*);
+
+#if !CHECK_HASHTABLE_ITERATORS
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ inline void addIterator(const HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*,
+ HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*) { }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ inline void removeIterator(HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*) { }
+
+#endif
+
+ typedef enum { HashItemKnownGood } HashItemKnownGoodTag;
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ class HashTableConstIterator {
+ private:
+ typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType;
+ typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator;
+ typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator;
+ typedef Value ValueType;
+ typedef const ValueType& ReferenceType;
+ typedef const ValueType* PointerType;
+
+ friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>;
+ friend class HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>;
+
+ void skipEmptyBuckets()
+ {
+ while (m_position != m_endPosition && HashTableType::isEmptyOrDeletedBucket(*m_position))
+ ++m_position;
+ }
+
+ HashTableConstIterator(const HashTableType* table, PointerType position, PointerType endPosition)
+ : m_position(position), m_endPosition(endPosition)
+ {
+ addIterator(table, this);
+ skipEmptyBuckets();
+ }
+
+ HashTableConstIterator(const HashTableType* table, PointerType position, PointerType endPosition, HashItemKnownGoodTag)
+ : m_position(position), m_endPosition(endPosition)
+ {
+ addIterator(table, this);
+ }
+
+ public:
+ HashTableConstIterator()
+ {
+ addIterator(0, this);
+ }
+
+ // default copy, assignment and destructor are OK if CHECK_HASHTABLE_ITERATORS is 0
+
+#if CHECK_HASHTABLE_ITERATORS
+ ~HashTableConstIterator()
+ {
+ removeIterator(this);
+ }
+
+ HashTableConstIterator(const const_iterator& other)
+ : m_position(other.m_position), m_endPosition(other.m_endPosition)
+ {
+ addIterator(other.m_table, this);
+ }
+
+ const_iterator& operator=(const const_iterator& other)
+ {
+ m_position = other.m_position;
+ m_endPosition = other.m_endPosition;
+
+ removeIterator(this);
+ addIterator(other.m_table, this);
+
+ return *this;
+ }
+#endif
+
+ PointerType get() const
+ {
+ checkValidity();
+ return m_position;
+ }
+ ReferenceType operator*() const { return *get(); }
+ PointerType operator->() const { return get(); }
+
+ const_iterator& operator++()
+ {
+ checkValidity();
+ ASSERT(m_position != m_endPosition);
+ ++m_position;
+ skipEmptyBuckets();
+ return *this;
+ }
+
+ // postfix ++ intentionally omitted
+
+ // Comparison.
+ bool operator==(const const_iterator& other) const
+ {
+ checkValidity(other);
+ return m_position == other.m_position;
+ }
+ bool operator!=(const const_iterator& other) const
+ {
+ checkValidity(other);
+ return m_position != other.m_position;
+ }
+
+ private:
+ void checkValidity() const
+ {
+#if CHECK_HASHTABLE_ITERATORS
+ ASSERT(m_table);
+#endif
+ }
+
+#if CHECK_HASHTABLE_ITERATORS
+ void checkValidity(const const_iterator& other) const
+ {
+ ASSERT(m_table);
+ ASSERT(other.m_table);
+ ASSERT(m_table == other.m_table);
+ }
+#else
+ void checkValidity(const const_iterator&) const { }
+#endif
+
+ PointerType m_position;
+ PointerType m_endPosition;
+
+#if CHECK_HASHTABLE_ITERATORS
+ public:
+        // Any modification of the m_next or m_previous of an iterator that is in
+        // a HashTable's m_iterators linked list must be guarded by m_table->m_mutex.
+ mutable const HashTableType* m_table;
+ mutable const_iterator* m_next;
+ mutable const_iterator* m_previous;
+#endif
+ };
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ class HashTableIterator {
+ private:
+ typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType;
+ typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator;
+ typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator;
+ typedef Value ValueType;
+ typedef ValueType& ReferenceType;
+ typedef ValueType* PointerType;
+
+ friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>;
+
+ HashTableIterator(HashTableType* table, PointerType pos, PointerType end) : m_iterator(table, pos, end) { }
+ HashTableIterator(HashTableType* table, PointerType pos, PointerType end, HashItemKnownGoodTag tag) : m_iterator(table, pos, end, tag) { }
+
+ public:
+ HashTableIterator() { }
+
+ // default copy, assignment and destructor are OK
+
+ PointerType get() const { return const_cast<PointerType>(m_iterator.get()); }
+ ReferenceType operator*() const { return *get(); }
+ PointerType operator->() const { return get(); }
+
+ iterator& operator++() { ++m_iterator; return *this; }
+
+ // postfix ++ intentionally omitted
+
+ // Comparison.
+ bool operator==(const iterator& other) const { return m_iterator == other.m_iterator; }
+ bool operator!=(const iterator& other) const { return m_iterator != other.m_iterator; }
+
+ operator const_iterator() const { return m_iterator; }
+
+ private:
+ const_iterator m_iterator;
+ };
+
+ using std::swap;
+
+#if !COMPILER(MSVC)
+    // Visual C++ already defines a swap for pairs, so this overload is skipped there.
+
+    // Swap pairs by component, so that pair members which specialize swap are honored.
+ template<typename T, typename U> inline void swap(pair<T, U>& a, pair<T, U>& b)
+ {
+ swap(a.first, b.first);
+ swap(a.second, b.second);
+ }
+#endif
+
+ template<typename T, bool useSwap> struct Mover;
+ template<typename T> struct Mover<T, true> { static void move(T& from, T& to) { swap(from, to); } };
+ template<typename T> struct Mover<T, false> { static void move(T& from, T& to) { to = from; } };
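+
+    // Illustrative sketch (not part of the original header) of why Mover exists:
+    // during rehash, entries are moved into a freshly allocated table. For a
+    // type that needs destruction, such as RefPtr<T>, swapping into the new
+    // bucket avoids a ref-count increment/decrement pair:
+    //
+    //     RefPtr<Node> from(node), to;                 // hypothetical values
+    //     Mover<RefPtr<Node>, true>::move(from, to);   // swap: no ref churn
+    //
+    // Trivially destructible types are simply copy-assigned instead.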
+
+ template<typename Key, typename Value, typename HashFunctions> class IdentityHashTranslator {
+ public:
+ static unsigned hash(const Key& key) { return HashFunctions::hash(key); }
+ static bool equal(const Key& a, const Key& b) { return HashFunctions::equal(a, b); }
+ static void translate(Value& location, const Key&, const Value& value) { location = value; }
+ };
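+
+    // A HashTranslator bundles hash(), equal() and translate(); the identity
+    // translator above simply defers to the table's HashFunctions. A custom
+    // translator lets find()/add() probe with a cheaper type than the stored
+    // one -- e.g. hashing a raw char* against stored string values, so that a
+    // full string object is only constructed (in translate()) on insertion.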
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ class HashTable {
+ public:
+ typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator;
+ typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator;
+ typedef Traits ValueTraits;
+ typedef Key KeyType;
+ typedef Value ValueType;
+ typedef IdentityHashTranslator<Key, Value, HashFunctions> IdentityTranslatorType;
+
+ HashTable();
+ ~HashTable()
+ {
+ invalidateIterators();
+ deallocateTable(m_table, m_tableSize);
+#if CHECK_HASHTABLE_USE_AFTER_DESTRUCTION
+ m_table = (ValueType*)(uintptr_t)0xbbadbeef;
+#endif
+ }
+
+ HashTable(const HashTable&);
+ void swap(HashTable&);
+ HashTable& operator=(const HashTable&);
+
+ iterator begin() { return makeIterator(m_table); }
+ iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); }
+ const_iterator begin() const { return makeConstIterator(m_table); }
+ const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tableSize); }
+
+ int size() const { return m_keyCount; }
+ int capacity() const { return m_tableSize; }
+ bool isEmpty() const { return !m_keyCount; }
+
+ pair<iterator, bool> add(const ValueType& value) { return add<KeyType, ValueType, IdentityTranslatorType>(Extractor::extract(value), value); }
+
+ // A special version of add() that finds the object by hashing and comparing
+ // with some other type, to avoid the cost of type conversion if the object is already
+ // in the table.
+ template<typename T, typename Extra, typename HashTranslator> pair<iterator, bool> add(const T& key, const Extra&);
+ template<typename T, typename Extra, typename HashTranslator> pair<iterator, bool> addPassingHashCode(const T& key, const Extra&);
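+
+        // For example, with a char*-based translator as sketched above, a caller
+        // could write add<const char*, ValueType, CharTranslator>(literal, value)
+        // and pay for the key-type conversion only when the key turns out to be
+        // absent from the table. (CharTranslator is hypothetical, not defined here.)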
+
+ iterator find(const KeyType& key) { return find<KeyType, IdentityTranslatorType>(key); }
+ const_iterator find(const KeyType& key) const { return find<KeyType, IdentityTranslatorType>(key); }
+ bool contains(const KeyType& key) const { return contains<KeyType, IdentityTranslatorType>(key); }
+
+ template <typename T, typename HashTranslator> iterator find(const T&);
+ template <typename T, typename HashTranslator> const_iterator find(const T&) const;
+ template <typename T, typename HashTranslator> bool contains(const T&) const;
+
+ void remove(const KeyType&);
+ void remove(iterator);
+ void removeWithoutEntryConsistencyCheck(iterator);
+ void clear();
+
+ static bool isEmptyBucket(const ValueType& value) { return Extractor::extract(value) == KeyTraits::emptyValue(); }
+ static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
+ static bool isEmptyOrDeletedBucket(const ValueType& value) { return isEmptyBucket(value) || isDeletedBucket(value); }
+
+ ValueType* lookup(const Key& key) { return lookup<Key, IdentityTranslatorType>(key); }
+ template<typename T, typename HashTranslator> ValueType* lookup(const T&);
+
+#if CHECK_HASHTABLE_CONSISTENCY
+ void checkTableConsistency() const;
+#else
+ static void checkTableConsistency() { }
+#endif
+
+ private:
+ static ValueType* allocateTable(int size);
+ static void deallocateTable(ValueType* table, int size);
+
+ typedef pair<ValueType*, bool> LookupType;
+ typedef pair<LookupType, unsigned> FullLookupType;
+
+        LookupType lookupForWriting(const Key& key) { return lookupForWriting<Key, IdentityTranslatorType>(key); }
+ template<typename T, typename HashTranslator> FullLookupType fullLookupForWriting(const T&);
+ template<typename T, typename HashTranslator> LookupType lookupForWriting(const T&);
+
+ template<typename T, typename HashTranslator> void checkKey(const T&);
+
+ void removeAndInvalidateWithoutEntryConsistencyCheck(ValueType*);
+ void removeAndInvalidate(ValueType*);
+ void remove(ValueType*);
+
+ bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; }
+ bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; }
+ bool shouldShrink() const { return m_keyCount * m_minLoad < m_tableSize && m_tableSize > m_minTableSize; }
+ void expand();
+ void shrink() { rehash(m_tableSize / 2); }
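+
+        // Worked example of the load arithmetic (m_maxLoad == 2, m_minLoad == 6,
+        // defined below): with m_tableSize == 64, the table expands once
+        // m_keyCount + m_deletedCount reaches 32 (half full); it shrinks when
+        // m_keyCount falls below a sixth of the size (but never below
+        // m_minTableSize); and an expand rehashes in place, rather than growing,
+        // when live keys fill less than a third of the slots, since most of the
+        // load must then be deleted sentinels.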
+
+ void rehash(int newTableSize);
+ void reinsert(ValueType&);
+
+ static void initializeBucket(ValueType& bucket) { new (&bucket) ValueType(Traits::emptyValue()); }
+ static void deleteBucket(ValueType& bucket) { bucket.~ValueType(); Traits::constructDeletedValue(bucket); }
+
+ FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash)
+ { return FullLookupType(LookupType(position, found), hash); }
+
+ iterator makeIterator(ValueType* pos) { return iterator(this, pos, m_table + m_tableSize); }
+ const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(this, pos, m_table + m_tableSize); }
+ iterator makeKnownGoodIterator(ValueType* pos) { return iterator(this, pos, m_table + m_tableSize, HashItemKnownGood); }
+ const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(this, pos, m_table + m_tableSize, HashItemKnownGood); }
+
+#if CHECK_HASHTABLE_CONSISTENCY
+ void checkTableConsistencyExceptSize() const;
+#else
+ static void checkTableConsistencyExceptSize() { }
+#endif
+
+#if CHECK_HASHTABLE_ITERATORS
+ void invalidateIterators();
+#else
+ static void invalidateIterators() { }
+#endif
+
+ static const int m_minTableSize = 64;
+ static const int m_maxLoad = 2;
+ static const int m_minLoad = 6;
+
+ ValueType* m_table;
+ int m_tableSize;
+ int m_tableSizeMask;
+ int m_keyCount;
+ int m_deletedCount;
+
+#if CHECK_HASHTABLE_ITERATORS
+ public:
+ // All access to m_iterators should be guarded with m_mutex.
+ mutable const_iterator* m_iterators;
+ mutable Mutex m_mutex;
+#endif
+ };
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::HashTable()
+ : m_table(0)
+ , m_tableSize(0)
+ , m_tableSizeMask(0)
+ , m_keyCount(0)
+ , m_deletedCount(0)
+#if CHECK_HASHTABLE_ITERATORS
+ , m_iterators(0)
+#endif
+ {
+ }
+
+ static inline unsigned doubleHash(unsigned key)
+ {
+ key = ~key + (key >> 23);
+ key ^= (key << 12);
+ key ^= (key >> 7);
+ key ^= (key << 2);
+ key ^= (key >> 20);
+ return key;
+ }
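+
+    // Probe-sequence sketch: a lookup starts at i = hash & sizeMask and, on each
+    // collision, steps by k = 1 | doubleHash(hash). The table size is a power of
+    // two and k is odd, so gcd(k, tableSize) == 1 and the sequence
+    //
+    //     i, i + k, i + 2k, ... (mod tableSize)
+    //
+    // visits every slot before repeating; termination is guaranteed because the
+    // load factor keeps the table at most half full.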
+
+#if ASSERT_DISABLED
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template<typename T, typename HashTranslator>
+ inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkKey(const T&)
+ {
+ }
+
+#else
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template<typename T, typename HashTranslator>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkKey(const T& key)
+ {
+ if (!HashFunctions::safeToCompareToEmptyOrDeleted)
+ return;
+ ASSERT(!HashTranslator::equal(KeyTraits::emptyValue(), key));
+ ValueType deletedValue = Traits::emptyValue();
+ deletedValue.~ValueType();
+ Traits::constructDeletedValue(deletedValue);
+ ASSERT(!HashTranslator::equal(Extractor::extract(deletedValue), key));
+ new (&deletedValue) ValueType(Traits::emptyValue());
+ }
+
+#endif
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template<typename T, typename HashTranslator>
+ inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::lookup(const T& key)
+ {
+ checkKey<T, HashTranslator>(key);
+
+ int k = 0;
+ int sizeMask = m_tableSizeMask;
+ ValueType* table = m_table;
+ unsigned h = HashTranslator::hash(key);
+ int i = h & sizeMask;
+
+ if (!table)
+ return 0;
+
+#if DUMP_HASHTABLE_STATS
+ atomicIncrement(&HashTableStats::numAccesses);
+ int probeCount = 0;
+#endif
+
+ while (1) {
+ ValueType* entry = table + i;
+
+            // We count on the compiler to optimize out this branch: safeToCompareToEmptyOrDeleted is a compile-time constant.
+ if (HashFunctions::safeToCompareToEmptyOrDeleted) {
+ if (HashTranslator::equal(Extractor::extract(*entry), key))
+ return entry;
+
+ if (isEmptyBucket(*entry))
+ return 0;
+ } else {
+ if (isEmptyBucket(*entry))
+ return 0;
+
+ if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(*entry), key))
+ return entry;
+ }
+#if DUMP_HASHTABLE_STATS
+ ++probeCount;
+ HashTableStats::recordCollisionAtCount(probeCount);
+#endif
+ if (k == 0)
+ k = 1 | doubleHash(h);
+ i = (i + k) & sizeMask;
+ }
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template<typename T, typename HashTranslator>
+ inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::lookupForWriting(const T& key)
+ {
+ ASSERT(m_table);
+ checkKey<T, HashTranslator>(key);
+
+ int k = 0;
+ ValueType* table = m_table;
+ int sizeMask = m_tableSizeMask;
+ unsigned h = HashTranslator::hash(key);
+ int i = h & sizeMask;
+
+#if DUMP_HASHTABLE_STATS
+ atomicIncrement(&HashTableStats::numAccesses);
+ int probeCount = 0;
+#endif
+
+ ValueType* deletedEntry = 0;
+
+ while (1) {
+ ValueType* entry = table + i;
+
+            // We count on the compiler to optimize out this branch: safeToCompareToEmptyOrDeleted is a compile-time constant.
+ if (HashFunctions::safeToCompareToEmptyOrDeleted) {
+ if (isEmptyBucket(*entry))
+ return LookupType(deletedEntry ? deletedEntry : entry, false);
+
+ if (HashTranslator::equal(Extractor::extract(*entry), key))
+ return LookupType(entry, true);
+
+ if (isDeletedBucket(*entry))
+ deletedEntry = entry;
+ } else {
+ if (isEmptyBucket(*entry))
+ return LookupType(deletedEntry ? deletedEntry : entry, false);
+
+ if (isDeletedBucket(*entry))
+ deletedEntry = entry;
+ else if (HashTranslator::equal(Extractor::extract(*entry), key))
+ return LookupType(entry, true);
+ }
+#if DUMP_HASHTABLE_STATS
+ ++probeCount;
+ HashTableStats::recordCollisionAtCount(probeCount);
+#endif
+ if (k == 0)
+ k = 1 | doubleHash(h);
+ i = (i + k) & sizeMask;
+ }
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template<typename T, typename HashTranslator>
+ inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::fullLookupForWriting(const T& key)
+ {
+ ASSERT(m_table);
+ checkKey<T, HashTranslator>(key);
+
+ int k = 0;
+ ValueType* table = m_table;
+ int sizeMask = m_tableSizeMask;
+ unsigned h = HashTranslator::hash(key);
+ int i = h & sizeMask;
+
+#if DUMP_HASHTABLE_STATS
+ atomicIncrement(&HashTableStats::numAccesses);
+ int probeCount = 0;
+#endif
+
+ ValueType* deletedEntry = 0;
+
+ while (1) {
+ ValueType* entry = table + i;
+
+            // We count on the compiler to optimize out this branch: safeToCompareToEmptyOrDeleted is a compile-time constant.
+ if (HashFunctions::safeToCompareToEmptyOrDeleted) {
+ if (isEmptyBucket(*entry))
+ return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
+
+ if (HashTranslator::equal(Extractor::extract(*entry), key))
+ return makeLookupResult(entry, true, h);
+
+ if (isDeletedBucket(*entry))
+ deletedEntry = entry;
+ } else {
+ if (isEmptyBucket(*entry))
+ return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
+
+ if (isDeletedBucket(*entry))
+ deletedEntry = entry;
+ else if (HashTranslator::equal(Extractor::extract(*entry), key))
+ return makeLookupResult(entry, true, h);
+ }
+#if DUMP_HASHTABLE_STATS
+ ++probeCount;
+ HashTableStats::recordCollisionAtCount(probeCount);
+#endif
+ if (k == 0)
+ k = 1 | doubleHash(h);
+ i = (i + k) & sizeMask;
+ }
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template<typename T, typename Extra, typename HashTranslator>
+ inline pair<typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::iterator, bool> HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::add(const T& key, const Extra& extra)
+ {
+ checkKey<T, HashTranslator>(key);
+
+ invalidateIterators();
+
+ if (!m_table)
+ expand();
+
+ checkTableConsistency();
+
+ ASSERT(m_table);
+
+ int k = 0;
+ ValueType* table = m_table;
+ int sizeMask = m_tableSizeMask;
+ unsigned h = HashTranslator::hash(key);
+ int i = h & sizeMask;
+
+#if DUMP_HASHTABLE_STATS
+ atomicIncrement(&HashTableStats::numAccesses);
+ int probeCount = 0;
+#endif
+
+ ValueType* deletedEntry = 0;
+ ValueType* entry;
+ while (1) {
+ entry = table + i;
+
+            // We count on the compiler to optimize out this branch: safeToCompareToEmptyOrDeleted is a compile-time constant.
+ if (HashFunctions::safeToCompareToEmptyOrDeleted) {
+ if (isEmptyBucket(*entry))
+ break;
+
+ if (HashTranslator::equal(Extractor::extract(*entry), key))
+ return std::make_pair(makeKnownGoodIterator(entry), false);
+
+ if (isDeletedBucket(*entry))
+ deletedEntry = entry;
+ } else {
+ if (isEmptyBucket(*entry))
+ break;
+
+ if (isDeletedBucket(*entry))
+ deletedEntry = entry;
+ else if (HashTranslator::equal(Extractor::extract(*entry), key))
+ return std::make_pair(makeKnownGoodIterator(entry), false);
+ }
+#if DUMP_HASHTABLE_STATS
+ ++probeCount;
+ HashTableStats::recordCollisionAtCount(probeCount);
+#endif
+ if (k == 0)
+ k = 1 | doubleHash(h);
+ i = (i + k) & sizeMask;
+ }
+
+ if (deletedEntry) {
+ initializeBucket(*deletedEntry);
+ entry = deletedEntry;
+ --m_deletedCount;
+ }
+
+ HashTranslator::translate(*entry, key, extra);
+
+ ++m_keyCount;
+
+ if (shouldExpand()) {
+ // FIXME: This makes an extra copy on expand. Probably not that bad since
+ // expand is rare, but would be better to have a version of expand that can
+ // follow a pivot entry and return the new position.
+ KeyType enteredKey = Extractor::extract(*entry);
+ expand();
+ pair<iterator, bool> p = std::make_pair(find(enteredKey), true);
+ ASSERT(p.first != end());
+ return p;
+ }
+
+ checkTableConsistency();
+
+ return std::make_pair(makeKnownGoodIterator(entry), true);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template<typename T, typename Extra, typename HashTranslator>
+ inline pair<typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::iterator, bool> HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::addPassingHashCode(const T& key, const Extra& extra)
+ {
+ checkKey<T, HashTranslator>(key);
+
+ invalidateIterators();
+
+ if (!m_table)
+ expand();
+
+ checkTableConsistency();
+
+ FullLookupType lookupResult = fullLookupForWriting<T, HashTranslator>(key);
+
+ ValueType* entry = lookupResult.first.first;
+ bool found = lookupResult.first.second;
+ unsigned h = lookupResult.second;
+
+ if (found)
+ return std::make_pair(makeKnownGoodIterator(entry), false);
+
+ if (isDeletedBucket(*entry)) {
+ initializeBucket(*entry);
+ --m_deletedCount;
+ }
+
+ HashTranslator::translate(*entry, key, extra, h);
+ ++m_keyCount;
+ if (shouldExpand()) {
+ // FIXME: This makes an extra copy on expand. Probably not that bad since
+ // expand is rare, but would be better to have a version of expand that can
+ // follow a pivot entry and return the new position.
+ KeyType enteredKey = Extractor::extract(*entry);
+ expand();
+ pair<iterator, bool> p = std::make_pair(find(enteredKey), true);
+ ASSERT(p.first != end());
+ return p;
+ }
+
+ checkTableConsistency();
+
+ return std::make_pair(makeKnownGoodIterator(entry), true);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::reinsert(ValueType& entry)
+ {
+ ASSERT(m_table);
+ ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
+ ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first)));
+#if DUMP_HASHTABLE_STATS
+ atomicIncrement(&HashTableStats::numReinserts);
+#endif
+
+ Mover<ValueType, Traits::needsDestruction>::move(entry, *lookupForWriting(Extractor::extract(entry)).first);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template <typename T, typename HashTranslator>
+ typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::find(const T& key)
+ {
+ if (!m_table)
+ return end();
+
+ ValueType* entry = lookup<T, HashTranslator>(key);
+ if (!entry)
+ return end();
+
+ return makeKnownGoodIterator(entry);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template <typename T, typename HashTranslator>
+ typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::const_iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::find(const T& key) const
+ {
+ if (!m_table)
+ return end();
+
+ ValueType* entry = const_cast<HashTable*>(this)->lookup<T, HashTranslator>(key);
+ if (!entry)
+ return end();
+
+ return makeKnownGoodConstIterator(entry);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ template <typename T, typename HashTranslator>
+ bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::contains(const T& key) const
+ {
+ if (!m_table)
+ return false;
+
+ return const_cast<HashTable*>(this)->lookup<T, HashTranslator>(key);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeAndInvalidateWithoutEntryConsistencyCheck(ValueType* pos)
+ {
+ invalidateIterators();
+ remove(pos);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeAndInvalidate(ValueType* pos)
+ {
+ invalidateIterators();
+ checkTableConsistency();
+ remove(pos);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::remove(ValueType* pos)
+ {
+#if DUMP_HASHTABLE_STATS
+ atomicIncrement(&HashTableStats::numRemoves);
+#endif
+
+ deleteBucket(*pos);
+ ++m_deletedCount;
+ --m_keyCount;
+
+ if (shouldShrink())
+ shrink();
+
+ checkTableConsistency();
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::remove(iterator it)
+ {
+ if (it == end())
+ return;
+
+ removeAndInvalidate(const_cast<ValueType*>(it.m_iterator.m_position));
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeWithoutEntryConsistencyCheck(iterator it)
+ {
+ if (it == end())
+ return;
+
+ removeAndInvalidateWithoutEntryConsistencyCheck(const_cast<ValueType*>(it.m_iterator.m_position));
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::remove(const KeyType& key)
+ {
+ remove(find(key));
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::allocateTable(int size)
+ {
+        // We would use a template member function with explicit specializations here,
+        // but gcc doesn't appear to support that.
+ if (Traits::emptyValueIsZero)
+ return static_cast<ValueType*>(fastZeroedMalloc(size * sizeof(ValueType)));
+ ValueType* result = static_cast<ValueType*>(fastMalloc(size * sizeof(ValueType)));
+ for (int i = 0; i < size; i++)
+ initializeBucket(result[i]);
+ return result;
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::deallocateTable(ValueType* table, int size)
+ {
+ if (Traits::needsDestruction) {
+ for (int i = 0; i < size; ++i) {
+ if (!isDeletedBucket(table[i]))
+ table[i].~ValueType();
+ }
+ }
+ fastFree(table);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::expand()
+ {
+ int newSize;
+ if (m_tableSize == 0)
+ newSize = m_minTableSize;
+ else if (mustRehashInPlace())
+ newSize = m_tableSize;
+ else
+ newSize = m_tableSize * 2;
+
+ rehash(newSize);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::rehash(int newTableSize)
+ {
+ checkTableConsistencyExceptSize();
+
+ int oldTableSize = m_tableSize;
+ ValueType* oldTable = m_table;
+
+#if DUMP_HASHTABLE_STATS
+ if (oldTableSize != 0)
+ atomicIncrement(&HashTableStats::numRehashes);
+#endif
+
+ m_tableSize = newTableSize;
+ m_tableSizeMask = newTableSize - 1;
+ m_table = allocateTable(newTableSize);
+
+ for (int i = 0; i != oldTableSize; ++i)
+ if (!isEmptyOrDeletedBucket(oldTable[i]))
+ reinsert(oldTable[i]);
+
+ m_deletedCount = 0;
+
+ deallocateTable(oldTable, oldTableSize);
+
+ checkTableConsistency();
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::clear()
+ {
+ invalidateIterators();
+ deallocateTable(m_table, m_tableSize);
+ m_table = 0;
+ m_tableSize = 0;
+ m_tableSizeMask = 0;
+ m_keyCount = 0;
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::HashTable(const HashTable& other)
+ : m_table(0)
+ , m_tableSize(0)
+ , m_tableSizeMask(0)
+ , m_keyCount(0)
+ , m_deletedCount(0)
+#if CHECK_HASHTABLE_ITERATORS
+ , m_iterators(0)
+#endif
+ {
+ // Copy the hash table the dumb way, by adding each element to the new table.
+ // It might be more efficient to copy the table slots, but it's not clear that efficiency is needed.
+ const_iterator end = other.end();
+ for (const_iterator it = other.begin(); it != end; ++it)
+ add(*it);
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::swap(HashTable& other)
+ {
+ invalidateIterators();
+ other.invalidateIterators();
+
+ ValueType* tmp_table = m_table;
+ m_table = other.m_table;
+ other.m_table = tmp_table;
+
+ int tmp_tableSize = m_tableSize;
+ m_tableSize = other.m_tableSize;
+ other.m_tableSize = tmp_tableSize;
+
+ int tmp_tableSizeMask = m_tableSizeMask;
+ m_tableSizeMask = other.m_tableSizeMask;
+ other.m_tableSizeMask = tmp_tableSizeMask;
+
+ int tmp_keyCount = m_keyCount;
+ m_keyCount = other.m_keyCount;
+ other.m_keyCount = tmp_keyCount;
+
+ int tmp_deletedCount = m_deletedCount;
+ m_deletedCount = other.m_deletedCount;
+ other.m_deletedCount = tmp_deletedCount;
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>& HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::operator=(const HashTable& other)
+ {
+ HashTable tmp(other);
+ swap(tmp);
+ return *this;
+ }
+
+#if CHECK_HASHTABLE_CONSISTENCY
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkTableConsistency() const
+ {
+ checkTableConsistencyExceptSize();
+ ASSERT(!shouldExpand());
+ ASSERT(!shouldShrink());
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkTableConsistencyExceptSize() const
+ {
+ if (!m_table)
+ return;
+
+ int count = 0;
+ int deletedCount = 0;
+ for (int j = 0; j < m_tableSize; ++j) {
+ ValueType* entry = m_table + j;
+ if (isEmptyBucket(*entry))
+ continue;
+
+ if (isDeletedBucket(*entry)) {
+ ++deletedCount;
+ continue;
+ }
+
+ const_iterator it = find(Extractor::extract(*entry));
+ ASSERT(entry == it.m_position);
+ ++count;
+ }
+
+ ASSERT(count == m_keyCount);
+ ASSERT(deletedCount == m_deletedCount);
+ ASSERT(m_tableSize >= m_minTableSize);
+ ASSERT(m_tableSizeMask);
+ ASSERT(m_tableSize == m_tableSizeMask + 1);
+ }
+
+#endif // CHECK_HASHTABLE_CONSISTENCY
+
+#if CHECK_HASHTABLE_ITERATORS
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::invalidateIterators()
+ {
+ MutexLocker lock(m_mutex);
+ const_iterator* next;
+ for (const_iterator* p = m_iterators; p; p = next) {
+ next = p->m_next;
+ p->m_table = 0;
+ p->m_next = 0;
+ p->m_previous = 0;
+ }
+ m_iterators = 0;
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void addIterator(const HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>* table,
+ HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>* it)
+ {
+ it->m_table = table;
+ it->m_previous = 0;
+
+ // Insert iterator at head of doubly-linked list of iterators.
+ if (!table) {
+ it->m_next = 0;
+ } else {
+ MutexLocker lock(table->m_mutex);
+ ASSERT(table->m_iterators != it);
+ it->m_next = table->m_iterators;
+ table->m_iterators = it;
+ if (it->m_next) {
+ ASSERT(!it->m_next->m_previous);
+ it->m_next->m_previous = it;
+ }
+ }
+ }
+
+ template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits>
+ void removeIterator(HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>* it)
+ {
+ typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType;
+ typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator;
+
+ // Delete iterator from doubly-linked list of iterators.
+ if (!it->m_table) {
+ ASSERT(!it->m_next);
+ ASSERT(!it->m_previous);
+ } else {
+ MutexLocker lock(it->m_table->m_mutex);
+ if (it->m_next) {
+ ASSERT(it->m_next->m_previous == it);
+ it->m_next->m_previous = it->m_previous;
+ }
+ if (it->m_previous) {
+ ASSERT(it->m_table->m_iterators != it);
+ ASSERT(it->m_previous->m_next == it);
+ it->m_previous->m_next = it->m_next;
+ } else {
+ ASSERT(it->m_table->m_iterators == it);
+ it->m_table->m_iterators = it->m_next;
+ }
+ }
+
+ it->m_table = 0;
+ it->m_next = 0;
+ it->m_previous = 0;
+ }
+
+#endif // CHECK_HASHTABLE_ITERATORS
+
+ // iterator adapters
+
+ template<typename HashTableType, typename ValueType> struct HashTableConstIteratorAdapter {
+ HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {}
+
+ const ValueType* get() const { return (const ValueType*)m_impl.get(); }
+ const ValueType& operator*() const { return *get(); }
+ const ValueType* operator->() const { return get(); }
+
+ HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; }
+ // postfix ++ intentionally omitted
+
+ typename HashTableType::const_iterator m_impl;
+ };
+
+ template<typename HashTableType, typename ValueType> struct HashTableIteratorAdapter {
+ HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {}
+
+ ValueType* get() const { return (ValueType*)m_impl.get(); }
+ ValueType& operator*() const { return *get(); }
+ ValueType* operator->() const { return get(); }
+
+ HashTableIteratorAdapter& operator++() { ++m_impl; return *this; }
+ // postfix ++ intentionally omitted
+
+ operator HashTableConstIteratorAdapter<HashTableType, ValueType>() {
+ typename HashTableType::const_iterator i = m_impl;
+ return i;
+ }
+
+ typename HashTableType::iterator m_impl;
+ };
+
+ template<typename T, typename U>
+ inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
+ {
+ return a.m_impl == b.m_impl;
+ }
+
+ template<typename T, typename U>
+ inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b)
+ {
+ return a.m_impl != b.m_impl;
+ }
+
+ template<typename T, typename U>
+ inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
+ {
+ return a.m_impl == b.m_impl;
+ }
+
+ template<typename T, typename U>
+ inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b)
+ {
+ return a.m_impl != b.m_impl;
+ }
+
+} // namespace WTF
+
+#include "HashIterators.h"
+
+#endif // WTF_HashTable_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTraits.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTraits.h
new file mode 100644
index 0000000..c8d40f7
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/HashTraits.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_HashTraits_h
+#define WTF_HashTraits_h
+
+#include "HashFunctions.h"
+#include "TypeTraits.h"
+#include <utility>
+#include <limits>
+
+namespace WTF {
+
+ using std::pair;
+ using std::make_pair;
+
+ template<typename T> struct HashTraits;
+
+ template<bool isInteger, typename T> struct GenericHashTraitsBase;
+
+ template<typename T> struct GenericHashTraitsBase<false, T> {
+ static const bool emptyValueIsZero = false;
+ static const bool needsDestruction = true;
+ };
+
+ // Default integer traits disallow both 0 and -1 as keys (max value instead of -1 for unsigned).
+ template<typename T> struct GenericHashTraitsBase<true, T> {
+ static const bool emptyValueIsZero = true;
+ static const bool needsDestruction = false;
+ static void constructDeletedValue(T& slot) { slot = static_cast<T>(-1); }
+ static bool isDeletedValue(T value) { return value == static_cast<T>(-1); }
+ };
+
+ template<typename T> struct GenericHashTraits : GenericHashTraitsBase<IsInteger<T>::value, T> {
+ typedef T TraitType;
+ static T emptyValue() { return T(); }
+ };
+
+ template<typename T> struct HashTraits : GenericHashTraits<T> { };
+
+ template<typename T> struct FloatHashTraits : GenericHashTraits<T> {
+ static const bool needsDestruction = false;
+ static T emptyValue() { return std::numeric_limits<T>::infinity(); }
+ static void constructDeletedValue(T& slot) { slot = -std::numeric_limits<T>::infinity(); }
+ static bool isDeletedValue(T value) { return value == -std::numeric_limits<T>::infinity(); }
+ };
+
+ template<> struct HashTraits<float> : FloatHashTraits<float> { };
+ template<> struct HashTraits<double> : FloatHashTraits<double> { };
+
+    // Default unsigned traits disallow both 0 and max as keys -- use these traits to
+    // allow 0, reserving max as the empty value and max - 1 as the deleted value.
+ template<typename T> struct UnsignedWithZeroKeyHashTraits : GenericHashTraits<T> {
+ static const bool emptyValueIsZero = false;
+ static const bool needsDestruction = false;
+ static T emptyValue() { return std::numeric_limits<T>::max(); }
+ static void constructDeletedValue(T& slot) { slot = std::numeric_limits<T>::max() - 1; }
+ static bool isDeletedValue(T value) { return value == std::numeric_limits<T>::max() - 1; }
+ };
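+
+    // Illustrative consequence: a HashSet<unsigned> with the default traits
+    // cannot store 0 (the empty value) or UINT_MAX (the deleted value). A set
+    // that must hold zero can opt in explicitly, e.g.:
+    //
+    //     HashSet<unsigned, IntHash<unsigned>, UnsignedWithZeroKeyHashTraits<unsigned> > set;
+    //     set.add(0);   // OK: empty is now UINT_MAX, deleted is UINT_MAX - 1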
+
+ template<typename P> struct HashTraits<P*> : GenericHashTraits<P*> {
+ static const bool emptyValueIsZero = true;
+ static const bool needsDestruction = false;
+ static void constructDeletedValue(P*& slot) { slot = reinterpret_cast<P*>(-1); }
+ static bool isDeletedValue(P* value) { return value == reinterpret_cast<P*>(-1); }
+ };
+
+ template<typename P> struct HashTraits<RefPtr<P> > : GenericHashTraits<RefPtr<P> > {
+ static const bool emptyValueIsZero = true;
+ static void constructDeletedValue(RefPtr<P>& slot) { new (&slot) RefPtr<P>(HashTableDeletedValue); }
+ static bool isDeletedValue(const RefPtr<P>& value) { return value.isHashTableDeletedValue(); }
+ };
+
+    // Special traits for pairs, used by the HashMap implementation.
+
+ template<typename FirstTraitsArg, typename SecondTraitsArg>
+ struct PairHashTraits : GenericHashTraits<pair<typename FirstTraitsArg::TraitType, typename SecondTraitsArg::TraitType> > {
+ typedef FirstTraitsArg FirstTraits;
+ typedef SecondTraitsArg SecondTraits;
+ typedef pair<typename FirstTraits::TraitType, typename SecondTraits::TraitType> TraitType;
+
+ static const bool emptyValueIsZero = FirstTraits::emptyValueIsZero && SecondTraits::emptyValueIsZero;
+ static TraitType emptyValue() { return make_pair(FirstTraits::emptyValue(), SecondTraits::emptyValue()); }
+
+ static const bool needsDestruction = FirstTraits::needsDestruction || SecondTraits::needsDestruction;
+
+ static void constructDeletedValue(TraitType& slot) { FirstTraits::constructDeletedValue(slot.first); }
+ static bool isDeletedValue(const TraitType& value) { return FirstTraits::isDeletedValue(value.first); }
+ };
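+
+    // This is what lets HashMap store its entries as pair<Key, Mapped>: a slot
+    // counts as empty or deleted based on the first component alone, so the
+    // mapped type never needs to reserve sentinel values of its own.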
+
+ template<typename First, typename Second>
+ struct HashTraits<pair<First, Second> > : public PairHashTraits<HashTraits<First>, HashTraits<Second> > { };
+
+} // namespace WTF
+
+using WTF::HashTraits;
+using WTF::PairHashTraits;
+
+#endif // WTF_HashTraits_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ListHashSet.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ListHashSet.h
new file mode 100644
index 0000000..38cc998
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ListHashSet.h
@@ -0,0 +1,616 @@
+/*
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_ListHashSet_h
+#define WTF_ListHashSet_h
+
+#include "Assertions.h"
+#include "HashSet.h"
+#include "OwnPtr.h"
+
+namespace WTF {
+
+    // ListHashSet: Just like HashSet, this class provides a Set
+    // interface - a collection of unique objects with O(1) insertion,
+    // removal and membership test. However, it also has an
+    // order - iterating it will always give back values in the order
+    // in which they were added.
+
+    // In theory it would be possible to add prepend, insertAfter,
+    // and an append that moves the element to the end even if already present,
+    // but it is not yet clear whether these are needed.
+
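+    // Usage sketch (illustrative):
+    //
+    //     ListHashSet<int> set;
+    //     set.add(3); set.add(1); set.add(2); set.add(1);   // duplicate: ignored
+    //     // iteration now yields 3, 1, 2 -- insertion order, not hash order
+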
+ template<typename Value, typename HashFunctions> class ListHashSet;
+
+ template<typename T> struct IdentityExtractor;
+
+ template<typename Value, typename HashFunctions>
+ void deleteAllValues(const ListHashSet<Value, HashFunctions>&);
+
+ template<typename ValueArg, typename HashArg> class ListHashSetIterator;
+ template<typename ValueArg, typename HashArg> class ListHashSetConstIterator;
+
+ template<typename ValueArg> struct ListHashSetNode;
+ template<typename ValueArg> struct ListHashSetNodeAllocator;
+ template<typename ValueArg, typename HashArg> struct ListHashSetNodeHashFunctions;
+
+ template<typename ValueArg, typename HashArg = typename DefaultHash<ValueArg>::Hash> class ListHashSet {
+ private:
+ typedef ListHashSetNode<ValueArg> Node;
+ typedef ListHashSetNodeAllocator<ValueArg> NodeAllocator;
+
+ typedef HashTraits<Node*> NodeTraits;
+ typedef ListHashSetNodeHashFunctions<ValueArg, HashArg> NodeHash;
+
+ typedef HashTable<Node*, Node*, IdentityExtractor<Node*>, NodeHash, NodeTraits, NodeTraits> ImplType;
+ typedef HashTableIterator<Node*, Node*, IdentityExtractor<Node*>, NodeHash, NodeTraits, NodeTraits> ImplTypeIterator;
+ typedef HashTableConstIterator<Node*, Node*, IdentityExtractor<Node*>, NodeHash, NodeTraits, NodeTraits> ImplTypeConstIterator;
+
+ typedef HashArg HashFunctions;
+
+ public:
+ typedef ValueArg ValueType;
+ typedef ListHashSetIterator<ValueType, HashArg> iterator;
+ typedef ListHashSetConstIterator<ValueType, HashArg> const_iterator;
+
+ friend class ListHashSetConstIterator<ValueType, HashArg>;
+
+ ListHashSet();
+ ListHashSet(const ListHashSet&);
+ ListHashSet& operator=(const ListHashSet&);
+ ~ListHashSet();
+
+ void swap(ListHashSet&);
+
+ int size() const;
+ int capacity() const;
+ bool isEmpty() const;
+
+ iterator begin();
+ iterator end();
+ const_iterator begin() const;
+ const_iterator end() const;
+
+ iterator find(const ValueType&);
+ const_iterator find(const ValueType&) const;
+ bool contains(const ValueType&) const;
+
+        // The return value is a pair of an iterator pointing to the new value's
+        // location, and a bool that is true if a new entry was added.
+ pair<iterator, bool> add(const ValueType&);
+
+ pair<iterator, bool> insertBefore(const ValueType& beforeValue, const ValueType& newValue);
+ pair<iterator, bool> insertBefore(iterator it, const ValueType&);
+
+ void remove(const ValueType&);
+ void remove(iterator);
+ void clear();
+
+ private:
+ void unlinkAndDelete(Node*);
+ void appendNode(Node*);
+ void insertNodeBefore(Node* beforeNode, Node* newNode);
+ void deleteAllNodes();
+ iterator makeIterator(Node*);
+ const_iterator makeConstIterator(Node*) const;
+
+ friend void deleteAllValues<>(const ListHashSet&);
+
+ ImplType m_impl;
+ Node* m_head;
+ Node* m_tail;
+ OwnPtr<NodeAllocator> m_allocator;
+ };
+
+ template<typename ValueArg> struct ListHashSetNodeAllocator {
+ typedef ListHashSetNode<ValueArg> Node;
+ typedef ListHashSetNodeAllocator<ValueArg> NodeAllocator;
+
+ ListHashSetNodeAllocator()
+ : m_freeList(pool())
+ , m_isDoneWithInitialFreeList(false)
+ {
+ memset(m_pool.pool, 0, sizeof(m_pool.pool));
+ }
+
+ Node* allocate()
+ {
+ Node* result = m_freeList;
+
+ if (!result)
+ return static_cast<Node*>(fastMalloc(sizeof(Node)));
+
+ ASSERT(!result->m_isAllocated);
+
+ Node* next = result->m_next;
+ ASSERT(!next || !next->m_isAllocated);
+ if (!next && !m_isDoneWithInitialFreeList) {
+ next = result + 1;
+ if (next == pastPool()) {
+ m_isDoneWithInitialFreeList = true;
+ next = 0;
+ } else {
+ ASSERT(inPool(next));
+ ASSERT(!next->m_isAllocated);
+ }
+ }
+ m_freeList = next;
+
+ return result;
+ }
+
+ void deallocate(Node* node)
+ {
+ if (inPool(node)) {
+#ifndef NDEBUG
+ node->m_isAllocated = false;
+#endif
+ node->m_next = m_freeList;
+ m_freeList = node;
+ return;
+ }
+
+ fastFree(node);
+ }
+
+ private:
+ Node* pool() { return reinterpret_cast<Node*>(m_pool.pool); }
+ Node* pastPool() { return pool() + m_poolSize; }
+
+ bool inPool(Node* node)
+ {
+ return node >= pool() && node < pastPool();
+ }
+
+ Node* m_freeList;
+ bool m_isDoneWithInitialFreeList;
+ static const size_t m_poolSize = 256;
+ union {
+ char pool[sizeof(Node) * m_poolSize];
+ double forAlignment;
+ } m_pool;
+ };
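+
+    // Allocation strategy in brief: the first m_poolSize (256) nodes are carved
+    // sequentially from the embedded pool; nodes returned via deallocate() are
+    // threaded onto m_freeList and reused first; only after the pool is exhausted
+    // does allocate() fall back to fastMalloc. A set that stays under 256 live
+    // nodes therefore never touches the general-purpose heap for its nodes.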
+
+ template<typename ValueArg> struct ListHashSetNode {
+ typedef ListHashSetNodeAllocator<ValueArg> NodeAllocator;
+
+ ListHashSetNode(ValueArg value)
+ : m_value(value)
+ , m_prev(0)
+ , m_next(0)
+#ifndef NDEBUG
+ , m_isAllocated(true)
+#endif
+ {
+ }
+
+ void* operator new(size_t, NodeAllocator* allocator)
+ {
+ return allocator->allocate();
+ }
+ void destroy(NodeAllocator* allocator)
+ {
+ this->~ListHashSetNode();
+ allocator->deallocate(this);
+ }
+
+ ValueArg m_value;
+ ListHashSetNode* m_prev;
+ ListHashSetNode* m_next;
+
+#ifndef NDEBUG
+ bool m_isAllocated;
+#endif
+ };
+
+ template<typename ValueArg, typename HashArg> struct ListHashSetNodeHashFunctions {
+ typedef ListHashSetNode<ValueArg> Node;
+
+ static unsigned hash(Node* const& key) { return HashArg::hash(key->m_value); }
+ static bool equal(Node* const& a, Node* const& b) { return HashArg::equal(a->m_value, b->m_value); }
+ static const bool safeToCompareToEmptyOrDeleted = false;
+ };
+
+ template<typename ValueArg, typename HashArg> class ListHashSetIterator {
+ private:
+ typedef ListHashSet<ValueArg, HashArg> ListHashSetType;
+ typedef ListHashSetIterator<ValueArg, HashArg> iterator;
+ typedef ListHashSetConstIterator<ValueArg, HashArg> const_iterator;
+ typedef ListHashSetNode<ValueArg> Node;
+ typedef ValueArg ValueType;
+ typedef ValueType& ReferenceType;
+ typedef ValueType* PointerType;
+
+ friend class ListHashSet<ValueArg, HashArg>;
+
+ ListHashSetIterator(const ListHashSetType* set, Node* position) : m_iterator(set, position) { }
+
+ public:
+ ListHashSetIterator() { }
+
+ // default copy, assignment and destructor are OK
+
+ PointerType get() const { return const_cast<PointerType>(m_iterator.get()); }
+ ReferenceType operator*() const { return *get(); }
+ PointerType operator->() const { return get(); }
+
+ iterator& operator++() { ++m_iterator; return *this; }
+
+ // postfix ++ intentionally omitted
+
+ iterator& operator--() { --m_iterator; return *this; }
+
+ // postfix -- intentionally omitted
+
+ // Comparison.
+ bool operator==(const iterator& other) const { return m_iterator == other.m_iterator; }
+ bool operator!=(const iterator& other) const { return m_iterator != other.m_iterator; }
+
+ operator const_iterator() const { return m_iterator; }
+
+ private:
+ Node* node() { return m_iterator.node(); }
+
+ const_iterator m_iterator;
+ };
+
+ template<typename ValueArg, typename HashArg> class ListHashSetConstIterator {
+ private:
+ typedef ListHashSet<ValueArg, HashArg> ListHashSetType;
+ typedef ListHashSetIterator<ValueArg, HashArg> iterator;
+ typedef ListHashSetConstIterator<ValueArg, HashArg> const_iterator;
+ typedef ListHashSetNode<ValueArg> Node;
+ typedef ValueArg ValueType;
+ typedef const ValueType& ReferenceType;
+ typedef const ValueType* PointerType;
+
+ friend class ListHashSet<ValueArg, HashArg>;
+ friend class ListHashSetIterator<ValueArg, HashArg>;
+
+ ListHashSetConstIterator(const ListHashSetType* set, Node* position)
+ : m_set(set)
+ , m_position(position)
+ {
+ }
+
+ public:
+ ListHashSetConstIterator()
+ {
+ }
+
+ PointerType get() const
+ {
+ return &m_position->m_value;
+ }
+ ReferenceType operator*() const { return *get(); }
+ PointerType operator->() const { return get(); }
+
+ const_iterator& operator++()
+ {
+ ASSERT(m_position != 0);
+ m_position = m_position->m_next;
+ return *this;
+ }
+
+ // postfix ++ intentionally omitted
+
+ const_iterator& operator--()
+ {
+ ASSERT(m_position != m_set->m_head);
+ if (!m_position)
+ m_position = m_set->m_tail;
+ else
+ m_position = m_position->m_prev;
+ return *this;
+ }
+
+ // postfix -- intentionally omitted
+
+ // Comparison.
+ bool operator==(const const_iterator& other) const
+ {
+ return m_position == other.m_position;
+ }
+ bool operator!=(const const_iterator& other) const
+ {
+ return m_position != other.m_position;
+ }
+
+ private:
+ Node* node() { return m_position; }
+
+ const ListHashSetType* m_set;
+ Node* m_position;
+ };
+
+
+ template<typename ValueType, typename HashFunctions>
+ struct ListHashSetTranslator {
+ private:
+ typedef ListHashSetNode<ValueType> Node;
+ typedef ListHashSetNodeAllocator<ValueType> NodeAllocator;
+ public:
+ static unsigned hash(const ValueType& key) { return HashFunctions::hash(key); }
+ static bool equal(Node* const& a, const ValueType& b) { return HashFunctions::equal(a->m_value, b); }
+ static void translate(Node*& location, const ValueType& key, NodeAllocator* allocator)
+ {
+ location = new (allocator) Node(key);
+ }
+ };
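+
+    // Note the asymmetry this translator exploits: hash() and equal() work on the
+    // plain ValueType, so find() and contains() never allocate; translate() only
+    // runs -- and only then pool-allocates a Node -- once add() has established
+    // that the value is not already present.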
+
+ template<typename T, typename U>
+ inline ListHashSet<T, U>::ListHashSet()
+ : m_head(0)
+ , m_tail(0)
+ , m_allocator(new NodeAllocator)
+ {
+ }
+
+ template<typename T, typename U>
+ inline ListHashSet<T, U>::ListHashSet(const ListHashSet& other)
+ : m_head(0)
+ , m_tail(0)
+ , m_allocator(new NodeAllocator)
+ {
+ const_iterator end = other.end();
+ for (const_iterator it = other.begin(); it != end; ++it)
+ add(*it);
+ }
+
+ template<typename T, typename U>
+ inline ListHashSet<T, U>& ListHashSet<T, U>::operator=(const ListHashSet& other)
+ {
+ ListHashSet tmp(other);
+ swap(tmp);
+ return *this;
+ }
+
+ template<typename T, typename U>
+ inline void ListHashSet<T, U>::swap(ListHashSet& other)
+ {
+ m_impl.swap(other.m_impl);
+ std::swap(m_head, other.m_head);
+ std::swap(m_tail, other.m_tail);
+ m_allocator.swap(other.m_allocator);
+ }
+
+ template<typename T, typename U>
+ inline ListHashSet<T, U>::~ListHashSet()
+ {
+ deleteAllNodes();
+ }
+
+ template<typename T, typename U>
+ inline int ListHashSet<T, U>::size() const
+ {
+ return m_impl.size();
+ }
+
+ template<typename T, typename U>
+ inline int ListHashSet<T, U>::capacity() const
+ {
+ return m_impl.capacity();
+ }
+
+ template<typename T, typename U>
+ inline bool ListHashSet<T, U>::isEmpty() const
+ {
+ return m_impl.isEmpty();
+ }
+
+ template<typename T, typename U>
+ inline typename ListHashSet<T, U>::iterator ListHashSet<T, U>::begin()
+ {
+ return makeIterator(m_head);
+ }
+
+ template<typename T, typename U>
+ inline typename ListHashSet<T, U>::iterator ListHashSet<T, U>::end()
+ {
+ return makeIterator(0);
+ }
+
+ template<typename T, typename U>
+ inline typename ListHashSet<T, U>::const_iterator ListHashSet<T, U>::begin() const
+ {
+ return makeConstIterator(m_head);
+ }
+
+ template<typename T, typename U>
+ inline typename ListHashSet<T, U>::const_iterator ListHashSet<T, U>::end() const
+ {
+ return makeConstIterator(0);
+ }
+
+ template<typename T, typename U>
+ inline typename ListHashSet<T, U>::iterator ListHashSet<T, U>::find(const ValueType& value)
+ {
+ typedef ListHashSetTranslator<ValueType, HashFunctions> Translator;
+ ImplTypeIterator it = m_impl.template find<ValueType, Translator>(value);
+ if (it == m_impl.end())
+ return end();
+ return makeIterator(*it);
+ }
+
+ template<typename T, typename U>
+ inline typename ListHashSet<T, U>::const_iterator ListHashSet<T, U>::find(const ValueType& value) const
+ {
+ typedef ListHashSetTranslator<ValueType, HashFunctions> Translator;
+ ImplTypeConstIterator it = m_impl.template find<ValueType, Translator>(value);
+ if (it == m_impl.end())
+ return end();
+ return makeConstIterator(*it);
+ }
+
+ template<typename T, typename U>
+ inline bool ListHashSet<T, U>::contains(const ValueType& value) const
+ {
+ typedef ListHashSetTranslator<ValueType, HashFunctions> Translator;
+ return m_impl.template contains<ValueType, Translator>(value);
+ }
+
+ template<typename T, typename U>
+ pair<typename ListHashSet<T, U>::iterator, bool> ListHashSet<T, U>::add(const ValueType &value)
+ {
+ typedef ListHashSetTranslator<ValueType, HashFunctions> Translator;
+ pair<typename ImplType::iterator, bool> result = m_impl.template add<ValueType, NodeAllocator*, Translator>(value, m_allocator.get());
+ if (result.second)
+ appendNode(*result.first);
+ return std::make_pair(makeIterator(*result.first), result.second);
+ }
+
+ template<typename T, typename U>
+ pair<typename ListHashSet<T, U>::iterator, bool> ListHashSet<T, U>::insertBefore(iterator it, const ValueType& newValue)
+ {
+ typedef ListHashSetTranslator<ValueType, HashFunctions> Translator;
+ pair<typename ImplType::iterator, bool> result = m_impl.template add<ValueType, NodeAllocator*, Translator>(newValue, m_allocator.get());
+ if (result.second)
+ insertNodeBefore(it.node(), *result.first);
+ return std::make_pair(makeIterator(*result.first), result.second);
+ }
+
+ template<typename T, typename U>
+ pair<typename ListHashSet<T, U>::iterator, bool> ListHashSet<T, U>::insertBefore(const ValueType& beforeValue, const ValueType& newValue)
+ {
+ return insertBefore(find(beforeValue), newValue);
+ }
+
+ template<typename T, typename U>
+ inline void ListHashSet<T, U>::remove(iterator it)
+ {
+ if (it == end())
+ return;
+ m_impl.remove(it.node());
+ unlinkAndDelete(it.node());
+ }
+
+ template<typename T, typename U>
+ inline void ListHashSet<T, U>::remove(const ValueType& value)
+ {
+ remove(find(value));
+ }
+
+ template<typename T, typename U>
+ inline void ListHashSet<T, U>::clear()
+ {
+ deleteAllNodes();
+ m_impl.clear();
+ m_head = 0;
+ m_tail = 0;
+ }
+
+ template<typename T, typename U>
+ void ListHashSet<T, U>::unlinkAndDelete(Node* node)
+ {
+ if (!node->m_prev) {
+ ASSERT(node == m_head);
+ m_head = node->m_next;
+ } else {
+ ASSERT(node != m_head);
+ node->m_prev->m_next = node->m_next;
+ }
+
+ if (!node->m_next) {
+ ASSERT(node == m_tail);
+ m_tail = node->m_prev;
+ } else {
+ ASSERT(node != m_tail);
+ node->m_next->m_prev = node->m_prev;
+ }
+
+ node->destroy(m_allocator.get());
+ }
+
+ template<typename T, typename U>
+ void ListHashSet<T, U>::appendNode(Node* node)
+ {
+ node->m_prev = m_tail;
+ node->m_next = 0;
+
+ if (m_tail) {
+ ASSERT(m_head);
+ m_tail->m_next = node;
+ } else {
+ ASSERT(!m_head);
+ m_head = node;
+ }
+
+ m_tail = node;
+ }
+
+ template<typename T, typename U>
+ void ListHashSet<T, U>::insertNodeBefore(Node* beforeNode, Node* newNode)
+ {
+ if (!beforeNode)
+ return appendNode(newNode);
+
+ newNode->m_next = beforeNode;
+ newNode->m_prev = beforeNode->m_prev;
+ if (beforeNode->m_prev)
+ beforeNode->m_prev->m_next = newNode;
+ beforeNode->m_prev = newNode;
+
+ if (!newNode->m_prev)
+ m_head = newNode;
+ }
+
+ template<typename T, typename U>
+ void ListHashSet<T, U>::deleteAllNodes()
+ {
+ if (!m_head)
+ return;
+
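+        // Cache each node's successor before destroying it; destroy() frees the
+        // node, so node->m_next must not be read afterwards.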
+ for (Node* node = m_head, *next = m_head->m_next; node; node = next, next = node ? node->m_next : 0)
+ node->destroy(m_allocator.get());
+ }
+
+ template<typename T, typename U>
+ inline ListHashSetIterator<T, U> ListHashSet<T, U>::makeIterator(Node* position)
+ {
+ return ListHashSetIterator<T, U>(this, position);
+ }
+
+ template<typename T, typename U>
+ inline ListHashSetConstIterator<T, U> ListHashSet<T, U>::makeConstIterator(Node* position) const
+ {
+ return ListHashSetConstIterator<T, U>(this, position);
+ }
+
+ template<bool, typename ValueType, typename HashTableType>
+ void deleteAllValues(HashTableType& collection)
+ {
+ typedef typename HashTableType::const_iterator iterator;
+ iterator end = collection.end();
+ for (iterator it = collection.begin(); it != end; ++it)
+ delete (*it)->m_value;
+ }
+
+ template<typename T, typename U>
+ inline void deleteAllValues(const ListHashSet<T, U>& collection)
+ {
+ deleteAllValues<true, typename ListHashSet<T, U>::ValueType>(collection.m_impl);
+ }
+
+} // namespace WTF
+
+using WTF::ListHashSet;
+
+#endif /* WTF_ListHashSet_h */
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ListRefPtr.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ListRefPtr.h
new file mode 100644
index 0000000..9f9a354
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ListRefPtr.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2005, 2006, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_ListRefPtr_h
+#define WTF_ListRefPtr_h
+
+#include <wtf/RefPtr.h>
+
+namespace WTF {
+
+ // Specialized version of RefPtr designed for use in singly-linked lists.
+ // Derefs the list iteratively to avoid the recursive dereferencing that can overflow the stack.
+ template <typename T> class ListRefPtr : public RefPtr<T> {
+ public:
+ ListRefPtr() : RefPtr<T>() {}
+ ListRefPtr(T* ptr) : RefPtr<T>(ptr) {}
+ ListRefPtr(const RefPtr<T>& o) : RefPtr<T>(o) {}
+ // See comment in PassRefPtr.h for why this takes a const reference.
+ template <typename U> ListRefPtr(const PassRefPtr<U>& o) : RefPtr<T>(o) {}
+
+ ~ListRefPtr()
+ {
+ RefPtr<T> reaper = this->release();
+ while (reaper && reaper->hasOneRef())
+ reaper = reaper->releaseNext(); // implicitly protects reaper->next, then derefs reaper
+ }
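+
+ // Illustrative teardown sketch (not part of the original header), assuming a
+ // hypothetical ListNode whose releaseNext() hands back its m_next member:
+ //
+ //     ListRefPtr<ListNode> head = buildVeryLongList(); // hypothetical helper
+ //     head = 0; // the destructor's loop walks the chain node by node, so a
+ //               // million-node list is destroyed without deep recursion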
+
+ ListRefPtr& operator=(T* optr) { RefPtr<T>::operator=(optr); return *this; }
+ ListRefPtr& operator=(const RefPtr<T>& o) { RefPtr<T>::operator=(o); return *this; }
+ ListRefPtr& operator=(const PassRefPtr<T>& o) { RefPtr<T>::operator=(o); return *this; }
+ template <typename U> ListRefPtr& operator=(const RefPtr<U>& o) { RefPtr<T>::operator=(o); return *this; }
+ template <typename U> ListRefPtr& operator=(const PassRefPtr<U>& o) { RefPtr<T>::operator=(o); return *this; }
+ };
+
+ template <typename T> inline T* getPtr(const ListRefPtr<T>& p)
+ {
+ return p.get();
+ }
+
+} // namespace WTF
+
+using WTF::ListRefPtr;
+
+#endif // WTF_ListRefPtr_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Locker.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Locker.h
new file mode 100644
index 0000000..41813d3
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Locker.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef Locker_h
+#define Locker_h
+
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+
+template <typename T> class Locker : public Noncopyable {
+public:
+ Locker(T& lockable) : m_lockable(lockable) { m_lockable.lock(); }
+ ~Locker() { m_lockable.unlock(); }
+private:
+ T& m_lockable;
+};
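+
+// Usage sketch (illustrative, not part of the original header): any type with
+// lock() and unlock() members works, e.g. WTF::Mutex from Threading.h:
+//
+//     Mutex mutex;
+//     {
+//         Locker<Mutex> locker(mutex); // calls mutex.lock()
+//         // ... critical section ...
+//     }                                // mutex.unlock() on scope exit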
+
+}
+
+using WTF::Locker;
+
+#endif
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MainThread.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MainThread.cpp
new file mode 100644
index 0000000..3c19b7a
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MainThread.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MainThread.h"
+
+#include "CurrentTime.h"
+#include "Deque.h"
+#include "StdLibExtras.h"
+#include "Threading.h"
+
+namespace WTF {
+
+struct FunctionWithContext {
+ MainThreadFunction* function;
+ void* context;
+
+ FunctionWithContext(MainThreadFunction* function = 0, void* context = 0)
+ : function(function)
+ , context(context)
+ {
+ }
+};
+
+typedef Deque<FunctionWithContext> FunctionQueue;
+
+static bool callbacksPaused; // This global variable is only accessed from the main thread.
+
+Mutex& mainThreadFunctionQueueMutex()
+{
+ DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
+ return staticMutex;
+}
+
+static FunctionQueue& functionQueue()
+{
+ DEFINE_STATIC_LOCAL(FunctionQueue, staticFunctionQueue, ());
+ return staticFunctionQueue;
+}
+
+void initializeMainThread()
+{
+ mainThreadFunctionQueueMutex();
+ initializeMainThreadPlatform();
+}
+
+// A 0.1 second delay in the UI is roughly the threshold at which it becomes noticeable. Use a limit that's half of that.
+static const double maxRunLoopSuspensionTime = 0.05;
+
+void dispatchFunctionsFromMainThread()
+{
+ ASSERT(isMainThread());
+
+ if (callbacksPaused)
+ return;
+
+ double startTime = currentTime();
+
+ FunctionWithContext invocation;
+ while (true) {
+ {
+ MutexLocker locker(mainThreadFunctionQueueMutex());
+ if (!functionQueue().size())
+ break;
+ invocation = functionQueue().first();
+ functionQueue().removeFirst();
+ }
+
+ invocation.function(invocation.context);
+
+ // If we have been running accumulated functions for so long that the UI may become
+ // unresponsive, yield so that user input can be processed. Otherwise the user may not
+ // even be able to close the window. This only has an effect if
+ // scheduleDispatchFunctionsOnMainThread() is implemented so that input events can be
+ // processed before we get back here.
+ if (currentTime() - startTime > maxRunLoopSuspensionTime) {
+ scheduleDispatchFunctionsOnMainThread();
+ break;
+ }
+ }
+}
+
+void callOnMainThread(MainThreadFunction* function, void* context)
+{
+ ASSERT(function);
+ bool needToSchedule = false;
+ {
+ MutexLocker locker(mainThreadFunctionQueueMutex());
+ needToSchedule = functionQueue().size() == 0;
+ functionQueue().append(FunctionWithContext(function, context));
+ }
+ if (needToSchedule)
+ scheduleDispatchFunctionsOnMainThread();
+}
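+
+// Usage sketch (illustrative): marshal work from a secondary thread onto the
+// main thread; the context pointer is handed back verbatim. "Widget" is a
+// hypothetical caller-side type.
+//
+//     static void repaintWidget(void* context)
+//     {
+//         static_cast<Widget*>(context)->repaint();
+//     }
+//     ...
+//     callOnMainThread(repaintWidget, widget);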
+
+void setMainThreadCallbacksPaused(bool paused)
+{
+ ASSERT(isMainThread());
+
+ if (callbacksPaused == paused)
+ return;
+
+ callbacksPaused = paused;
+
+ if (!callbacksPaused)
+ scheduleDispatchFunctionsOnMainThread();
+}
+
+} // namespace WTF
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MainThread.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MainThread.h
new file mode 100644
index 0000000..b8305b5
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MainThread.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MainThread_h
+#define MainThread_h
+
+namespace WTF {
+
+class Mutex;
+
+extern "C" {
+ typedef void MainThreadFunction(void*);
+}
+
+void callOnMainThread(MainThreadFunction*, void* context);
+
+void setMainThreadCallbacksPaused(bool paused);
+
+// Must be called from the main thread (Darwin is an exception to this rule).
+void initializeMainThread();
+
+// These functions are internal to the callOnMainThread implementation.
+void initializeMainThreadPlatform();
+void scheduleDispatchFunctionsOnMainThread();
+Mutex& mainThreadFunctionQueueMutex();
+void dispatchFunctionsFromMainThread();
+
+} // namespace WTF
+
+using WTF::callOnMainThread;
+using WTF::setMainThreadCallbacksPaused;
+
+#endif // MainThread_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MallocZoneSupport.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MallocZoneSupport.h
new file mode 100644
index 0000000..62df145
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MallocZoneSupport.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MallocZoneSupport_h
+#define MallocZoneSupport_h
+
+#include <malloc/malloc.h>
+
+namespace WTF {
+
+class RemoteMemoryReader {
+ task_t m_task;
+ memory_reader_t* m_reader;
+
+public:
+ RemoteMemoryReader(task_t task, memory_reader_t* reader)
+ : m_task(task)
+ , m_reader(reader)
+ { }
+
+ void* operator()(vm_address_t address, size_t size) const
+ {
+ void* output;
+ kern_return_t err = (*m_reader)(m_task, address, size, static_cast<void**>(&output));
+ ASSERT(!err);
+ if (err)
+ output = 0;
+ return output;
+ }
+
+ template <typename T>
+ T* operator()(T* address, size_t size=sizeof(T)) const
+ {
+ return static_cast<T*>((*this)(reinterpret_cast<vm_address_t>(address), size));
+ }
+};
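+
+// RemoteMemoryReader is used by the FastMalloc malloc-zone introspection hooks
+// to copy allocator data structures out of the inspected task's address space.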
+
+} // namespace WTF
+
+#endif // MallocZoneSupport_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MathExtras.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MathExtras.h
new file mode 100644
index 0000000..324300d
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MathExtras.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_MathExtras_h
+#define WTF_MathExtras_h
+
+#include <math.h>
+#include <stdlib.h>
+
+#if PLATFORM(SOLARIS)
+#include <ieeefp.h>
+#endif
+
+#if PLATFORM(OPENBSD)
+#include <sys/types.h>
+#include <machine/ieee.h>
+#endif
+
+#if COMPILER(MSVC)
+#if PLATFORM(WINCE)
+#include <stdlib.h>
+#endif
+#include <limits>
+
+#if HAVE(FLOAT_H)
+#include <float.h>
+#endif
+
+#endif
+
+#ifndef M_PI
+const double piDouble = 3.14159265358979323846;
+const float piFloat = 3.14159265358979323846f;
+#else
+const double piDouble = M_PI;
+const float piFloat = static_cast<float>(M_PI);
+#endif
+
+#ifndef M_PI_4
+const double piOverFourDouble = 0.785398163397448309616;
+const float piOverFourFloat = 0.785398163397448309616f;
+#else
+const double piOverFourDouble = M_PI_4;
+const float piOverFourFloat = static_cast<float>(M_PI_4);
+#endif
+
+#if PLATFORM(DARWIN)
+
+// Work around a bug in the Mac OS X libc where ceil(-0.1) returns +0.
+inline double wtf_ceil(double x) { return copysign(ceil(x), x); }
+
+#define ceil(x) wtf_ceil(x)
+
+#endif
+
+#if PLATFORM(SOLARIS)
+
+#ifndef isfinite
+inline bool isfinite(double x) { return finite(x) && !isnand(x); }
+#endif
+#ifndef isinf
+inline bool isinf(double x) { return !finite(x) && !isnand(x); }
+#endif
+#ifndef signbit
+inline bool signbit(double x) { return x < 0.0; } // FIXME: Wrong for negative 0.
+#endif
+
+#endif
+
+#if PLATFORM(OPENBSD)
+
+#ifndef isfinite
+inline bool isfinite(double x) { return finite(x); }
+#endif
+#ifndef signbit
+inline bool signbit(double x) { struct ieee_double *p = (struct ieee_double *)&x; return p->dbl_sign; }
+#endif
+
+#endif
+
+#if COMPILER(MSVC) || COMPILER(RVCT)
+
+inline long lround(double num) { return static_cast<long>(num > 0 ? num + 0.5 : ceil(num - 0.5)); }
+inline long lroundf(float num) { return static_cast<long>(num > 0 ? num + 0.5f : ceilf(num - 0.5f)); }
+inline double round(double num) { return num > 0 ? floor(num + 0.5) : ceil(num - 0.5); }
+inline float roundf(float num) { return num > 0 ? floorf(num + 0.5f) : ceilf(num - 0.5f); }
+inline double trunc(double num) { return num > 0 ? floor(num) : ceil(num); }
+
+#endif
+
+#if COMPILER(MSVC)
+
+inline bool isinf(double num) { return !_finite(num) && !_isnan(num); }
+inline bool isnan(double num) { return !!_isnan(num); }
+inline bool signbit(double num) { return _copysign(1.0, num) < 0; }
+
+inline double nextafter(double x, double y) { return _nextafter(x, y); }
+inline float nextafterf(float x, float y) { return x > y ? x - FLT_EPSILON : x + FLT_EPSILON; }
+
+inline double copysign(double x, double y) { return _copysign(x, y); }
+inline int isfinite(double x) { return _finite(x); }
+
+// Work around a bug on Windows, where atan2(+-infinity, +-infinity) yields NaN instead of specific values.
+inline double wtf_atan2(double x, double y)
+{
+ double posInf = std::numeric_limits<double>::infinity();
+ double negInf = -std::numeric_limits<double>::infinity();
+ double nan = std::numeric_limits<double>::quiet_NaN();
+
+ double result = nan;
+
+ if (x == posInf && y == posInf)
+ result = piOverFourDouble;
+ else if (x == posInf && y == negInf)
+ result = 3 * piOverFourDouble;
+ else if (x == negInf && y == posInf)
+ result = -piOverFourDouble;
+ else if (x == negInf && y == negInf)
+ result = -3 * piOverFourDouble;
+ else
+ result = ::atan2(x, y);
+
+ return result;
+}
+
+// Work around a bug in the Microsoft CRT, where fmod(x, +-infinity) yields NaN instead of x.
+inline double wtf_fmod(double x, double y) { return (!isinf(x) && isinf(y)) ? x : fmod(x, y); }
+
+// Work around a bug in the Microsoft CRT, where pow(NaN, 0) yields NaN instead of 1.
+inline double wtf_pow(double x, double y) { return y == 0 ? 1 : pow(x, y); }
+
+#define atan2(x, y) wtf_atan2(x, y)
+#define fmod(x, y) wtf_fmod(x, y)
+#define pow(x, y) wtf_pow(x, y)
+
+#endif // COMPILER(MSVC)
+
+inline double deg2rad(double d) { return d * piDouble / 180.0; }
+inline double rad2deg(double r) { return r * 180.0 / piDouble; }
+inline double deg2grad(double d) { return d * 400.0 / 360.0; }
+inline double grad2deg(double g) { return g * 360.0 / 400.0; }
+inline double turn2deg(double t) { return t * 360.0; }
+inline double deg2turn(double d) { return d / 360.0; }
+inline double rad2grad(double r) { return r * 200.0 / piDouble; }
+inline double grad2rad(double g) { return g * piDouble / 200.0; }
+
+inline float deg2rad(float d) { return d * piFloat / 180.0f; }
+inline float rad2deg(float r) { return r * 180.0f / piFloat; }
+inline float deg2grad(float d) { return d * 400.0f / 360.0f; }
+inline float grad2deg(float g) { return g * 360.0f / 400.0f; }
+inline float turn2deg(float t) { return t * 360.0f; }
+inline float deg2turn(float d) { return d / 360.0f; }
+inline float rad2grad(float r) { return r * 200.0f / piFloat; }
+inline float grad2rad(float g) { return g * piFloat / 200.0f; }
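+
+// Quick sanity checks (illustrative): deg2rad(180.0) == piDouble,
+// rad2grad(piDouble) == 200.0, and turn2deg(0.25) == 90.0.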
+
+#endif // #ifndef WTF_MathExtras_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MessageQueue.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MessageQueue.h
new file mode 100644
index 0000000..070b76c
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/MessageQueue.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MessageQueue_h
+#define MessageQueue_h
+
+#include <limits>
+#include <wtf/Assertions.h>
+#include <wtf/Deque.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/Threading.h>
+
+namespace WTF {
+
+ enum MessageQueueWaitResult {
+ MessageQueueTerminated, // Queue was destroyed while waiting for message.
+ MessageQueueTimeout, // Timeout was specified and it expired.
+ MessageQueueMessageReceived // A message was successfully received and returned.
+ };
+
+ template<typename DataType>
+ class MessageQueue : public Noncopyable {
+ public:
+ MessageQueue() : m_killed(false) { }
+
+ void append(const DataType&);
+ bool appendAndCheckEmpty(const DataType&);
+ void prepend(const DataType&);
+ bool waitForMessage(DataType&);
+ template<typename Predicate>
+ MessageQueueWaitResult waitForMessageFilteredWithTimeout(DataType&, Predicate&, double absoluteTime);
+ void kill();
+
+ bool tryGetMessage(DataType&);
+ bool killed() const;
+
+ // The result of isEmpty() is only valid if no other thread is manipulating the queue at the same time.
+ bool isEmpty();
+
+ static double infiniteTime() { return std::numeric_limits<double>::max(); }
+
+ private:
+ static bool alwaysTruePredicate(DataType&) { return true; }
+
+ mutable Mutex m_mutex;
+ ThreadCondition m_condition;
+ Deque<DataType> m_queue;
+ bool m_killed;
+ };
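+
+ // Usage sketch (illustrative): a producer/consumer pair sharing one queue.
+ //
+ //     MessageQueue<int> queue;
+ //     // producer thread:  queue.append(42);
+ //     // consumer thread:  int task;
+ //     //                   while (queue.waitForMessage(task))
+ //     //                       process(task);  // hypothetical consumer work
+ //     // shutdown:         queue.kill();       // wakes every waiting thread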
+
+ template<typename DataType>
+ inline void MessageQueue<DataType>::append(const DataType& message)
+ {
+ MutexLocker lock(m_mutex);
+ m_queue.append(message);
+ m_condition.signal();
+ }
+
+ // Returns true if the queue was empty before the item was added.
+ template<typename DataType>
+ inline bool MessageQueue<DataType>::appendAndCheckEmpty(const DataType& message)
+ {
+ MutexLocker lock(m_mutex);
+ bool wasEmpty = m_queue.isEmpty();
+ m_queue.append(message);
+ m_condition.signal();
+ return wasEmpty;
+ }
+
+ template<typename DataType>
+ inline void MessageQueue<DataType>::prepend(const DataType& message)
+ {
+ MutexLocker lock(m_mutex);
+ m_queue.prepend(message);
+ m_condition.signal();
+ }
+
+ template<typename DataType>
+ inline bool MessageQueue<DataType>::waitForMessage(DataType& result)
+ {
+ MessageQueueWaitResult exitReason = waitForMessageFilteredWithTimeout(result, MessageQueue<DataType>::alwaysTruePredicate, infiniteTime());
+ ASSERT(exitReason == MessageQueueTerminated || exitReason == MessageQueueMessageReceived);
+ return exitReason == MessageQueueMessageReceived;
+ }
+
+ template<typename DataType>
+ template<typename Predicate>
+ inline MessageQueueWaitResult MessageQueue<DataType>::waitForMessageFilteredWithTimeout(DataType& result, Predicate& predicate, double absoluteTime)
+ {
+ MutexLocker lock(m_mutex);
+ bool timedOut = false;
+
+ DequeConstIterator<DataType> found = m_queue.end();
+ while (!m_killed && !timedOut && (found = m_queue.findIf(predicate)) == m_queue.end())
+ timedOut = !m_condition.timedWait(m_mutex, absoluteTime);
+
+ ASSERT(!timedOut || absoluteTime != infiniteTime());
+
+ if (m_killed)
+ return MessageQueueTerminated;
+
+ if (timedOut)
+ return MessageQueueTimeout;
+
+ ASSERT(found != m_queue.end());
+ result = *found;
+ m_queue.remove(found);
+ return MessageQueueMessageReceived;
+ }
+
+ template<typename DataType>
+ inline bool MessageQueue<DataType>::tryGetMessage(DataType& result)
+ {
+ MutexLocker lock(m_mutex);
+ if (m_killed)
+ return false;
+ if (m_queue.isEmpty())
+ return false;
+
+ result = m_queue.first();
+ m_queue.removeFirst();
+ return true;
+ }
+
+ template<typename DataType>
+ inline bool MessageQueue<DataType>::isEmpty()
+ {
+ MutexLocker lock(m_mutex);
+ if (m_killed)
+ return true;
+ return m_queue.isEmpty();
+ }
+
+ template<typename DataType>
+ inline void MessageQueue<DataType>::kill()
+ {
+ MutexLocker lock(m_mutex);
+ m_killed = true;
+ m_condition.broadcast();
+ }
+
+ template<typename DataType>
+ inline bool MessageQueue<DataType>::killed() const
+ {
+ MutexLocker lock(m_mutex);
+ return m_killed;
+ }
+} // namespace WTF
+
+using WTF::MessageQueue;
+// MessageQueueWaitResult enum and all its values.
+using WTF::MessageQueueWaitResult;
+using WTF::MessageQueueTerminated;
+using WTF::MessageQueueTimeout;
+using WTF::MessageQueueMessageReceived;
+
+#endif // MessageQueue_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Noncopyable.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Noncopyable.h
new file mode 100644
index 0000000..c50c9d8
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Noncopyable.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2006 Apple Computer, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_Noncopyable_h
+#define WTF_Noncopyable_h
+
+// We don't want argument-dependent lookup to pull in everything from the WTF
+// namespace when you use Noncopyable, so put it in its own namespace.
+
+#include "FastAllocBase.h"
+
+namespace WTFNoncopyable {
+
+ class Noncopyable {
+ Noncopyable(const Noncopyable&);
+ Noncopyable& operator=(const Noncopyable&);
+ protected:
+ Noncopyable() { }
+ ~Noncopyable() { }
+ };
+
+ class NoncopyableCustomAllocated {
+ NoncopyableCustomAllocated(const NoncopyableCustomAllocated&);
+ NoncopyableCustomAllocated& operator=(const NoncopyableCustomAllocated&);
+ protected:
+ NoncopyableCustomAllocated() { }
+ ~NoncopyableCustomAllocated() { }
+ };
+
+} // namespace WTFNoncopyable
+
+using WTFNoncopyable::Noncopyable;
+using WTFNoncopyable::NoncopyableCustomAllocated;
+
+#endif // WTF_Noncopyable_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/NotFound.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/NotFound.h
new file mode 100644
index 0000000..4263bce
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/NotFound.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef NotFound_h
+#define NotFound_h
+
+namespace WTF {
+
+ const size_t notFound = static_cast<size_t>(-1);
+
+} // namespace WTF
+
+using WTF::notFound;
+
+#endif // NotFound_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnArrayPtr.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnArrayPtr.h
new file mode 100644
index 0000000..61375c7
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnArrayPtr.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2006 Apple Computer, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_OwnArrayPtr_h
+#define WTF_OwnArrayPtr_h
+
+#include <algorithm>
+#include <wtf/Assertions.h>
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+
+ template <typename T> class OwnArrayPtr : public Noncopyable {
+ public:
+ explicit OwnArrayPtr(T* ptr = 0) : m_ptr(ptr) { }
+ ~OwnArrayPtr() { safeDelete(); }
+
+ T* get() const { return m_ptr; }
+ T* release() { T* ptr = m_ptr; m_ptr = 0; return ptr; }
+
+ void set(T* ptr) { ASSERT(m_ptr != ptr); safeDelete(); m_ptr = ptr; }
+ void clear() { safeDelete(); m_ptr = 0; }
+
+ T& operator*() const { ASSERT(m_ptr); return *m_ptr; }
+ T* operator->() const { ASSERT(m_ptr); return m_ptr; }
+
+ T& operator[](std::ptrdiff_t i) const { ASSERT(m_ptr); ASSERT(i >= 0); return m_ptr[i]; }
+
+ bool operator!() const { return !m_ptr; }
+
+ // This conversion operator allows implicit conversion to bool but not to other integer types.
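+ // (A pointer-to-member converts to bool in conditions but, unlike a plain
+ // "operator bool", does not silently convert to int; hence this idiom.)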
+#if COMPILER(WINSCW)
+ operator bool() const { return m_ptr; }
+#else
+ typedef T* OwnArrayPtr::*UnspecifiedBoolType;
+ operator UnspecifiedBoolType() const { return m_ptr ? &OwnArrayPtr::m_ptr : 0; }
+#endif
+
+ void swap(OwnArrayPtr& o) { std::swap(m_ptr, o.m_ptr); }
+
+ private:
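+ // The 'known' typedef below is a compile-time completeness check: sizeof(T)
+ // is a hard error if T is incomplete, which would make delete[] undefined behavior.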
+ void safeDelete() { typedef char known[sizeof(T) ? 1 : -1]; if (sizeof(known)) delete [] m_ptr; }
+
+ T* m_ptr;
+ };
+
+ template <typename T> inline void swap(OwnArrayPtr<T>& a, OwnArrayPtr<T>& b) { a.swap(b); }
+
+ template <typename T> inline T* getPtr(const OwnArrayPtr<T>& p)
+ {
+ return p.get();
+ }
+
+} // namespace WTF
+
+using WTF::OwnArrayPtr;
+
+#endif // WTF_OwnArrayPtr_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnFastMallocPtr.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnFastMallocPtr.h
new file mode 100644
index 0000000..c88235a
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnFastMallocPtr.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2006, 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef OwnFastMallocPtr_h
+#define OwnFastMallocPtr_h
+
+#include "FastMalloc.h"
+#include "Noncopyable.h"
+
+namespace WTF {
+
+ template<class T> class OwnFastMallocPtr : public Noncopyable {
+ public:
+ explicit OwnFastMallocPtr(T* ptr) : m_ptr(ptr)
+ {
+ }
+
+ ~OwnFastMallocPtr()
+ {
+ fastFree(m_ptr);
+ }
+
+ T* get() const { return m_ptr; }
+ T* release() { T* ptr = m_ptr; m_ptr = 0; return ptr; }
+
+ private:
+ T* m_ptr;
+ };
+
+} // namespace WTF
+
+using WTF::OwnFastMallocPtr;
+
+#endif // OwnFastMallocPtr_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtr.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtr.h
new file mode 100644
index 0000000..b7e62b1
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtr.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_OwnPtr_h
+#define WTF_OwnPtr_h
+
+#include "Assertions.h"
+#include "Noncopyable.h"
+#include "OwnPtrCommon.h"
+#include "TypeTraits.h"
+#include <algorithm>
+#include <memory>
+
+namespace WTF {
+
+ // Unlike most of our smart pointers, OwnPtr can take either the pointer type or the pointed-to type.
+
+ template <typename T> class PassOwnPtr;
+
+ template <typename T> class OwnPtr : public Noncopyable {
+ public:
+ typedef typename RemovePointer<T>::Type ValueType;
+ typedef ValueType* PtrType;
+
+ explicit OwnPtr(PtrType ptr = 0) : m_ptr(ptr) { }
+ OwnPtr(std::auto_ptr<ValueType> autoPtr) : m_ptr(autoPtr.release()) { }
+ // See comment in PassOwnPtr.h for why this takes a const reference.
+ template <typename U> OwnPtr(const PassOwnPtr<U>& o);
+
+ // This copy constructor is used implicitly by gcc when it generates
+ // temporaries for assigning a PassOwnPtr<T> object to a stack-allocated
+ // OwnPtr<T> object. It should never be called explicitly, and gcc
+ // should optimize the copy away when generating code.
+ OwnPtr(const OwnPtr<ValueType>& o);
+
+ ~OwnPtr() { deleteOwnedPtr(m_ptr); }
+
+ PtrType get() const { return m_ptr; }
+ PtrType release() { PtrType ptr = m_ptr; m_ptr = 0; return ptr; }
+
+ // FIXME: This should be renamed to adopt.
+ void set(PtrType ptr) { ASSERT(!ptr || m_ptr != ptr); deleteOwnedPtr(m_ptr); m_ptr = ptr; }
+
+ void adopt(std::auto_ptr<ValueType> autoPtr) { ASSERT(!autoPtr.get() || m_ptr != autoPtr.get()); deleteOwnedPtr(m_ptr); m_ptr = autoPtr.release(); }
+
+ void clear() { deleteOwnedPtr(m_ptr); m_ptr = 0; }
+
+ ValueType& operator*() const { ASSERT(m_ptr); return *m_ptr; }
+ PtrType operator->() const { ASSERT(m_ptr); return m_ptr; }
+
+ bool operator!() const { return !m_ptr; }
+
+ // This conversion operator allows implicit conversion to bool but not to other integer types.
+ typedef PtrType OwnPtr::*UnspecifiedBoolType;
+ operator UnspecifiedBoolType() const { return m_ptr ? &OwnPtr::m_ptr : 0; }
+
+ OwnPtr& operator=(const PassOwnPtr<T>&);
+ template <typename U> OwnPtr& operator=(const PassOwnPtr<U>&);
+
+ void swap(OwnPtr& o) { std::swap(m_ptr, o.m_ptr); }
+
+ private:
+ PtrType m_ptr;
+ };
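+
+ // Usage sketch (illustrative; "Foo" is a hypothetical type):
+ //
+ //     OwnPtr<Foo> foo(new Foo);  // foo owns the object
+ //     foo.set(new Foo);          // deletes the old object, adopts the new one
+ //     Foo* raw = foo.release();  // ownership transfers to raw; foo is now 0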
+
+ template <typename T> template <typename U> inline OwnPtr<T>::OwnPtr(const PassOwnPtr<U>& o)
+ : m_ptr(o.release())
+ {
+ }
+
+ template <typename T> inline OwnPtr<T>& OwnPtr<T>::operator=(const PassOwnPtr<T>& o)
+ {
+ T* ptr = m_ptr;
+ m_ptr = o.release();
+ ASSERT(!ptr || m_ptr != ptr);
+ if (ptr)
+ deleteOwnedPtr(ptr);
+ return *this;
+ }
+
+ template <typename T> template <typename U> inline OwnPtr<T>& OwnPtr<T>::operator=(const PassOwnPtr<U>& o)
+ {
+ T* ptr = m_ptr;
+ m_ptr = o.release();
+ ASSERT(!ptr || m_ptr != ptr);
+ if (ptr)
+ deleteOwnedPtr(ptr);
+ return *this;
+ }
+
+ template <typename T> inline void swap(OwnPtr<T>& a, OwnPtr<T>& b)
+ {
+ a.swap(b);
+ }
+
+ template <typename T, typename U> inline bool operator==(const OwnPtr<T>& a, U* b)
+ {
+ return a.get() == b;
+ }
+
+ template <typename T, typename U> inline bool operator==(T* a, const OwnPtr<U>& b)
+ {
+ return a == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const OwnPtr<T>& a, U* b)
+ {
+ return a.get() != b;
+ }
+
+ template <typename T, typename U> inline bool operator!=(T* a, const OwnPtr<U>& b)
+ {
+ return a != b.get();
+ }
+
+ template <typename T> inline typename OwnPtr<T>::PtrType getPtr(const OwnPtr<T>& p)
+ {
+ return p.get();
+ }
+
+} // namespace WTF
+
+using WTF::OwnPtr;
+
+#endif // WTF_OwnPtr_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtrCommon.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtrCommon.h
new file mode 100644
index 0000000..6d91a54
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtrCommon.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Torch Mobile, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_OwnPtrCommon_h
+#define WTF_OwnPtrCommon_h
+
+#if PLATFORM(WIN)
+typedef struct HBITMAP__* HBITMAP;
+typedef struct HBRUSH__* HBRUSH;
+typedef struct HDC__* HDC;
+typedef struct HFONT__* HFONT;
+typedef struct HPALETTE__* HPALETTE;
+typedef struct HPEN__* HPEN;
+typedef struct HRGN__* HRGN;
+#endif
+
+namespace WTF {
+
+ template <typename T> inline void deleteOwnedPtr(T* ptr)
+ {
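+ // Compile-time completeness check (the same trick OwnArrayPtr uses): sizeof(T)
+ // is a hard error for an incomplete T, preventing an undefined-behavior delete.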
+ typedef char known[sizeof(T) ? 1 : -1];
+ if (sizeof(known))
+ delete ptr;
+ }
+
+#if PLATFORM(WIN)
+ void deleteOwnedPtr(HBITMAP);
+ void deleteOwnedPtr(HBRUSH);
+ void deleteOwnedPtr(HDC);
+ void deleteOwnedPtr(HFONT);
+ void deleteOwnedPtr(HPALETTE);
+ void deleteOwnedPtr(HPEN);
+ void deleteOwnedPtr(HRGN);
+#endif
+
+} // namespace WTF
+
+#endif // WTF_OwnPtrCommon_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtrWin.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtrWin.cpp
new file mode 100644
index 0000000..67a32ff
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/OwnPtrWin.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009 Torch Mobile, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "OwnPtr.h"
+
+#include <windows.h>
+
+namespace WTF {
+
+void deleteOwnedPtr(HBITMAP ptr)
+{
+ if (ptr)
+ DeleteObject(ptr);
+}
+
+void deleteOwnedPtr(HBRUSH ptr)
+{
+ if (ptr)
+ DeleteObject(ptr);
+}
+
+void deleteOwnedPtr(HDC ptr)
+{
+ if (ptr)
+ DeleteDC(ptr);
+}
+
+void deleteOwnedPtr(HFONT ptr)
+{
+ if (ptr)
+ DeleteObject(ptr);
+}
+
+void deleteOwnedPtr(HPALETTE ptr)
+{
+ if (ptr)
+ DeleteObject(ptr);
+}
+
+void deleteOwnedPtr(HPEN ptr)
+{
+ if (ptr)
+ DeleteObject(ptr);
+}
+
+void deleteOwnedPtr(HRGN ptr)
+{
+ if (ptr)
+ DeleteObject(ptr);
+}
+
+}
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/PassOwnPtr.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/PassOwnPtr.h
new file mode 100644
index 0000000..ae70457
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/PassOwnPtr.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_PassOwnPtr_h
+#define WTF_PassOwnPtr_h
+
+#include "Assertions.h"
+#include "OwnPtrCommon.h"
+#include "TypeTraits.h"
+
+namespace WTF {
+
+ // Unlike most of our smart pointers, PassOwnPtr can take either the pointer type or the pointed-to type.
+
+ template <typename T> class OwnPtr;
+
+ template <typename T> class PassOwnPtr {
+ public:
+ typedef typename RemovePointer<T>::Type ValueType;
+ typedef ValueType* PtrType;
+
+ PassOwnPtr(PtrType ptr = 0) : m_ptr(ptr) { }
+ // It somewhat breaks the type system to allow transfer of ownership out of
+ // a const PassOwnPtr. However, it makes it much easier to work with PassOwnPtr
+ // temporaries, and we don't really have a need to use real const PassOwnPtrs
+ // anyway.
+ PassOwnPtr(const PassOwnPtr& o) : m_ptr(o.release()) { }
+ template <typename U> PassOwnPtr(const PassOwnPtr<U>& o) : m_ptr(o.release()) { }
+
+ ~PassOwnPtr() { deleteOwnedPtr(m_ptr); }
+
+ PtrType get() const { return m_ptr; }
+
+ void clear() { m_ptr = 0; }
+ PtrType release() const { PtrType ptr = m_ptr; m_ptr = 0; return ptr; }
+
+ ValueType& operator*() const { ASSERT(m_ptr); return *m_ptr; }
+ PtrType operator->() const { ASSERT(m_ptr); return m_ptr; }
+
+ bool operator!() const { return !m_ptr; }
+
+ // This conversion operator allows implicit conversion to bool but not to other integer types.
+ typedef PtrType PassOwnPtr::*UnspecifiedBoolType;
+ operator UnspecifiedBoolType() const { return m_ptr ? &PassOwnPtr::m_ptr : 0; }
+
+ PassOwnPtr& operator=(T*);
+ PassOwnPtr& operator=(const PassOwnPtr<T>&);
+ template <typename U> PassOwnPtr& operator=(const PassOwnPtr<U>&);
+
+ private:
+ mutable PtrType m_ptr;
+ };
+
+ template <typename T> inline PassOwnPtr<T>& PassOwnPtr<T>::operator=(T* optr)
+ {
+ T* ptr = m_ptr;
+ m_ptr = optr;
+ ASSERT(!ptr || m_ptr != ptr);
+ if (ptr)
+ deleteOwnedPtr(ptr);
+ return *this;
+ }
+
+ template <typename T> inline PassOwnPtr<T>& PassOwnPtr<T>::operator=(const PassOwnPtr<T>& optr)
+ {
+ T* ptr = m_ptr;
+ m_ptr = optr.release();
+ ASSERT(!ptr || m_ptr != ptr);
+ if (ptr)
+ deleteOwnedPtr(ptr);
+ return *this;
+ }
+
+ template <typename T> template <typename U> inline PassOwnPtr<T>& PassOwnPtr<T>::operator=(const PassOwnPtr<U>& optr)
+ {
+ T* ptr = m_ptr;
+ m_ptr = optr.release();
+ ASSERT(!ptr || m_ptr != ptr);
+ if (ptr)
+ deleteOwnedPtr(ptr);
+ return *this;
+ }
+
+ template <typename T, typename U> inline bool operator==(const PassOwnPtr<T>& a, const PassOwnPtr<U>& b)
+ {
+ return a.get() == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator==(const PassOwnPtr<T>& a, const OwnPtr<U>& b)
+ {
+ return a.get() == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator==(const OwnPtr<T>& a, const PassOwnPtr<U>& b)
+ {
+ return a.get() == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator==(const PassOwnPtr<T>& a, U* b)
+ {
+ return a.get() == b;
+ }
+
+ template <typename T, typename U> inline bool operator==(T* a, const PassOwnPtr<U>& b)
+ {
+ return a == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const PassOwnPtr<T>& a, const PassOwnPtr<U>& b)
+ {
+ return a.get() != b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const PassOwnPtr<T>& a, const OwnPtr<U>& b)
+ {
+ return a.get() != b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const OwnPtr<T>& a, const PassOwnPtr<U>& b)
+ {
+ return a.get() != b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const PassOwnPtr<T>& a, U* b)
+ {
+ return a.get() != b;
+ }
+
+ template <typename T, typename U> inline bool operator!=(T* a, const PassOwnPtr<U>& b)
+ {
+ return a != b.get();
+ }
+
+ template <typename T, typename U> inline PassOwnPtr<T> static_pointer_cast(const PassOwnPtr<U>& p)
+ {
+ return PassOwnPtr<T>(static_cast<T*>(p.release()));
+ }
+
+ template <typename T, typename U> inline PassOwnPtr<T> const_pointer_cast(const PassOwnPtr<U>& p)
+ {
+ return PassOwnPtr<T>(const_cast<T*>(p.release()));
+ }
+
+ template <typename T> inline T* getPtr(const PassOwnPtr<T>& p)
+ {
+ return p.get();
+ }
+
+} // namespace WTF
+
+using WTF::PassOwnPtr;
+using WTF::const_pointer_cast;
+using WTF::static_pointer_cast;
+
+#endif // WTF_PassOwnPtr_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/PassRefPtr.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/PassRefPtr.h
new file mode 100644
index 0000000..d80ed62
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/PassRefPtr.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_PassRefPtr_h
+#define WTF_PassRefPtr_h
+
+#include "AlwaysInline.h"
+
+namespace WTF {
+
+ template<typename T> class RefPtr;
+ template<typename T> class PassRefPtr;
+ template <typename T> PassRefPtr<T> adoptRef(T*);
+
+ template<typename T> class PassRefPtr {
+ public:
+ PassRefPtr() : m_ptr(0) {}
+ PassRefPtr(T* ptr) : m_ptr(ptr) { if (ptr) ptr->ref(); }
+ // It somewhat breaks the type system to allow transfer of ownership out of
+ // a const PassRefPtr. However, it makes it much easier to work with PassRefPtr
+ // temporaries, and we don't really have a need to use real const PassRefPtrs
+ // anyway.
+ PassRefPtr(const PassRefPtr& o) : m_ptr(o.releaseRef()) {}
+ template <typename U> PassRefPtr(const PassRefPtr<U>& o) : m_ptr(o.releaseRef()) { }
+
+ ALWAYS_INLINE ~PassRefPtr() { if (UNLIKELY(m_ptr != 0)) m_ptr->deref(); }
+
+ template <class U>
+ PassRefPtr(const RefPtr<U>& o) : m_ptr(o.get()) { if (T* ptr = m_ptr) ptr->ref(); }
+
+ T* get() const { return m_ptr; }
+
+ void clear() { if (T* ptr = m_ptr) ptr->deref(); m_ptr = 0; }
+ T* releaseRef() const { T* tmp = m_ptr; m_ptr = 0; return tmp; }
+
+ T& operator*() const { return *m_ptr; }
+ T* operator->() const { return m_ptr; }
+
+ bool operator!() const { return !m_ptr; }
+
+ // This conversion operator allows implicit conversion to bool but not to other integer types.
+#if COMPILER(WINSCW)
+ operator bool() const { return m_ptr; }
+#else
+ typedef T* PassRefPtr::*UnspecifiedBoolType;
+ operator UnspecifiedBoolType() const { return m_ptr ? &PassRefPtr::m_ptr : 0; }
+#endif
+ PassRefPtr& operator=(T*);
+ PassRefPtr& operator=(const PassRefPtr&);
+ template <typename U> PassRefPtr& operator=(const PassRefPtr<U>&);
+ template <typename U> PassRefPtr& operator=(const RefPtr<U>&);
+
+ friend PassRefPtr adoptRef<T>(T*);
+ private:
+ // adopting constructor
+ PassRefPtr(T* ptr, bool) : m_ptr(ptr) {}
+ mutable T* m_ptr;
+ };
+
+ template <typename T> template <typename U> inline PassRefPtr<T>& PassRefPtr<T>::operator=(const RefPtr<U>& o)
+ {
+ T* optr = o.get();
+ if (optr)
+ optr->ref();
+ T* ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ ptr->deref();
+ return *this;
+ }
+
+ template <typename T> inline PassRefPtr<T>& PassRefPtr<T>::operator=(T* optr)
+ {
+ if (optr)
+ optr->ref();
+ T* ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ ptr->deref();
+ return *this;
+ }
+
+ template <typename T> inline PassRefPtr<T>& PassRefPtr<T>::operator=(const PassRefPtr<T>& ref)
+ {
+ T* ptr = m_ptr;
+ m_ptr = ref.releaseRef();
+ if (ptr)
+ ptr->deref();
+ return *this;
+ }
+
+ template <typename T> template <typename U> inline PassRefPtr<T>& PassRefPtr<T>::operator=(const PassRefPtr<U>& ref)
+ {
+ T* ptr = m_ptr;
+ m_ptr = ref.releaseRef();
+ if (ptr)
+ ptr->deref();
+ return *this;
+ }
+
+ template <typename T, typename U> inline bool operator==(const PassRefPtr<T>& a, const PassRefPtr<U>& b)
+ {
+ return a.get() == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator==(const PassRefPtr<T>& a, const RefPtr<U>& b)
+ {
+ return a.get() == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator==(const RefPtr<T>& a, const PassRefPtr<U>& b)
+ {
+ return a.get() == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator==(const PassRefPtr<T>& a, U* b)
+ {
+ return a.get() == b;
+ }
+
+ template <typename T, typename U> inline bool operator==(T* a, const PassRefPtr<U>& b)
+ {
+ return a == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const PassRefPtr<T>& a, const PassRefPtr<U>& b)
+ {
+ return a.get() != b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const PassRefPtr<T>& a, const RefPtr<U>& b)
+ {
+ return a.get() != b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const RefPtr<T>& a, const PassRefPtr<U>& b)
+ {
+ return a.get() != b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const PassRefPtr<T>& a, U* b)
+ {
+ return a.get() != b;
+ }
+
+ template <typename T, typename U> inline bool operator!=(T* a, const PassRefPtr<U>& b)
+ {
+ return a != b.get();
+ }
+
+ template <typename T> inline PassRefPtr<T> adoptRef(T* p)
+ {
+ return PassRefPtr<T>(p, true);
+ }
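+
+ // Usage sketch (illustrative; "Thing" is a hypothetical RefCounted type whose
+ // objects start with a refcount of 1):
+ //
+ //     PassRefPtr<Thing> createThing() { return adoptRef(new Thing); }
+ //
+ // adoptRef takes ownership without calling ref() again, so the count stays 1.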
+
+ template <typename T, typename U> inline PassRefPtr<T> static_pointer_cast(const PassRefPtr<U>& p)
+ {
+ return adoptRef(static_cast<T*>(p.releaseRef()));
+ }
+
+ template <typename T, typename U> inline PassRefPtr<T> const_pointer_cast(const PassRefPtr<U>& p)
+ {
+ return adoptRef(const_cast<T*>(p.releaseRef()));
+ }
+
+ template <typename T> inline T* getPtr(const PassRefPtr<T>& p)
+ {
+ return p.get();
+ }
+
+} // namespace WTF
+
+using WTF::PassRefPtr;
+using WTF::adoptRef;
+using WTF::static_pointer_cast;
+using WTF::const_pointer_cast;
+
+#endif // WTF_PassRefPtr_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Platform.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Platform.h
new file mode 100644
index 0000000..fa37d55
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Platform.h
@@ -0,0 +1,787 @@
+/*
+ * Copyright (C) 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2007-2009 Torch Mobile, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Platform_h
+#define WTF_Platform_h
+
+/* PLATFORM handles OS, operating environment, graphics API, and CPU */
+#define PLATFORM(WTF_FEATURE) (defined WTF_PLATFORM_##WTF_FEATURE && WTF_PLATFORM_##WTF_FEATURE)
+#define COMPILER(WTF_FEATURE) (defined WTF_COMPILER_##WTF_FEATURE && WTF_COMPILER_##WTF_FEATURE)
+#define HAVE(WTF_FEATURE) (defined HAVE_##WTF_FEATURE && HAVE_##WTF_FEATURE)
+#define USE(WTF_FEATURE) (defined WTF_USE_##WTF_FEATURE && WTF_USE_##WTF_FEATURE)
+#define ENABLE(WTF_FEATURE) (defined ENABLE_##WTF_FEATURE && ENABLE_##WTF_FEATURE)
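+
+/* Usage sketch (illustrative; EXAMPLE_FEATURE is a hypothetical flag):
+   each test macro is true only when the corresponding WTF_/HAVE_/ENABLE_
+   macro is both defined and non-zero, so the tests are safe in #if
+   expressions even when the flag was never defined:
+
+       #if PLATFORM(DARWIN) && !PLATFORM(IPHONE)
+       #define WTF_USE_EXAMPLE_FEATURE 1
+       #endif
+
+       #if USE(EXAMPLE_FEATURE)
+       ...
+       #endif
+*/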
+
+/* Operating systems - low-level dependencies */
+
+/* PLATFORM(DARWIN) */
+/* Operating system level dependencies for Mac OS X / Darwin that should */
+/* be used regardless of operating environment */
+#ifdef __APPLE__
+#define WTF_PLATFORM_DARWIN 1
+#include <AvailabilityMacros.h>
+#if !defined(MAC_OS_X_VERSION_10_5) || MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_5
+#define BUILDING_ON_TIGER 1
+#elif !defined(MAC_OS_X_VERSION_10_6) || MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6
+#define BUILDING_ON_LEOPARD 1
+#endif
+#include <TargetConditionals.h>
+#endif
+
+/* PLATFORM(WIN_OS) */
+/* Operating system level dependencies for Windows that should be used */
+/* regardless of operating environment */
+#if defined(WIN32) || defined(_WIN32)
+#define WTF_PLATFORM_WIN_OS 1
+#endif
+
+/* PLATFORM(WINCE) */
+/* Operating system level dependencies for Windows CE that should be used */
+/* regardless of operating environment */
+/* Note that for this platform PLATFORM(WIN_OS) is also defined. */
+#if defined(_WIN32_WCE)
+#define WTF_PLATFORM_WINCE 1
+#include <ce_time.h>
+#endif
+
+/* PLATFORM(LINUX) */
+/* Operating system level dependencies for Linux-like systems that */
+/* should be used regardless of operating environment */
+#ifdef __linux__
+#define WTF_PLATFORM_LINUX 1
+#endif
+
+/* PLATFORM(FREEBSD) */
+/* Operating system level dependencies for FreeBSD-like systems that */
+/* should be used regardless of operating environment */
+#ifdef __FreeBSD__
+#define WTF_PLATFORM_FREEBSD 1
+#endif
+
+/* PLATFORM(OPENBSD) */
+/* Operating system level dependencies for OpenBSD systems that */
+/* should be used regardless of operating environment */
+#ifdef __OpenBSD__
+#define WTF_PLATFORM_OPENBSD 1
+#endif
+
+/* PLATFORM(SOLARIS) */
+/* Operating system level dependencies for Solaris that should be used */
+/* regardless of operating environment */
+#if defined(sun) || defined(__sun)
+#define WTF_PLATFORM_SOLARIS 1
+#endif
+
+/* PLATFORM(AIX) */
+/* Operating system level dependencies for AIX that should be used */
+/* regardless of operating environment */
+#if defined(_AIX)
+#define WTF_PLATFORM_AIX 1
+#endif
+
+/* PLATFORM(HPUX) */
+/* Operating system level dependencies for HP-UX that should be used */
+/* regardless of operating environment */
+#if defined(hpux) || defined(__hpux)
+#define WTF_PLATFORM_HPUX 1
+#ifndef MAP_ANON
+#define MAP_ANON MAP_ANONYMOUS
+#endif
+#endif
+
+#if defined (__SYMBIAN32__)
+/* We are cross-compiling, so it is not really Windows. */
+#undef WTF_PLATFORM_WIN_OS
+#undef WTF_PLATFORM_WIN
+#define WTF_PLATFORM_SYMBIAN 1
+#endif
+
+
+/* PLATFORM(NETBSD) */
+/* Operating system level dependencies for NetBSD that should be used */
+/* regardless of operating environment */
+#if defined(__NetBSD__)
+#define WTF_PLATFORM_NETBSD 1
+#endif
+
+/* PLATFORM(QNX) */
+/* Operating system level dependencies for QNX that should be used */
+/* regardless of operating environment */
+#if defined(__QNXNTO__)
+#define WTF_PLATFORM_QNX 1
+#endif
+
+/* PLATFORM(UNIX) */
+/* Operating system level dependencies for Unix-like systems that */
+/* should be used regardless of operating environment */
+#if PLATFORM(DARWIN) \
+ || PLATFORM(FREEBSD) \
+ || PLATFORM(SYMBIAN) \
+ || PLATFORM(NETBSD) \
+ || PLATFORM(SOLARIS) \
+ || PLATFORM(HPUX) \
+ || defined(unix) \
+ || defined(__unix) \
+ || defined(__unix__) \
+ || PLATFORM(AIX) \
+ || defined(__QNXNTO__)
+#define WTF_PLATFORM_UNIX 1
+#endif
+
+/* Operating environments */
+
+/* PLATFORM(CHROMIUM) */
+/* PLATFORM(QT) */
+/* PLATFORM(GTK) */
+/* PLATFORM(MAC) */
+/* PLATFORM(WIN) */
+#if defined(BUILDING_CHROMIUM__)
+#define WTF_PLATFORM_CHROMIUM 1
+#elif defined(BUILDING_QT__)
+#define WTF_PLATFORM_QT 1
+
+/* PLATFORM(KDE) */
+#if defined(BUILDING_KDE__)
+#define WTF_PLATFORM_KDE 1
+#endif
+
+#elif defined(BUILDING_WX__)
+#define WTF_PLATFORM_WX 1
+#elif defined(BUILDING_GTK__)
+#define WTF_PLATFORM_GTK 1
+#elif PLATFORM(DARWIN)
+#define WTF_PLATFORM_MAC 1
+#elif PLATFORM(WIN_OS)
+#define WTF_PLATFORM_WIN 1
+#endif
+
+/* PLATFORM(IPHONE) */
+#if (defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE)
+#define WTF_PLATFORM_IPHONE 1
+#endif
+
+/* PLATFORM(IPHONE_SIMULATOR) */
+#if defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
+#define WTF_PLATFORM_IPHONE 1
+#define WTF_PLATFORM_IPHONE_SIMULATOR 1
+#else
+#define WTF_PLATFORM_IPHONE_SIMULATOR 0
+#endif
+
+#if !defined(WTF_PLATFORM_IPHONE)
+#define WTF_PLATFORM_IPHONE 0
+#endif
+
+/* Graphics engines */
+
+/* PLATFORM(CG) and PLATFORM(CI) */
+#if PLATFORM(MAC) || PLATFORM(IPHONE)
+#define WTF_PLATFORM_CG 1
+#endif
+#if PLATFORM(MAC) && !PLATFORM(IPHONE)
+#define WTF_PLATFORM_CI 1
+#endif
+
+/* PLATFORM(SKIA) for Win/Linux, CG/CI for Mac */
+#if PLATFORM(CHROMIUM)
+#if PLATFORM(DARWIN)
+#define WTF_PLATFORM_CG 1
+#define WTF_PLATFORM_CI 1
+#define WTF_USE_ATSUI 1
+#else
+#define WTF_PLATFORM_SKIA 1
+#endif
+#endif
+
+/* Makes PLATFORM(WIN) default to PLATFORM(CAIRO) */
+/* FIXME: This should be changed from a blacklist to a whitelist */
+#if !PLATFORM(MAC) && !PLATFORM(QT) && !PLATFORM(WX) && !PLATFORM(CHROMIUM) && !PLATFORM(WINCE)
+#define WTF_PLATFORM_CAIRO 1
+#endif
+
+/* CPU */
+
+/* PLATFORM(PPC) */
+#if defined(__ppc__) \
+ || defined(__PPC__) \
+ || defined(__powerpc__) \
+ || defined(__powerpc) \
+ || defined(__POWERPC__) \
+ || defined(_M_PPC) \
+ || defined(__PPC)
+#define WTF_PLATFORM_PPC 1
+#define WTF_PLATFORM_BIG_ENDIAN 1
+#endif
+
+/* PLATFORM(PPC64) */
+#if defined(__ppc64__) \
+ || defined(__PPC64__)
+#define WTF_PLATFORM_PPC64 1
+#define WTF_PLATFORM_BIG_ENDIAN 1
+#endif
+
+/* PLATFORM(ARM) */
+#if defined(arm) \
+ || defined(__arm__)
+#define WTF_PLATFORM_ARM 1
+#if defined(__ARMEB__)
+#define WTF_PLATFORM_BIG_ENDIAN 1
+#elif !defined(__ARM_EABI__) && !defined(__ARMEB__) && !defined(__VFP_FP__)
+#define WTF_PLATFORM_MIDDLE_ENDIAN 1
+#endif
+#if !defined(__ARM_EABI__)
+#define WTF_PLATFORM_FORCE_PACK 1
+#endif
+#define ARM_ARCH_VERSION 3
+#if defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__)
+#undef ARM_ARCH_VERSION
+#define ARM_ARCH_VERSION 4
+#endif
+#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
+ || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+#undef ARM_ARCH_VERSION
+#define ARM_ARCH_VERSION 5
+#endif
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__)
+#undef ARM_ARCH_VERSION
+#define ARM_ARCH_VERSION 6
+#endif
+#if defined(__ARM_ARCH_7A__)
+#undef ARM_ARCH_VERSION
+#define ARM_ARCH_VERSION 7
+#endif
+#endif /* ARM */
+#define PLATFORM_ARM_ARCH(N) (PLATFORM(ARM) && ARM_ARCH_VERSION >= N)
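+
+/* Usage sketch (illustrative, not part of the original header):
+
+       #if PLATFORM_ARM_ARCH(7)
+       // ARMv7-or-later code path
+       #endif
+*/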
+
+/* PLATFORM(X86) */
+#if defined(__i386__) \
+ || defined(i386) \
+ || defined(_M_IX86) \
+ || defined(_X86_) \
+ || defined(__THW_INTEL)
+#define WTF_PLATFORM_X86 1
+#endif
+
+/* PLATFORM(X86_64) */
+#if defined(__x86_64__) \
+ || defined(_M_X64)
+#define WTF_PLATFORM_X86_64 1
+#endif
+
+/* PLATFORM(SH4) */
+#if defined(__SH4__)
+#define WTF_PLATFORM_SH4 1
+#endif
+
+/* PLATFORM(SPARC64) */
+#if defined(__sparc64__)
+#define WTF_PLATFORM_SPARC64 1
+#define WTF_PLATFORM_BIG_ENDIAN 1
+#endif
+
+/* PLATFORM(HPPA) */
+/* a.k.a. PA-RISC */
+#if defined(__hppa) || defined(__hppa__)
+#define WTF_PLATFORM_HPPA 1
+#define WTF_PLATFORM_BIG_ENDIAN 1
+#endif
+
+/* PLATFORM(IA64) */
+/* a.k.a. Itanium Processor Family, IPF */
+#if defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
+#define WTF_PLATFORM_IA64 1
+
+/* Itanium can be both big- and little-endian;
+   we need to determine at compile time which one it is.
+   - HP's aCC compiler only compiles big-endian (so HP-UX on Itanium is always big-endian)
+   - GCC defines __BIG_ENDIAN__ for us (default on HP-UX)
+   - Linux is usually little-endian
+   - I've never seen AIX or Windows on IA-64, but they should be little-endian too
+*/
+#if defined(__BIG_ENDIAN__) || defined(__HP_aCC)
+# define WTF_PLATFORM_BIG_ENDIAN 1
+#endif
+#endif
+
+/* PLATFORM(WINCE) && PLATFORM(QT)
+   We cannot determine the endianness at compile time. For
+   Qt for Windows CE the endianness is specified in the
+   device-specific mkspec.
+*/
+#if PLATFORM(WINCE) && PLATFORM(QT)
+# include <QtGlobal>
+# undef WTF_PLATFORM_BIG_ENDIAN
+# undef WTF_PLATFORM_MIDDLE_ENDIAN
+# if Q_BYTE_ORDER == Q_BIG_ENDIAN
+# define WTF_PLATFORM_BIG_ENDIAN 1
+# endif
+#endif
+
+/* Compiler */
+
+/* COMPILER(MSVC) */
+#if defined(_MSC_VER)
+#define WTF_COMPILER_MSVC 1
+#if _MSC_VER < 1400
+#define WTF_COMPILER_MSVC7 1
+#endif
+#endif
+
+/* COMPILER(RVCT) */
+#if defined(__CC_ARM) || defined(__ARMCC__)
+#define WTF_COMPILER_RVCT 1
+#endif
+
+/* COMPILER(GCC) */
+/* --gnu option of the RVCT compiler also defines __GNUC__ */
+#if defined(__GNUC__) && !COMPILER(RVCT)
+#define WTF_COMPILER_GCC 1
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#endif
+
+/* COMPILER(MINGW) */
+#if defined(MINGW) || defined(__MINGW32__)
+#define WTF_COMPILER_MINGW 1
+#endif
+
+/* COMPILER(BORLAND) */
+/* not really fully supported - is this relevant any more? */
+#if defined(__BORLANDC__)
+#define WTF_COMPILER_BORLAND 1
+#endif
+
+/* COMPILER(CYGWIN) */
+/* not really fully supported - is this relevant any more? */
+#if defined(__CYGWIN__)
+#define WTF_COMPILER_CYGWIN 1
+#endif
+
+/* COMPILER(WINSCW) */
+#if defined(__WINSCW__)
+#define WTF_COMPILER_WINSCW 1
+#endif
+
+/* COMPILER(SUNCC) */
+/* This is the Sun CC compiler, also known as Sun Studio or Sun Pro */
+#if defined(__SUNPRO_CC) || defined(__SUNPRO_C)
+#define WTF_COMPILER_SUNCC 1
+#endif
+
+/* COMPILER(XLC) */
+/* IBM Visual Age C/C++ compiler, a.k.a. xlC */
+#if defined(__xlC__)
+#define WTF_COMPILER_XLC 1
+#endif
+
+/* COMPILER(ACC) */
+/* HP's aC++/ANSI C compiler, a.k.a. aCC */
+#if defined(__HP_aCC)
+#define WTF_COMPILER_ACC 1
+#endif
+
+#if (PLATFORM(IPHONE) || PLATFORM(MAC) || PLATFORM(WIN)) && !defined(ENABLE_JSC_MULTIPLE_THREADS)
+#define ENABLE_JSC_MULTIPLE_THREADS 1
+#endif
+
+#if PLATFORM(WINCE) && !PLATFORM(QT)
+#undef ENABLE_JSC_MULTIPLE_THREADS
+#define ENABLE_JSC_MULTIPLE_THREADS 0
+#define USE_SYSTEM_MALLOC 0
+#define ENABLE_ICONDATABASE 0
+#define ENABLE_JAVASCRIPT_DEBUGGER 0
+#define ENABLE_FTPDIR 0
+#define ENABLE_PAN_SCROLLING 0
+#define ENABLE_WML 1
+#define HAVE_ACCESSIBILITY 0
+
+#define NOMINMAX // prevent the Windows headers from defining min/max macros that conflict with std::min()/std::max()
+#define NOSHLWAPI // shlwapi.h is not available on WinCE
+
+// MSDN documentation says these functions are provided with uspce.lib, but we cannot find this file.
+#define __usp10__ // disable "usp10.h"
+
+#define _INC_ASSERT // disable "assert.h"
+#define assert(x)
+
+// _countof is only included in CE6; for CE5 we need to define it ourselves
+#ifndef _countof
+#define _countof(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#endif /* PLATFORM(WINCE) && !PLATFORM(QT) */
+
+/* for Unicode, KDE uses Qt */
+#if PLATFORM(KDE) || PLATFORM(QT)
+#define WTF_USE_QT4_UNICODE 1
+#elif PLATFORM(GTK)
+/* The GTK+ Unicode backend is configurable */
+#else
+#define WTF_USE_ICU_UNICODE 1
+#endif
+
+#if PLATFORM(MAC) && !PLATFORM(IPHONE)
+#define WTF_PLATFORM_CF 1
+#define WTF_USE_PTHREADS 1
+#if !defined(ENABLE_MAC_JAVA_BRIDGE)
+#define ENABLE_MAC_JAVA_BRIDGE 1
+#endif
+#if !defined(ENABLE_DASHBOARD_SUPPORT)
+#define ENABLE_DASHBOARD_SUPPORT 1
+#endif
+#define HAVE_READLINE 1
+#define HAVE_RUNLOOP_TIMER 1
+#endif
+
+#if PLATFORM(CHROMIUM) && PLATFORM(DARWIN)
+#define WTF_PLATFORM_CF 1
+#define WTF_USE_PTHREADS 1
+#endif
+
+#if PLATFORM(IPHONE)
+#define WTF_PLATFORM_CF 1
+#define WTF_USE_PTHREADS 1
+#define ENABLE_FTPDIR 1
+#define ENABLE_MAC_JAVA_BRIDGE 0
+#define ENABLE_ICONDATABASE 0
+#define ENABLE_GEOLOCATION 1
+#define ENABLE_NETSCAPE_PLUGIN_API 0
+#define HAVE_READLINE 1
+#define ENABLE_REPAINT_THROTTLING 1
+#endif
+
+#if PLATFORM(WIN)
+#define WTF_USE_WININET 1
+#endif
+
+#if PLATFORM(WX)
+#define ENABLE_ASSEMBLER 1
+#define WTF_USE_CURL 1
+#define WTF_USE_PTHREADS 1
+#endif
+
+#if PLATFORM(GTK)
+#if HAVE(PTHREAD_H)
+#define WTF_USE_PTHREADS 1
+#endif
+#endif
+
+#if !defined(HAVE_ACCESSIBILITY)
+#if PLATFORM(IPHONE) || PLATFORM(MAC) || PLATFORM(WIN) || PLATFORM(GTK) || PLATFORM(CHROMIUM)
+#define HAVE_ACCESSIBILITY 1
+#endif
+#endif /* !defined(HAVE_ACCESSIBILITY) */
+
+#if PLATFORM(UNIX) && !PLATFORM(SYMBIAN)
+#define HAVE_SIGNAL_H 1
+#endif
+
+#if !PLATFORM(WIN_OS) && !PLATFORM(SOLARIS) && !PLATFORM(QNX) \
+ && !PLATFORM(SYMBIAN) && !COMPILER(RVCT) && !PLATFORM(AIX) && !PLATFORM(HPUX)
+#define HAVE_TM_GMTOFF 1
+#define HAVE_TM_ZONE 1
+#define HAVE_TIMEGM 1
+#endif
+
+#if PLATFORM(DARWIN)
+
+#define HAVE_ERRNO_H 1
+#define HAVE_LANGINFO_H 1
+#define HAVE_MMAP 1
+#define HAVE_MERGESORT 1
+#define HAVE_SBRK 1
+#define HAVE_STRINGS_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_TIME_H 1
+#define HAVE_SYS_TIMEB_H 1
+
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !PLATFORM(IPHONE)
+#define HAVE_MADV_FREE_REUSE 1
+#define HAVE_MADV_FREE 1
+#endif
+
+#if PLATFORM(IPHONE)
+#define HAVE_MADV_FREE 1
+#endif
+
+#elif PLATFORM(WIN_OS)
+
+#define HAVE_FLOAT_H 1
+#if PLATFORM(WINCE)
+#define HAVE_ERRNO_H 0
+#else
+#define HAVE_SYS_TIMEB_H 1
+#endif
+#define HAVE_VIRTUALALLOC 1
+
+#elif PLATFORM(SYMBIAN)
+
+#define HAVE_ERRNO_H 1
+#define HAVE_MMAP 0
+#define HAVE_SBRK 1
+
+#define HAVE_SYS_TIME_H 1
+#define HAVE_STRINGS_H 1
+
+#if !COMPILER(RVCT)
+#define HAVE_SYS_PARAM_H 1
+#endif
+
+#elif PLATFORM(QNX)
+
+#define HAVE_ERRNO_H 1
+#define HAVE_MMAP 1
+#define HAVE_SBRK 1
+#define HAVE_STRINGS_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_TIME_H 1
+
+#else
+
+/* FIXME: is this actually used or do other platforms generate their own config.h? */
+
+#define HAVE_ERRNO_H 1
+#define HAVE_LANGINFO_H 1
+#define HAVE_MMAP 1
+#define HAVE_SBRK 1
+#define HAVE_STRINGS_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_TIME_H 1
+
+#endif
+
+/* ENABLE macro defaults */
+
+/* fastMalloc match validation allows for runtime verification that
+ new is matched by delete, fastMalloc is matched by fastFree, etc. */
+#if !defined(ENABLE_FAST_MALLOC_MATCH_VALIDATION)
+#define ENABLE_FAST_MALLOC_MATCH_VALIDATION 0
+#endif
+
+#if !defined(ENABLE_ICONDATABASE)
+#define ENABLE_ICONDATABASE 1
+#endif
+
+#if !defined(ENABLE_DATABASE)
+#define ENABLE_DATABASE 1
+#endif
+
+#if !defined(ENABLE_JAVASCRIPT_DEBUGGER)
+#define ENABLE_JAVASCRIPT_DEBUGGER 1
+#endif
+
+#if !defined(ENABLE_FTPDIR)
+#define ENABLE_FTPDIR 1
+#endif
+
+#if !defined(ENABLE_DASHBOARD_SUPPORT)
+#define ENABLE_DASHBOARD_SUPPORT 0
+#endif
+
+#if !defined(ENABLE_MAC_JAVA_BRIDGE)
+#define ENABLE_MAC_JAVA_BRIDGE 0
+#endif
+
+#if !defined(ENABLE_NETSCAPE_PLUGIN_API)
+#define ENABLE_NETSCAPE_PLUGIN_API 1
+#endif
+
+#if !defined(ENABLE_OPCODE_STATS)
+#define ENABLE_OPCODE_STATS 0
+#endif
+
+#define ENABLE_SAMPLING_COUNTERS 0
+#define ENABLE_SAMPLING_FLAGS 0
+#define ENABLE_OPCODE_SAMPLING 0
+#define ENABLE_CODEBLOCK_SAMPLING 0
+#if ENABLE(CODEBLOCK_SAMPLING) && !ENABLE(OPCODE_SAMPLING)
+#error "CODEBLOCK_SAMPLING requires OPCODE_SAMPLING"
+#endif
+#if ENABLE(OPCODE_SAMPLING) || ENABLE(SAMPLING_FLAGS)
+#define ENABLE_SAMPLING_THREAD 1
+#endif
+
+#if !defined(ENABLE_GEOLOCATION)
+#define ENABLE_GEOLOCATION 0
+#endif
+
+#if !defined(ENABLE_TEXT_CARET)
+#define ENABLE_TEXT_CARET 1
+#endif
+
+#if !defined(ENABLE_ON_FIRST_TEXTAREA_FOCUS_SELECT_ALL)
+#define ENABLE_ON_FIRST_TEXTAREA_FOCUS_SELECT_ALL 0
+#endif
+
+#if !defined(WTF_USE_ALTERNATE_JSIMMEDIATE) && PLATFORM(X86_64) && PLATFORM(MAC)
+#define WTF_USE_ALTERNATE_JSIMMEDIATE 1
+#endif
+
+#if !defined(ENABLE_REPAINT_THROTTLING)
+#define ENABLE_REPAINT_THROTTLING 0
+#endif
+
+#if !defined(ENABLE_JIT)
+
+/* The JIT is tested & working on x86_64 Mac */
+#if PLATFORM(X86_64) && PLATFORM(MAC)
+ #define ENABLE_JIT 1
+/* The JIT is tested & working on x86 Mac */
+#elif PLATFORM(X86) && PLATFORM(MAC)
+ #define ENABLE_JIT 1
+ #define WTF_USE_JIT_STUB_ARGUMENT_VA_LIST 1
+#elif PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE)
+ /* Under development, temporarily disabled until the 16MB link range limit in the assembler is fixed. */
+ #define ENABLE_JIT 0
+ #define ENABLE_JIT_OPTIMIZE_NATIVE_CALL 0
+/* The JIT is tested & working on x86 Windows */
+#elif PLATFORM(X86) && PLATFORM(WIN)
+ #define ENABLE_JIT 1
+#endif
+
+#if PLATFORM(X86) && PLATFORM(QT)
+#if PLATFORM(WIN_OS) && COMPILER(MINGW) && GCC_VERSION >= 40100
+ #define ENABLE_JIT 1
+ #define WTF_USE_JIT_STUB_ARGUMENT_VA_LIST 1
+#elif PLATFORM(WIN_OS) && COMPILER(MSVC)
+ #define ENABLE_JIT 1
+ #define WTF_USE_JIT_STUB_ARGUMENT_REGISTER 1
+#elif PLATFORM(LINUX) && GCC_VERSION >= 40100
+ #define ENABLE_JIT 1
+ #define WTF_USE_JIT_STUB_ARGUMENT_VA_LIST 1
+#endif
+#endif /* PLATFORM(QT) && PLATFORM(X86) */
+
+#endif /* !defined(ENABLE_JIT) */
+
+#if ENABLE(JIT)
+#ifndef ENABLE_JIT_OPTIMIZE_CALL
+#define ENABLE_JIT_OPTIMIZE_CALL 1
+#endif
+#ifndef ENABLE_JIT_OPTIMIZE_NATIVE_CALL
+#define ENABLE_JIT_OPTIMIZE_NATIVE_CALL 1
+#endif
+#ifndef ENABLE_JIT_OPTIMIZE_PROPERTY_ACCESS
+#define ENABLE_JIT_OPTIMIZE_PROPERTY_ACCESS 1
+#endif
+#ifndef ENABLE_JIT_OPTIMIZE_ARITHMETIC
+#define ENABLE_JIT_OPTIMIZE_ARITHMETIC 1
+#endif
+#ifndef ENABLE_JIT_OPTIMIZE_METHOD_CALLS
+#define ENABLE_JIT_OPTIMIZE_METHOD_CALLS 1
+#endif
+#endif
+
+#if PLATFORM(X86) && COMPILER(MSVC)
+#define JSC_HOST_CALL __fastcall
+#elif PLATFORM(X86) && COMPILER(GCC)
+#define JSC_HOST_CALL __attribute__ ((fastcall))
+#else
+#define JSC_HOST_CALL
+#endif
+
+#if COMPILER(GCC) && !ENABLE(JIT)
+#define HAVE_COMPUTED_GOTO 1
+#endif
+
+#if ENABLE(JIT) && defined(COVERAGE)
+ #define WTF_USE_INTERPRETER 0
+#else
+ #define WTF_USE_INTERPRETER 1
+#endif
+
+/* Yet Another Regex Runtime. */
+#if !defined(ENABLE_YARR_JIT)
+
+/* YARR supports x86 & x86-64, and has been tested on Mac and Windows. */
+#if (PLATFORM(X86) && PLATFORM(MAC)) \
+ || (PLATFORM(X86_64) && PLATFORM(MAC)) \
+ /* Under development, temporarily disabled until the 16MB link range limit in the assembler is fixed. */ \
+ || (PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE) && 0) \
+ || (PLATFORM(X86) && PLATFORM(WIN))
+#define ENABLE_YARR 1
+#define ENABLE_YARR_JIT 1
+#endif
+
+#if PLATFORM(X86) && PLATFORM(QT)
+#if (PLATFORM(WIN_OS) && COMPILER(MINGW) && GCC_VERSION >= 40100) \
+ || (PLATFORM(WIN_OS) && COMPILER(MSVC)) \
+ || (PLATFORM(LINUX) && GCC_VERSION >= 40100)
+#define ENABLE_YARR 1
+#define ENABLE_YARR_JIT 1
+#endif
+#endif
+
+#endif /* !defined(ENABLE_YARR_JIT) */
+
+/* Sanity Check */
+#if ENABLE(YARR_JIT) && !ENABLE(YARR)
+#error "YARR_JIT requires YARR"
+#endif
+
+#if ENABLE(JIT) || ENABLE(YARR_JIT)
+#define ENABLE_ASSEMBLER 1
+#endif
+/* Setting this flag prevents the assembler from using RWX memory; this may improve
+ security but currently comes at a significant performance cost. */
+#if PLATFORM(ARM)
+#define ENABLE_ASSEMBLER_WX_EXCLUSIVE 1
+#else
+#define ENABLE_ASSEMBLER_WX_EXCLUSIVE 0
+#endif
+
+#if !defined(ENABLE_PAN_SCROLLING) && PLATFORM(WIN_OS)
+#define ENABLE_PAN_SCROLLING 1
+#endif
+
+#if !defined(ENABLE_ACTIVEX_TYPE_CONVERSION_WMPLAYER)
+#define ENABLE_ACTIVEX_TYPE_CONVERSION_WMPLAYER 1
+#endif
+
+/* Use the QXmlStreamReader implementation for XMLTokenizer */
+#if PLATFORM(QT)
+#if !ENABLE(XSLT)
+#define WTF_USE_QXMLSTREAM 1
+#endif
+#endif
+
+#if !PLATFORM(QT)
+#define WTF_USE_FONT_FAST_PATH 1
+#endif
+
+/* Accelerated compositing */
+#if PLATFORM(MAC)
+#if !defined(BUILDING_ON_TIGER)
+#define WTF_USE_ACCELERATED_COMPOSITING 1
+#endif
+#endif
+
+#if PLATFORM(IPHONE)
+#define WTF_USE_ACCELERATED_COMPOSITING 1
+#endif
+
+#endif /* WTF_Platform_h */
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/PtrAndFlags.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/PtrAndFlags.h
new file mode 100644
index 0000000..f4527df
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/PtrAndFlags.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PtrAndFlags_h
+#define PtrAndFlags_h
+
+#include <wtf/Assertions.h>
+
+namespace WTF {
+ template<class T, typename FlagEnum> class PtrAndFlags {
+ public:
+ PtrAndFlags() : m_ptrAndFlags(0) {}
+
+ bool isFlagSet(FlagEnum flagNumber) const { ASSERT(flagNumber < 2); return m_ptrAndFlags & (1 << flagNumber); }
+ void setFlag(FlagEnum flagNumber) { ASSERT(flagNumber < 2); m_ptrAndFlags |= (1 << flagNumber);}
+ void clearFlag(FlagEnum flagNumber) { ASSERT(flagNumber < 2); m_ptrAndFlags &= ~(1 << flagNumber);}
+ T* get() const { return reinterpret_cast<T*>(m_ptrAndFlags & ~3); }
+ void set(T* ptr)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(ptr) & 3));
+ m_ptrAndFlags = reinterpret_cast<intptr_t>(ptr) | (m_ptrAndFlags & 3);
+#ifndef NDEBUG
+ m_leaksPtr = ptr;
+#endif
+ }
+
+ private:
+ intptr_t m_ptrAndFlags;
+#ifndef NDEBUG
+ void* m_leaksPtr; // Only used to allow tools such as 'leaks' on OS X to detect that the memory is referenced.
+#endif
+ };
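+
+    /* Usage sketch (illustrative only; Node and NodeFlag are hypothetical):
+       the two low bits of an (at least 4-byte aligned) pointer hold the
+       flags, so a pointer plus two booleans fits in a single word:
+
+           enum NodeFlag { IsMarked, IsDirty };
+           PtrAndFlags<Node, NodeFlag> field;
+           field.set(node);
+           field.setFlag(IsMarked);
+           if (field.isFlagSet(IsMarked)) { ... }
+    */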
+} // namespace WTF
+
+using WTF::PtrAndFlags;
+
+#endif // PtrAndFlags_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumber.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumber.cpp
new file mode 100644
index 0000000..0e6e208
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumber.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+ * (C) 2008, 2009 Torch Mobile Inc. All rights reserved. (http://www.torchmobile.com/)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "RandomNumber.h"
+
+#include "RandomNumberSeed.h"
+
+#include <limits>
+#include <limits.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#if PLATFORM(WINCE)
+extern "C" {
+#include "wince/mt19937ar.c"
+}
+#endif
+
+namespace WTF {
+
+double weakRandomNumber()
+{
+#if COMPILER(MSVC) && defined(_CRT_RAND_S)
+ // rand_s is incredibly slow on Windows, so we fall back on rand() for Math.random
+ return (rand() + (rand() / (RAND_MAX + 1.0))) / (RAND_MAX + 1.0);
+#else
+ return randomNumber();
+#endif
+}
+
+double randomNumber()
+{
+#if !ENABLE(JSC_MULTIPLE_THREADS)
+ static bool s_initialized = false;
+ if (!s_initialized) {
+ initializeRandomNumberGenerator();
+ s_initialized = true;
+ }
+#endif
+
+#if COMPILER(MSVC) && defined(_CRT_RAND_S)
+ uint32_t bits;
+ rand_s(&bits);
+ return static_cast<double>(bits) / (static_cast<double>(std::numeric_limits<uint32_t>::max()) + 1.0);
+#elif PLATFORM(DARWIN)
+ uint32_t bits = arc4random();
+ return static_cast<double>(bits) / (static_cast<double>(std::numeric_limits<uint32_t>::max()) + 1.0);
+#elif PLATFORM(UNIX)
+ uint32_t part1 = random() & (RAND_MAX - 1);
+ uint32_t part2 = random() & (RAND_MAX - 1);
+ // random only provides 31 bits
+ uint64_t fullRandom = part1;
+ fullRandom <<= 31;
+ fullRandom |= part2;
+
+ // Keep only the low 53 bits (the width of a double's significand)
+ fullRandom &= (1LL << 53) - 1;
+ return static_cast<double>(fullRandom) / static_cast<double>(1LL << 53);
+#elif PLATFORM(WINCE)
+ return genrand_res53();
+#else
+ uint32_t part1 = rand() & (RAND_MAX - 1);
+ uint32_t part2 = rand() & (RAND_MAX - 1);
+ // rand only provides 31 bits, and the low-order bits of those aren't very random,
+ // so we take the high 26 bits of part1 and the high 27 bits of part2.
+ part1 >>= 5; // drop the low 5 bits
+ part2 >>= 4; // drop the low 4 bits
+ uint64_t fullRandom = part1;
+ fullRandom <<= 27;
+ fullRandom |= part2;
+
+ // Keep only the low 53 bits (the width of a double's significand)
+ fullRandom &= (1LL << 53) - 1;
+ return static_cast<double>(fullRandom) / static_cast<double>(1LL << 53);
+#endif
+}
+
+}
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumber.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumber.h
new file mode 100644
index 0000000..fe1687c
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumber.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2008 Torch Mobile Inc. All rights reserved. (http://www.torchmobile.com/)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_RandomNumber_h
+#define WTF_RandomNumber_h
+
+namespace WTF {
+
+ // Returns a pseudo-random number in the range [0, 1); attempts to be
+ // cryptographically secure if possible on the target platform.
+ double randomNumber();
+
+ // Returns a pseudo-random number in the range [0, 1); attempts to
+ // produce a reasonable "random" number quickly.
+ // We only need this because rand_s is so slow on Windows.
+ double weakRandomNumber();
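+
+    /* Usage sketch (illustrative only; bucketCount is a hypothetical
+       variable): scale the [0, 1) result to pick a random index:
+
+           unsigned index = static_cast<unsigned>(randomNumber() * bucketCount);
+    */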
+
+}
+
+#endif
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumberSeed.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumberSeed.h
new file mode 100644
index 0000000..1c2d364
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RandomNumberSeed.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2008 Torch Mobile Inc. All rights reserved. (http://www.torchmobile.com/)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_RandomNumberSeed_h
+#define WTF_RandomNumberSeed_h
+
+#include <stdlib.h>
+#include <time.h>
+
+#if HAVE(SYS_TIME_H)
+#include <sys/time.h>
+#endif
+
+#if PLATFORM(UNIX)
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#if PLATFORM(WINCE)
+extern "C" {
+void init_by_array(unsigned long init_key[],int key_length);
+}
+#include <ce_time.h>
+#endif
+
+// Internal JavaScriptCore usage only
+namespace WTF {
+
+inline void initializeRandomNumberGenerator()
+{
+#if PLATFORM(DARWIN)
+ // On Darwin we use arc4random which initialises itself.
+#elif PLATFORM(WINCE)
+ // initialize rand()
+ srand(static_cast<unsigned>(time(0)));
+
+ // use rand() to initialize the real RNG
+ unsigned long initializationBuffer[4];
+ initializationBuffer[0] = (rand() << 16) | rand();
+ initializationBuffer[1] = (rand() << 16) | rand();
+ initializationBuffer[2] = (rand() << 16) | rand();
+ initializationBuffer[3] = (rand() << 16) | rand();
+ init_by_array(initializationBuffer, 4);
+#elif COMPILER(MSVC) && defined(_CRT_RAND_S)
+ // On Windows we use rand_s which initialises itself
+#elif PLATFORM(UNIX)
+ // srandomdev is not guaranteed to exist on Linux, so we use this poor seed; this should be improved
+ timeval time;
+ gettimeofday(&time, 0);
+ srandom(static_cast<unsigned>(time.tv_usec * getpid()));
+#else
+ srand(static_cast<unsigned>(time(0)));
+#endif
+}
+
+inline void initializeWeakRandomNumberGenerator()
+{
+#if COMPILER(MSVC) && defined(_CRT_RAND_S)
+ // We need to initialise the Windows rand() explicitly for Math.random
+ unsigned seed = 0;
+ rand_s(&seed);
+ srand(seed);
+#endif
+}
+}
+
+#endif
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCounted.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCounted.h
new file mode 100644
index 0000000..761a856
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCounted.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef RefCounted_h
+#define RefCounted_h
+
+#include <wtf/Assertions.h>
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+
+// This base class holds the non-template methods and attributes.
+// The RefCounted class inherits from it, reducing the template bloat
+// generated by the compiler (a technique called template hoisting).
+class RefCountedBase {
+public:
+ void ref()
+ {
+ ASSERT(!m_deletionHasBegun);
+ ++m_refCount;
+ }
+
+ bool hasOneRef() const
+ {
+ ASSERT(!m_deletionHasBegun);
+ return m_refCount == 1;
+ }
+
+ int refCount() const
+ {
+ return m_refCount;
+ }
+
+protected:
+ RefCountedBase()
+ : m_refCount(1)
+#ifndef NDEBUG
+ , m_deletionHasBegun(false)
+#endif
+ {
+ }
+
+ ~RefCountedBase()
+ {
+ }
+
+ // Returns whether the object should be deleted.
+ bool derefBase()
+ {
+ ASSERT(!m_deletionHasBegun);
+ ASSERT(m_refCount > 0);
+ if (m_refCount == 1) {
+#ifndef NDEBUG
+ m_deletionHasBegun = true;
+#endif
+ return true;
+ }
+
+ --m_refCount;
+ return false;
+ }
+
+ // Helper for generating JIT code. Please do not use for non-JIT purposes.
+ int* addressOfCount()
+ {
+ return &m_refCount;
+ }
+
+#ifndef NDEBUG
+ bool deletionHasBegun() const
+ {
+ return m_deletionHasBegun;
+ }
+#endif
+
+private:
+ template<class T>
+ friend class CrossThreadRefCounted;
+
+ int m_refCount;
+#ifndef NDEBUG
+ bool m_deletionHasBegun;
+#endif
+};
+
+
+template<class T> class RefCounted : public RefCountedBase, public Noncopyable {
+public:
+ void deref()
+ {
+ if (derefBase())
+ delete static_cast<T*>(this);
+ }
+
+protected:
+ ~RefCounted()
+ {
+ }
+};
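+
+/* Usage sketch (illustrative only; Widget is a hypothetical class): a type
+   becomes reference counted by deriving from RefCounted<itself> (CRTP).
+   The count starts at 1, so newly created objects are adopted, not ref()'d:
+
+       class Widget : public RefCounted<Widget> {
+       public:
+           static PassRefPtr<Widget> create() { return adoptRef(new Widget); }
+       };
+*/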
+
+template<class T> class RefCountedCustomAllocated : public RefCountedBase, public NoncopyableCustomAllocated {
+public:
+ void deref()
+ {
+ if (derefBase())
+ delete static_cast<T*>(this);
+ }
+
+protected:
+ ~RefCountedCustomAllocated()
+ {
+ }
+};
+
+} // namespace WTF
+
+using WTF::RefCounted;
+using WTF::RefCountedCustomAllocated;
+
+#endif // RefCounted_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCountedLeakCounter.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCountedLeakCounter.cpp
new file mode 100644
index 0000000..80922d3
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCountedLeakCounter.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "config.h"
+#include "RefCountedLeakCounter.h"
+
+#include <wtf/HashCountedSet.h>
+
+namespace WTF {
+
+#ifdef NDEBUG
+
+void RefCountedLeakCounter::suppressMessages(const char*) { }
+void RefCountedLeakCounter::cancelMessageSuppression(const char*) { }
+
+RefCountedLeakCounter::RefCountedLeakCounter(const char*) { }
+RefCountedLeakCounter::~RefCountedLeakCounter() { }
+
+void RefCountedLeakCounter::increment() { }
+void RefCountedLeakCounter::decrement() { }
+
+#else
+
+#define LOG_CHANNEL_PREFIX Log
+static WTFLogChannel LogRefCountedLeaks = { 0x00000000, "", WTFLogChannelOn };
+
+typedef HashCountedSet<const char*, PtrHash<const char*> > ReasonSet;
+static ReasonSet* leakMessageSuppressionReasons;
+
+void RefCountedLeakCounter::suppressMessages(const char* reason)
+{
+ if (!leakMessageSuppressionReasons)
+ leakMessageSuppressionReasons = new ReasonSet;
+ leakMessageSuppressionReasons->add(reason);
+}
+
+void RefCountedLeakCounter::cancelMessageSuppression(const char* reason)
+{
+ ASSERT(leakMessageSuppressionReasons);
+ ASSERT(leakMessageSuppressionReasons->contains(reason));
+ leakMessageSuppressionReasons->remove(reason);
+}
+
+RefCountedLeakCounter::RefCountedLeakCounter(const char* description)
+    : m_count(0) // explicitly start at zero; relying on zero-initialization only works for static-storage instances
+    , m_description(description)
+{
+}
+
+RefCountedLeakCounter::~RefCountedLeakCounter()
+{
+ static bool loggedSuppressionReason;
+ if (m_count) {
+ if (!leakMessageSuppressionReasons || leakMessageSuppressionReasons->isEmpty())
+ LOG(RefCountedLeaks, "LEAK: %u %s", m_count, m_description);
+ else if (!loggedSuppressionReason) {
+ // This logs only one reason. Later we could change it so we log all the reasons.
+ LOG(RefCountedLeaks, "No leak checking done: %s", leakMessageSuppressionReasons->begin()->first);
+ loggedSuppressionReason = true;
+ }
+ }
+}
+
+void RefCountedLeakCounter::increment()
+{
+#if ENABLE(JSC_MULTIPLE_THREADS)
+ atomicIncrement(&m_count);
+#else
+ ++m_count;
+#endif
+}
+
+void RefCountedLeakCounter::decrement()
+{
+#if ENABLE(JSC_MULTIPLE_THREADS)
+ atomicDecrement(&m_count);
+#else
+ --m_count;
+#endif
+}
+
+#endif
+
+} // namespace WTF
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCountedLeakCounter.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCountedLeakCounter.h
new file mode 100644
index 0000000..57cc283
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefCountedLeakCounter.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef RefCountedLeakCounter_h
+#define RefCountedLeakCounter_h
+
+#include "Assertions.h"
+#include "Threading.h"
+
+namespace WTF {
+
+ struct RefCountedLeakCounter {
+ static void suppressMessages(const char*);
+ static void cancelMessageSuppression(const char*);
+
+ explicit RefCountedLeakCounter(const char* description);
+ ~RefCountedLeakCounter();
+
+ void increment();
+ void decrement();
+
+#ifndef NDEBUG
+ private:
+ volatile int m_count;
+ const char* m_description;
+#endif
+ };
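+
+    /* Usage sketch (illustrative only; Widget is a hypothetical class):
+       keep one static counter per class and bump it in the constructor and
+       destructor; outstanding instances are reported when the static
+       counter itself is destroyed at exit:
+
+           static RefCountedLeakCounter widgetCounter("Widget");
+           Widget::Widget() { widgetCounter.increment(); }
+           Widget::~Widget() { widgetCounter.decrement(); }
+    */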
+
+} // namespace WTF
+
+#endif
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefPtr.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefPtr.h
new file mode 100644
index 0000000..74cd0ea
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefPtr.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_RefPtr_h
+#define WTF_RefPtr_h
+
+#include <algorithm>
+#include "AlwaysInline.h"
+#include "FastAllocBase.h"
+
+namespace WTF {
+
+ enum PlacementNewAdoptType { PlacementNewAdopt };
+
+ template <typename T> class PassRefPtr;
+
+ enum HashTableDeletedValueType { HashTableDeletedValue };
+
+ template <typename T> class RefPtr : public FastAllocBase {
+ public:
+ RefPtr() : m_ptr(0) { }
+ RefPtr(T* ptr) : m_ptr(ptr) { if (ptr) ptr->ref(); }
+ RefPtr(const RefPtr& o) : m_ptr(o.m_ptr) { if (T* ptr = m_ptr) ptr->ref(); }
+ // see comment in PassRefPtr.h for why this takes const reference
+ template <typename U> RefPtr(const PassRefPtr<U>&);
+
+ // Special constructor for cases where we overwrite an object in place.
+ RefPtr(PlacementNewAdoptType) { }
+
+ // Hash table deleted values, which are only constructed and never copied or destroyed.
+ RefPtr(HashTableDeletedValueType) : m_ptr(hashTableDeletedValue()) { }
+ bool isHashTableDeletedValue() const { return m_ptr == hashTableDeletedValue(); }
+
+ ~RefPtr() { if (T* ptr = m_ptr) ptr->deref(); }
+
+ template <typename U> RefPtr(const RefPtr<U>& o) : m_ptr(o.get()) { if (T* ptr = m_ptr) ptr->ref(); }
+
+ T* get() const { return m_ptr; }
+
+ void clear() { if (T* ptr = m_ptr) ptr->deref(); m_ptr = 0; }
+ PassRefPtr<T> release() { PassRefPtr<T> tmp = adoptRef(m_ptr); m_ptr = 0; return tmp; }
+
+ T& operator*() const { return *m_ptr; }
+ ALWAYS_INLINE T* operator->() const { return m_ptr; }
+
+ bool operator!() const { return !m_ptr; }
+
+ // This conversion operator allows implicit conversion to bool but not to other integer types.
+#if COMPILER(WINSCW)
+ operator bool() const { return m_ptr; }
+#else
+ typedef T* RefPtr::*UnspecifiedBoolType;
+ operator UnspecifiedBoolType() const { return m_ptr ? &RefPtr::m_ptr : 0; }
+#endif
+
+ RefPtr& operator=(const RefPtr&);
+ RefPtr& operator=(T*);
+ RefPtr& operator=(const PassRefPtr<T>&);
+ template <typename U> RefPtr& operator=(const RefPtr<U>&);
+ template <typename U> RefPtr& operator=(const PassRefPtr<U>&);
+
+ void swap(RefPtr&);
+
+ private:
+ static T* hashTableDeletedValue() { return reinterpret_cast<T*>(-1); }
+
+ T* m_ptr;
+ };
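+
+    /* Usage sketch (illustrative only; Node stands for any RefCounted type):
+       release() hands the reference off as a PassRefPtr without ref-count
+       churn, which is the idiomatic way to return a RefPtr member or local:
+
+           PassRefPtr<Node> takeNode(RefPtr<Node>& slot)
+           {
+               return slot.release(); // slot becomes 0; the count is unchanged
+           }
+    */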
+
+ template <typename T> template <typename U> inline RefPtr<T>::RefPtr(const PassRefPtr<U>& o)
+ : m_ptr(o.releaseRef())
+ {
+ }
+
+ template <typename T> inline RefPtr<T>& RefPtr<T>::operator=(const RefPtr<T>& o)
+ {
+ T* optr = o.get();
+ if (optr)
+ optr->ref();
+ T* ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ ptr->deref();
+ return *this;
+ }
+
+ template <typename T> template <typename U> inline RefPtr<T>& RefPtr<T>::operator=(const RefPtr<U>& o)
+ {
+ T* optr = o.get();
+ if (optr)
+ optr->ref();
+ T* ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ ptr->deref();
+ return *this;
+ }
+
+ template <typename T> inline RefPtr<T>& RefPtr<T>::operator=(T* optr)
+ {
+ if (optr)
+ optr->ref();
+ T* ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ ptr->deref();
+ return *this;
+ }
+
+ template <typename T> inline RefPtr<T>& RefPtr<T>::operator=(const PassRefPtr<T>& o)
+ {
+ T* ptr = m_ptr;
+ m_ptr = o.releaseRef();
+ if (ptr)
+ ptr->deref();
+ return *this;
+ }
+
+ template <typename T> template <typename U> inline RefPtr<T>& RefPtr<T>::operator=(const PassRefPtr<U>& o)
+ {
+ T* ptr = m_ptr;
+ m_ptr = o.releaseRef();
+ if (ptr)
+ ptr->deref();
+ return *this;
+ }
+
+ template <class T> inline void RefPtr<T>::swap(RefPtr<T>& o)
+ {
+ std::swap(m_ptr, o.m_ptr);
+ }
+
+ template <class T> inline void swap(RefPtr<T>& a, RefPtr<T>& b)
+ {
+ a.swap(b);
+ }
+
+ template <typename T, typename U> inline bool operator==(const RefPtr<T>& a, const RefPtr<U>& b)
+ {
+ return a.get() == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator==(const RefPtr<T>& a, U* b)
+ {
+ return a.get() == b;
+ }
+
+ template <typename T, typename U> inline bool operator==(T* a, const RefPtr<U>& b)
+ {
+ return a == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const RefPtr<T>& a, const RefPtr<U>& b)
+ {
+ return a.get() != b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const RefPtr<T>& a, U* b)
+ {
+ return a.get() != b;
+ }
+
+ template <typename T, typename U> inline bool operator!=(T* a, const RefPtr<U>& b)
+ {
+ return a != b.get();
+ }
+
+ template <typename T, typename U> inline RefPtr<T> static_pointer_cast(const RefPtr<U>& p)
+ {
+ return RefPtr<T>(static_cast<T*>(p.get()));
+ }
+
+ template <typename T, typename U> inline RefPtr<T> const_pointer_cast(const RefPtr<U>& p)
+ {
+ return RefPtr<T>(const_cast<T*>(p.get()));
+ }
+
+ template <typename T> inline T* getPtr(const RefPtr<T>& p)
+ {
+ return p.get();
+ }
+
+} // namespace WTF
+
+using WTF::RefPtr;
+using WTF::static_pointer_cast;
+using WTF::const_pointer_cast;
+
+#endif // WTF_RefPtr_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefPtrHashMap.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefPtrHashMap.h
new file mode 100644
index 0000000..b3ccd3a
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RefPtrHashMap.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+namespace WTF {
+
+ // This specialization is a direct copy of HashMap, with overloaded functions
+ // to allow for lookup by pointer instead of RefPtr, avoiding ref-count churn.
+
+ // FIXME: Find a better way that doesn't require an entire copy of the HashMap template.
+
+ template<typename RawKeyType, typename ValueType, typename ValueTraits, typename HashFunctions>
+ struct RefPtrHashMapRawKeyTranslator {
+ typedef typename ValueType::first_type KeyType;
+ typedef typename ValueType::second_type MappedType;
+ typedef typename ValueTraits::FirstTraits KeyTraits;
+ typedef typename ValueTraits::SecondTraits MappedTraits;
+
+ static unsigned hash(RawKeyType key) { return HashFunctions::hash(key); }
+ static bool equal(const KeyType& a, RawKeyType b) { return HashFunctions::equal(a, b); }
+ static void translate(ValueType& location, RawKeyType key, const MappedType& mapped)
+ {
+ location.first = key;
+ location.second = mapped;
+ }
+ };
+
+ template<typename T, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
+ class RefPtrHashMap {
+ private:
+ typedef KeyTraitsArg KeyTraits;
+ typedef MappedTraitsArg MappedTraits;
+ typedef PairHashTraits<KeyTraits, MappedTraits> ValueTraits;
+
+ public:
+ typedef typename KeyTraits::TraitType KeyType;
+ typedef T* RawKeyType;
+ typedef typename MappedTraits::TraitType MappedType;
+ typedef typename ValueTraits::TraitType ValueType;
+
+ private:
+ typedef HashArg HashFunctions;
+
+ typedef HashTable<KeyType, ValueType, PairFirstExtractor<ValueType>,
+ HashFunctions, ValueTraits, KeyTraits> HashTableType;
+
+ typedef RefPtrHashMapRawKeyTranslator<RawKeyType, ValueType, ValueTraits, HashFunctions>
+ RawKeyTranslator;
+
+ public:
+ typedef HashTableIteratorAdapter<HashTableType, ValueType> iterator;
+ typedef HashTableConstIteratorAdapter<HashTableType, ValueType> const_iterator;
+
+ void swap(RefPtrHashMap&);
+
+ int size() const;
+ int capacity() const;
+ bool isEmpty() const;
+
+ // iterators iterate over pairs of keys and values
+ iterator begin();
+ iterator end();
+ const_iterator begin() const;
+ const_iterator end() const;
+
+ iterator find(const KeyType&);
+ iterator find(RawKeyType);
+ const_iterator find(const KeyType&) const;
+ const_iterator find(RawKeyType) const;
+ bool contains(const KeyType&) const;
+ bool contains(RawKeyType) const;
+ MappedType get(const KeyType&) const;
+ MappedType get(RawKeyType) const;
+ MappedType inlineGet(RawKeyType) const;
+
+ // replaces value but not key if key is already present
+ // return value is a pair of the iterator to the key location,
+ // and a boolean that's true if a new value was actually added
+ pair<iterator, bool> set(const KeyType&, const MappedType&);
+ pair<iterator, bool> set(RawKeyType, const MappedType&);
+
+ // does nothing if key is already present
+ // return value is a pair of the iterator to the key location,
+ // and a boolean that's true if a new value was actually added
+ pair<iterator, bool> add(const KeyType&, const MappedType&);
+ pair<iterator, bool> add(RawKeyType, const MappedType&);
+
+ void remove(const KeyType&);
+ void remove(RawKeyType);
+ void remove(iterator);
+ void clear();
+
+ MappedType take(const KeyType&); // efficient combination of get with remove
+ MappedType take(RawKeyType); // efficient combination of get with remove
+
+ private:
+ pair<iterator, bool> inlineAdd(const KeyType&, const MappedType&);
+ pair<iterator, bool> inlineAdd(RawKeyType, const MappedType&);
+
+ HashTableType m_impl;
+ };
+
+ template<typename T, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg>
+ class HashMap<RefPtr<T>, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg> :
+ public RefPtrHashMap<T, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg>
+ {
+ };
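+
+    /* Usage sketch (illustrative only; Node is a hypothetical type): the
+       raw-pointer overloads let callers probe the map without constructing
+       a temporary RefPtr, avoiding a ref()/deref() pair per lookup:
+
+           HashMap<RefPtr<Node>, int> map;
+           map.set(adoptRef(new Node), 1);
+           Node* raw = ...;
+           int value = map.get(raw); // no RefPtr<Node> temporary is created
+    */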
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void RefPtrHashMap<T, U, V, W, X>::swap(RefPtrHashMap& other)
+ {
+ m_impl.swap(other.m_impl);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline int RefPtrHashMap<T, U, V, W, X>::size() const
+ {
+ return m_impl.size();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline int RefPtrHashMap<T, U, V, W, X>::capacity() const
+ {
+ return m_impl.capacity();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline bool RefPtrHashMap<T, U, V, W, X>::isEmpty() const
+ {
+ return m_impl.isEmpty();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename RefPtrHashMap<T, U, V, W, X>::iterator RefPtrHashMap<T, U, V, W, X>::begin()
+ {
+ return m_impl.begin();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename RefPtrHashMap<T, U, V, W, X>::iterator RefPtrHashMap<T, U, V, W, X>::end()
+ {
+ return m_impl.end();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename RefPtrHashMap<T, U, V, W, X>::const_iterator RefPtrHashMap<T, U, V, W, X>::begin() const
+ {
+ return m_impl.begin();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename RefPtrHashMap<T, U, V, W, X>::const_iterator RefPtrHashMap<T, U, V, W, X>::end() const
+ {
+ return m_impl.end();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename RefPtrHashMap<T, U, V, W, X>::iterator RefPtrHashMap<T, U, V, W, X>::find(const KeyType& key)
+ {
+ return m_impl.find(key);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename RefPtrHashMap<T, U, V, W, X>::iterator RefPtrHashMap<T, U, V, W, X>::find(RawKeyType key)
+ {
+ return m_impl.template find<RawKeyType, RawKeyTranslator>(key);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename RefPtrHashMap<T, U, V, W, X>::const_iterator RefPtrHashMap<T, U, V, W, X>::find(const KeyType& key) const
+ {
+ return m_impl.find(key);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline typename RefPtrHashMap<T, U, V, W, X>::const_iterator RefPtrHashMap<T, U, V, W, X>::find(RawKeyType key) const
+ {
+ return m_impl.template find<RawKeyType, RawKeyTranslator>(key);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline bool RefPtrHashMap<T, U, V, W, X>::contains(const KeyType& key) const
+ {
+ return m_impl.contains(key);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline bool RefPtrHashMap<T, U, V, W, X>::contains(RawKeyType key) const
+ {
+ return m_impl.template contains<RawKeyType, RawKeyTranslator>(key);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline pair<typename RefPtrHashMap<T, U, V, W, X>::iterator, bool>
+ RefPtrHashMap<T, U, V, W, X>::inlineAdd(const KeyType& key, const MappedType& mapped)
+ {
+ typedef HashMapTranslator<ValueType, ValueTraits, HashFunctions> TranslatorType;
+ pair<typename HashTableType::iterator, bool> p = m_impl.template add<KeyType, MappedType, TranslatorType>(key, mapped);
+ return make_pair<typename RefPtrHashMap<T, U, V, W, X>::iterator, bool>(
+ typename RefPtrHashMap<T, U, V, W, X>::iterator(p.first), p.second);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline pair<typename RefPtrHashMap<T, U, V, W, X>::iterator, bool>
+ RefPtrHashMap<T, U, V, W, X>::inlineAdd(RawKeyType key, const MappedType& mapped)
+ {
+ pair<typename HashTableType::iterator, bool> p = m_impl.template add<RawKeyType, MappedType, RawKeyTranslator>(key, mapped);
+ return make_pair<typename RefPtrHashMap<T, U, V, W, X>::iterator, bool>(
+ typename RefPtrHashMap<T, U, V, W, X>::iterator(p.first), p.second);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ pair<typename RefPtrHashMap<T, U, V, W, X>::iterator, bool>
+ RefPtrHashMap<T, U, V, W, X>::set(const KeyType& key, const MappedType& mapped)
+ {
+ pair<iterator, bool> result = inlineAdd(key, mapped);
+ if (!result.second) {
+ // The add() call above found an existing entry, so overwrite its mapped value.
+ result.first->second = mapped;
+ }
+ return result;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ pair<typename RefPtrHashMap<T, U, V, W, X>::iterator, bool>
+ RefPtrHashMap<T, U, V, W, X>::set(RawKeyType key, const MappedType& mapped)
+ {
+ pair<iterator, bool> result = inlineAdd(key, mapped);
+ if (!result.second) {
+ // The add() call above found an existing entry, so overwrite its mapped value.
+ result.first->second = mapped;
+ }
+ return result;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ pair<typename RefPtrHashMap<T, U, V, W, X>::iterator, bool>
+ RefPtrHashMap<T, U, V, W, X>::add(const KeyType& key, const MappedType& mapped)
+ {
+ return inlineAdd(key, mapped);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ pair<typename RefPtrHashMap<T, U, V, W, X>::iterator, bool>
+ RefPtrHashMap<T, U, V, W, X>::add(RawKeyType key, const MappedType& mapped)
+ {
+ return inlineAdd(key, mapped);
+ }
+
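+ // Editorial note (not in the original source): set() and add() differ only
+ // when the key is already present; set() overwrites the mapped value, while
+ // add() leaves it unchanged. For example (hypothetical map from RefPtr keys
+ // to int):
+ //
+ // map.add(key, 1); // inserts <key, 1>; result.second is true
+ // map.add(key, 2); // key exists, value stays 1; result.second is false
+ // map.set(key, 2); // key exists, value is overwritten to 2
+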
+ template<typename T, typename U, typename V, typename W, typename MappedTraits>
+ typename RefPtrHashMap<T, U, V, W, MappedTraits>::MappedType
+ RefPtrHashMap<T, U, V, W, MappedTraits>::get(const KeyType& key) const
+ {
+ ValueType* entry = const_cast<HashTableType&>(m_impl).lookup(key);
+ if (!entry)
+ return MappedTraits::emptyValue();
+ return entry->second;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename MappedTraits>
+ inline typename RefPtrHashMap<T, U, V, W, MappedTraits>::MappedType
+ RefPtrHashMap<T, U, V, W, MappedTraits>::inlineGet(RawKeyType key) const
+ {
+ ValueType* entry = const_cast<HashTableType&>(m_impl).template lookup<RawKeyType, RawKeyTranslator>(key);
+ if (!entry)
+ return MappedTraits::emptyValue();
+ return entry->second;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename MappedTraits>
+ typename RefPtrHashMap<T, U, V, W, MappedTraits>::MappedType
+ RefPtrHashMap<T, U, V, W, MappedTraits>::get(RawKeyType key) const
+ {
+ return inlineGet(key);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void RefPtrHashMap<T, U, V, W, X>::remove(iterator it)
+ {
+ if (it.m_impl == m_impl.end())
+ return;
+ m_impl.checkTableConsistency();
+ m_impl.removeWithoutEntryConsistencyCheck(it.m_impl);
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void RefPtrHashMap<T, U, V, W, X>::remove(const KeyType& key)
+ {
+ remove(find(key));
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void RefPtrHashMap<T, U, V, W, X>::remove(RawKeyType key)
+ {
+ remove(find(key));
+ }
+
+ template<typename T, typename U, typename V, typename W, typename X>
+ inline void RefPtrHashMap<T, U, V, W, X>::clear()
+ {
+ m_impl.clear();
+ }
+
+ template<typename T, typename U, typename V, typename W, typename MappedTraits>
+ typename RefPtrHashMap<T, U, V, W, MappedTraits>::MappedType
+ RefPtrHashMap<T, U, V, W, MappedTraits>::take(const KeyType& key)
+ {
+ // This can probably be made more efficient to avoid ref/deref churn.
+ iterator it = find(key);
+ if (it == end())
+ return MappedTraits::emptyValue();
+ typename RefPtrHashMap<T, U, V, W, MappedTraits>::MappedType result = it->second;
+ remove(it);
+ return result;
+ }
+
+ template<typename T, typename U, typename V, typename W, typename MappedTraits>
+ typename RefPtrHashMap<T, U, V, W, MappedTraits>::MappedType
+ RefPtrHashMap<T, U, V, W, MappedTraits>::take(RawKeyType key)
+ {
+ // This can probably be made more efficient to avoid ref/deref churn.
+ iterator it = find(key);
+ if (it == end())
+ return MappedTraits::emptyValue();
+ typename RefPtrHashMap<T, U, V, W, MappedTraits>::MappedType result = it->second;
+ remove(it);
+ return result;
+ }
+
+} // namespace WTF
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RetainPtr.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RetainPtr.h
new file mode 100644
index 0000000..77f25e0
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/RetainPtr.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef RetainPtr_h
+#define RetainPtr_h
+
+#include "TypeTraits.h"
+#include <algorithm>
+#include <CoreFoundation/CoreFoundation.h>
+
+#ifdef __OBJC__
+#import <Foundation/Foundation.h>
+#endif
+
+namespace WTF {
+
+ // Unlike most of our smart pointers, RetainPtr can take either the pointer type or the pointed-to type,
+ // so both RetainPtr<NSDictionary> and RetainPtr<CFDictionaryRef> will work.
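+ //
+ // Illustrative usage (editorial sketch, not part of the original header):
+ //
+ // RetainPtr<CFStringRef> s(AdoptCF, CFStringCreateWithCString(0, "hi", kCFStringEncodingUTF8));
+ // // The string is released automatically when s goes out of scope.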
+
+ enum AdoptCFTag { AdoptCF };
+ enum AdoptNSTag { AdoptNS };
+
+#ifdef __OBJC__
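+ // Editorial note: this rebalances an adopted NS reference so that it can
+ // later be dropped uniformly with CFRelease in ~RetainPtr.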
+ inline void adoptNSReference(id ptr)
+ {
+ if (ptr) {
+ CFRetain(ptr);
+ [ptr release];
+ }
+ }
+#endif
+
+ template <typename T> class RetainPtr {
+ public:
+ typedef typename RemovePointer<T>::Type ValueType;
+ typedef ValueType* PtrType;
+
+ RetainPtr() : m_ptr(0) {}
+ RetainPtr(PtrType ptr) : m_ptr(ptr) { if (ptr) CFRetain(ptr); }
+
+ RetainPtr(AdoptCFTag, PtrType ptr) : m_ptr(ptr) { }
+ RetainPtr(AdoptNSTag, PtrType ptr) : m_ptr(ptr) { adoptNSReference(ptr); }
+
+ RetainPtr(const RetainPtr& o) : m_ptr(o.m_ptr) { if (PtrType ptr = m_ptr) CFRetain(ptr); }
+
+ ~RetainPtr() { if (PtrType ptr = m_ptr) CFRelease(ptr); }
+
+ template <typename U> RetainPtr(const RetainPtr<U>& o) : m_ptr(o.get()) { if (PtrType ptr = m_ptr) CFRetain(ptr); }
+
+ PtrType get() const { return m_ptr; }
+
+ PtrType releaseRef() { PtrType tmp = m_ptr; m_ptr = 0; return tmp; }
+
+ PtrType operator->() const { return m_ptr; }
+
+ bool operator!() const { return !m_ptr; }
+
+ // This conversion operator allows implicit conversion to bool but not to other integer types.
+ typedef PtrType RetainPtr::*UnspecifiedBoolType;
+ operator UnspecifiedBoolType() const { return m_ptr ? &RetainPtr::m_ptr : 0; }
+
+ RetainPtr& operator=(const RetainPtr&);
+ template <typename U> RetainPtr& operator=(const RetainPtr<U>&);
+ RetainPtr& operator=(PtrType);
+ template <typename U> RetainPtr& operator=(U*);
+
+ void adoptCF(PtrType);
+ void adoptNS(PtrType);
+
+ void swap(RetainPtr&);
+
+ private:
+ PtrType m_ptr;
+ };
+
+ template <typename T> inline RetainPtr<T>& RetainPtr<T>::operator=(const RetainPtr<T>& o)
+ {
+ PtrType optr = o.get();
+ if (optr)
+ CFRetain(optr);
+ PtrType ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ CFRelease(ptr);
+ return *this;
+ }
+
+ template <typename T> template <typename U> inline RetainPtr<T>& RetainPtr<T>::operator=(const RetainPtr<U>& o)
+ {
+ PtrType optr = o.get();
+ if (optr)
+ CFRetain(optr);
+ PtrType ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ CFRelease(ptr);
+ return *this;
+ }
+
+ template <typename T> inline RetainPtr<T>& RetainPtr<T>::operator=(PtrType optr)
+ {
+ if (optr)
+ CFRetain(optr);
+ PtrType ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ CFRelease(ptr);
+ return *this;
+ }
+
+ template <typename T> inline void RetainPtr<T>::adoptCF(PtrType optr)
+ {
+ PtrType ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ CFRelease(ptr);
+ }
+
+ template <typename T> inline void RetainPtr<T>::adoptNS(PtrType optr)
+ {
+ adoptNSReference(optr);
+
+ PtrType ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ CFRelease(ptr);
+ }
+
+ template <typename T> template <typename U> inline RetainPtr<T>& RetainPtr<T>::operator=(U* optr)
+ {
+ if (optr)
+ CFRetain(optr);
+ PtrType ptr = m_ptr;
+ m_ptr = optr;
+ if (ptr)
+ CFRelease(ptr);
+ return *this;
+ }
+
+ template <class T> inline void RetainPtr<T>::swap(RetainPtr<T>& o)
+ {
+ std::swap(m_ptr, o.m_ptr);
+ }
+
+ template <class T> inline void swap(RetainPtr<T>& a, RetainPtr<T>& b)
+ {
+ a.swap(b);
+ }
+
+ template <typename T, typename U> inline bool operator==(const RetainPtr<T>& a, const RetainPtr<U>& b)
+ {
+ return a.get() == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator==(const RetainPtr<T>& a, U* b)
+ {
+ return a.get() == b;
+ }
+
+ template <typename T, typename U> inline bool operator==(T* a, const RetainPtr<U>& b)
+ {
+ return a == b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const RetainPtr<T>& a, const RetainPtr<U>& b)
+ {
+ return a.get() != b.get();
+ }
+
+ template <typename T, typename U> inline bool operator!=(const RetainPtr<T>& a, U* b)
+ {
+ return a.get() != b;
+ }
+
+ template <typename T, typename U> inline bool operator!=(T* a, const RetainPtr<U>& b)
+ {
+ return a != b.get();
+ }
+
+} // namespace WTF
+
+using WTF::AdoptCF;
+using WTF::AdoptNS;
+using WTF::RetainPtr;
+
+#endif // RetainPtr_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/SegmentedVector.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/SegmentedVector.h
new file mode 100644
index 0000000..065c19c
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/SegmentedVector.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SegmentedVector_h
+#define SegmentedVector_h
+
+#include <wtf/Vector.h>
+
+namespace WTF {
+
+ // An iterator for SegmentedVector. It supports only the prefix ++ operator.
+ template <typename T, size_t SegmentSize> class SegmentedVector;
+ template <typename T, size_t SegmentSize> class SegmentedVectorIterator {
+ private:
+ friend class SegmentedVector<T, SegmentSize>;
+ public:
+ typedef SegmentedVectorIterator<T, SegmentSize> Iterator;
+
+ ~SegmentedVectorIterator() { }
+
+ T& operator*() const { return m_vector.m_segments.at(m_segment)->at(m_index); }
+ T* operator->() const { return &m_vector.m_segments.at(m_segment)->at(m_index); }
+
+ // Only prefix ++ operator supported
+ Iterator& operator++()
+ {
+ ASSERT(m_index != SegmentSize);
+ ++m_index;
+ if (m_index >= m_vector.m_segments.at(m_segment)->size()) {
+ if (m_segment + 1 < m_vector.m_segments.size()) {
+ ASSERT(m_vector.m_segments.at(m_segment)->size() > 0);
+ ++m_segment;
+ m_index = 0;
+ } else {
+ // Points to the "end" symbol
+ m_segment = 0;
+ m_index = SegmentSize;
+ }
+ }
+ return *this;
+ }
+
+ bool operator==(const Iterator& other) const
+ {
+ return (m_index == other.m_index && m_segment == other.m_segment && &m_vector == &other.m_vector);
+ }
+
+ bool operator!=(const Iterator& other) const
+ {
+ return (m_index != other.m_index || m_segment != other.m_segment || &m_vector != &other.m_vector);
+ }
+
+ SegmentedVectorIterator& operator=(const SegmentedVectorIterator<T, SegmentSize>& other)
+ {
+ // m_vector is a reference and cannot be reseated; assigning through it
+ // would copy the whole vector, so only same-vector assignment is supported.
+ ASSERT(&m_vector == &other.m_vector);
+ m_segment = other.m_segment;
+ m_index = other.m_index;
+ return *this;
+ }
+
+ private:
+ SegmentedVectorIterator(SegmentedVector<T, SegmentSize>& vector, size_t segment, size_t index)
+ : m_vector(vector)
+ , m_segment(segment)
+ , m_index(index)
+ {
+ }
+
+ SegmentedVector<T, SegmentSize>& m_vector;
+ size_t m_segment;
+ size_t m_index;
+ };
+
+ // SegmentedVector is just like Vector, but it doesn't move the values
+ // stored in its buffer when it grows. Therefore, it is safe to keep
+ // pointers into a SegmentedVector.
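+ //
+ // Illustrative usage (editorial sketch, not part of the original file):
+ //
+ // SegmentedVector<int, 8> v;
+ // v.append(1);
+ // int* p = &v.last(); // stays valid across later append() calls,
+ // v.append(2); // since existing segments never move.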
+ template <typename T, size_t SegmentSize> class SegmentedVector {
+ friend class SegmentedVectorIterator<T, SegmentSize>;
+ public:
+ typedef SegmentedVectorIterator<T, SegmentSize> Iterator;
+
+ SegmentedVector()
+ : m_size(0)
+ {
+ m_segments.append(&m_inlineSegment);
+ }
+
+ ~SegmentedVector()
+ {
+ deleteAllSegments();
+ }
+
+ size_t size() const { return m_size; }
+
+ T& at(size_t index)
+ {
+ if (index < SegmentSize)
+ return m_inlineSegment[index];
+ return segmentFor(index)->at(subscriptFor(index));
+ }
+
+ T& operator[](size_t index)
+ {
+ return at(index);
+ }
+
+ T& last()
+ {
+ return at(size() - 1);
+ }
+
+ template <typename U> void append(const U& value)
+ {
+ ++m_size;
+
+ if (m_size <= SegmentSize) {
+ m_inlineSegment.uncheckedAppend(value);
+ return;
+ }
+
+ if (!segmentExistsFor(m_size - 1))
+ m_segments.append(new Segment);
+ segmentFor(m_size - 1)->uncheckedAppend(value);
+ }
+
+ T& alloc()
+ {
+ append<T>(T());
+ return last();
+ }
+
+ void removeLast()
+ {
+ if (m_size <= SegmentSize)
+ m_inlineSegment.removeLast();
+ else
+ segmentFor(m_size - 1)->removeLast();
+ --m_size;
+ }
+
+ void grow(size_t size)
+ {
+ ASSERT(size > m_size);
+ ensureSegmentsFor(size);
+ m_size = size;
+ }
+
+ void clear()
+ {
+ deleteAllSegments();
+ m_segments.resize(1);
+ m_inlineSegment.clear();
+ m_size = 0;
+ }
+
+ Iterator begin()
+ {
+ return Iterator(*this, 0, m_size ? 0 : SegmentSize);
+ }
+
+ Iterator end()
+ {
+ return Iterator(*this, 0, SegmentSize);
+ }
+
+ private:
+ typedef Vector<T, SegmentSize> Segment;
+
+ void deleteAllSegments()
+ {
+ // Skip the first segment, because it's our inline segment, which was
+ // not created by new.
+ for (size_t i = 1; i < m_segments.size(); i++)
+ delete m_segments[i];
+ }
+
+ bool segmentExistsFor(size_t index)
+ {
+ return index / SegmentSize < m_segments.size();
+ }
+
+ Segment* segmentFor(size_t index)
+ {
+ return m_segments[index / SegmentSize];
+ }
+
+ size_t subscriptFor(size_t index)
+ {
+ return index % SegmentSize;
+ }
+
+ void ensureSegmentsFor(size_t size)
+ {
+ size_t segmentCount = m_size / SegmentSize;
+ if (m_size % SegmentSize)
+ ++segmentCount;
+ segmentCount = std::max<size_t>(segmentCount, 1); // We always have at least our inline segment.
+
+ size_t neededSegmentCount = size / SegmentSize;
+ if (size % SegmentSize)
+ ++neededSegmentCount;
+
+ // Fill up to N - 1 segments.
+ size_t end = neededSegmentCount - 1;
+ for (size_t i = segmentCount - 1; i < end; ++i)
+ ensureSegment(i, SegmentSize);
+
+ // Grow segment N to accommodate the remainder.
+ ensureSegment(end, subscriptFor(size - 1) + 1);
+ }
+
+ void ensureSegment(size_t segmentIndex, size_t size)
+ {
+ ASSERT(segmentIndex <= m_segments.size());
+ if (segmentIndex == m_segments.size())
+ m_segments.append(new Segment);
+ m_segments[segmentIndex]->grow(size);
+ }
+
+ size_t m_size;
+ Segment m_inlineSegment;
+ Vector<Segment*, 32> m_segments;
+ };
+
+} // namespace WTF
+
+#endif // SegmentedVector_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/StdLibExtras.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/StdLibExtras.h
new file mode 100644
index 0000000..afc5e8a
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/StdLibExtras.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_StdLibExtras_h
+#define WTF_StdLibExtras_h
+
+#include <wtf/Platform.h>
+#include <wtf/Assertions.h>
+
+// Use this macro to declare and define a static local variable (static T;)
+// that is intentionally leaked so that its destructor is not called at exit.
+// Using this macro also works around a compiler bug present in Apple's
+// version of GCC 4.0.1.
+#if COMPILER(GCC) && defined(__APPLE_CC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 0 && __GNUC_PATCHLEVEL__ == 1
+#define DEFINE_STATIC_LOCAL(type, name, arguments) \
+ static type* name##Ptr = new type arguments; \
+ type& name = *name##Ptr
+#else
+#define DEFINE_STATIC_LOCAL(type, name, arguments) \
+ static type& name = *new type arguments
+#endif
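+
+// Illustrative usage (editorial sketch, not part of the original header):
+//
+// static Vector<int>& sharedCache()
+// {
+//     DEFINE_STATIC_LOCAL(Vector<int>, cache, ());
+//     return cache; // constructed on first use, intentionally never destroyed
+// }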
+
+namespace WTF {
+
+ /*
+ * reinterpret_cast cannot portably reinterpret one value type as another
+ * of the same size; bitwise_cast copies the bits through a union instead.
+ */
+ template<typename TO, typename FROM>
+ TO bitwise_cast(FROM from)
+ {
+ COMPILE_ASSERT(sizeof(TO) == sizeof(FROM), WTF_bitwise_cast_sizeof_casted_types_is_equal);
+ union {
+ FROM from;
+ TO to;
+ } u;
+ u.from = from;
+ return u.to;
+ }
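+
+ // For example (editorial note): bitwise_cast<uint64_t>(1.0) yields the
+ // IEEE 754 bit pattern of the double 1.0, which an ordinary static_cast
+ // of the value would not produce.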
+
+} // namespace WTF
+
+#endif // WTF_StdLibExtras_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/StringExtras.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/StringExtras.h
new file mode 100644
index 0000000..1c23390
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/StringExtras.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2006 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_StringExtras_h
+#define WTF_StringExtras_h
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#if HAVE(STRINGS_H)
+#include <strings.h>
+#endif
+
+#if COMPILER(MSVC)
+
+inline int snprintf(char* buffer, size_t count, const char* format, ...)
+{
+ int result;
+ va_list args;
+ va_start(args, format);
+ result = _vsnprintf(buffer, count, format, args);
+ va_end(args);
+ return result;
+}
+
+#if COMPILER(MSVC7) || PLATFORM(WINCE)
+
+inline int vsnprintf(char* buffer, size_t count, const char* format, va_list args)
+{
+ return _vsnprintf(buffer, count, format, args);
+}
+
+#endif
+
+#if PLATFORM(WINCE)
+
+inline int strnicmp(const char* string1, const char* string2, size_t count)
+{
+ return _strnicmp(string1, string2, count);
+}
+
+inline int stricmp(const char* string1, const char* string2)
+{
+ return _stricmp(string1, string2);
+}
+
+inline char* strdup(const char* strSource)
+{
+ return _strdup(strSource);
+}
+
+#endif
+
+inline int strncasecmp(const char* s1, const char* s2, size_t len)
+{
+ return strnicmp(s1, s2, len);
+}
+
+inline int strcasecmp(const char* s1, const char* s2)
+{
+ return stricmp(s1, s2);
+}
+
+#endif // COMPILER(MSVC)
+
+#endif // WTF_StringExtras_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCPackedCache.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCPackedCache.h
new file mode 100644
index 0000000..0464f8f
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCPackedCache.h
@@ -0,0 +1,234 @@
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Geoff Pike
+//
+// This file provides a minimal cache that can hold a <key, value> pair
+// with little if any wasted space. The types of the key and value
+// must be unsigned integral types or at least have unsigned semantics
+// for >>, casting, and similar operations.
+//
+// Synchronization is not provided. However, the cache is implemented
+// as an array of cache entries whose type is chosen at compile time.
+// If a[i] is atomic on your hardware for the chosen array type then
+// raciness will not necessarily lead to bugginess. The cache entries
+// must be large enough to hold a partial key and a value packed
+// together. The partial keys are bit strings of length
+// kKeybits - kHashbits, and the values are bit strings of length kValuebits.
+//
+// In an effort to use minimal space, every cache entry represents
+// some <key, value> pair; the class provides no way to mark a cache
+// entry as empty or uninitialized. In practice, you may want to have
+// reserved keys or values to get around this limitation. For example, in
+// tcmalloc's PageID-to-sizeclass cache, a value of 0 is used as
+// "unknown sizeclass."
+//
+// Usage Considerations
+// --------------------
+//
+// kHashbits controls the size of the cache. The best value for
+// kHashbits will of course depend on the application. Perhaps try
+// tuning the value of kHashbits by measuring different values on your
+// favorite benchmark. Also remember not to be a pig; other
+// programs that need resources may suffer if you are.
+//
+// The main uses for this class will be when performance is
+// critical and there's a convenient type to hold the cache's
+// entries. As described above, the number of bits required
+// for a cache entry is (kKeybits - kHashbits) + kValuebits. Suppose
+// kKeybits + kValuebits is 43. Then it probably makes sense to
+// choose kHashbits >= 11 so that cache entries fit in a uint32.
+//
+// On the other hand, suppose kKeybits = kValuebits = 64. Then
+// using this class may be less worthwhile. You'll probably
+// be using 128 bits for each entry anyway, so maybe just pick
+// a hash function, H, and use an array indexed by H(key):
+// void Put(K key, V value) { a_[H(key)] = pair<K, V>(key, value); }
+// V GetOrDefault(K key, V default_value) { const pair<K, V>& p = a_[H(key)]; ... }
+// etc.
+//
+// Further Details
+// ---------------
+//
+// For caches used only by one thread, the following is true:
+// 1. For a cache c,
+// (c.Put(key, value), c.GetOrDefault(key, 0)) == value
+// and
+// (c.Put(key, value), <...>, c.GetOrDefault(key, 0)) == value
+// if the elided code contains no c.Put calls.
+//
+// 2. Has(key) will return false if no <key, value> pair with that key
+// has ever been Put. However, a newly initialized cache will have
+// some <key, value> pairs already present. When you create a new
+// cache, you must specify an "initial value." The initialization
+// procedure is equivalent to Clear(initial_value), which is
+// equivalent to Put(k, initial_value) for all keys k from 0 to
+// 2^kHashbits - 1.
+//
+// 3. If key and key' differ then the only way Put(key, value) may
+// cause Has(key') to change is that Has(key') may change from true to
+// false. Furthermore, a Put() call that doesn't change Has(key')
+// doesn't change GetOrDefault(key', ...) either.
+//
+// Implementation details:
+//
+// This is a direct-mapped cache with 2^kHashbits entries;
+// the hash function simply takes the low bits of the key.
+// So, we don't have to store the low bits of the key in the entries.
+// Instead, an entry is the high bits of a key and a value, packed
+// together. E.g., a 20 bit key and a 7 bit value only require
+// a uint16 for each entry if kHashbits >= 11.
+//
+// Alternatives to this scheme will be added as needed.
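+//
+// Illustrative usage (editorial sketch, not part of the original file):
+//
+// PackedCache<20, uint16_t> cache(0); // reserve value 0 as "unknown"
+// cache.Put(0x12345, 7);
+// size_t v = cache.GetOrDefault(0x12345, 0); // 7, unless a colliding Put evicted it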
+
+#ifndef TCMALLOC_PACKED_CACHE_INL_H__
+#define TCMALLOC_PACKED_CACHE_INL_H__
+
+#ifndef WTF_CHANGES
+#include "base/basictypes.h" // for COMPILE_ASSERT
+#include "base/logging.h" // for DCHECK
+#endif
+
+#ifndef DCHECK_EQ
+#define DCHECK_EQ(val1, val2) ASSERT((val1) == (val2))
+#endif
+
+// A safe way of doing "(1 << n) - 1" -- without worrying about overflow
+// Note this will all be resolved to a constant expression at compile-time
+#define N_ONES_(IntType, N) \
+ ( (N) == 0 ? 0 : ((static_cast<IntType>(1) << ((N)-1))-1 + \
+ (static_cast<IntType>(1) << ((N)-1))) )
+
+// The types K and V provide upper bounds on the number of valid keys
+// and values, but we explicitly require the keys to be less than
+// 2^kKeybits and the values to be less than 2^kValuebits. The size of
+// the table is controlled by kHashbits, and the type of each entry in
+// the cache is T. See also the big comment at the top of the file.
+template <int kKeybits, typename T>
+class PackedCache {
+ public:
+ typedef uintptr_t K;
+ typedef size_t V;
+ static const size_t kHashbits = 12;
+ static const size_t kValuebits = 8;
+
+ explicit PackedCache(V initial_value) {
+ COMPILE_ASSERT(kKeybits <= sizeof(K) * 8, key_size);
+ COMPILE_ASSERT(kValuebits <= sizeof(V) * 8, value_size);
+ COMPILE_ASSERT(kHashbits <= kKeybits, hash_function);
+ COMPILE_ASSERT(kKeybits - kHashbits + kValuebits <= kTbits,
+ entry_size_must_be_big_enough);
+ Clear(initial_value);
+ }
+
+ void Put(K key, V value) {
+ DCHECK_EQ(key, key & kKeyMask);
+ DCHECK_EQ(value, value & kValueMask);
+ array_[Hash(key)] = static_cast<T>(KeyToUpper(key) | value);
+ }
+
+ bool Has(K key) const {
+ DCHECK_EQ(key, key & kKeyMask);
+ return KeyMatch(array_[Hash(key)], key);
+ }
+
+ V GetOrDefault(K key, V default_value) const {
+ // As with other code in this class, we touch array_ as few times
+ // as we can. Assuming entries are read atomically (e.g., their
+ // type is uintptr_t on most hardware) then certain races are
+ // harmless.
+ DCHECK_EQ(key, key & kKeyMask);
+ T entry = array_[Hash(key)];
+ return KeyMatch(entry, key) ? EntryToValue(entry) : default_value;
+ }
+
+ void Clear(V value) {
+ DCHECK_EQ(value, value & kValueMask);
+ for (int i = 0; i < 1 << kHashbits; i++) {
+ array_[i] = static_cast<T>(value);
+ }
+ }
+
+ private:
+ // We are going to pack a value and the upper part of a key into
+ // an entry of type T. The UPPER type is for the upper part of a key,
+ // after the key has been masked and shifted for inclusion in an entry.
+ typedef T UPPER;
+
+ static V EntryToValue(T t) { return t & kValueMask; }
+
+ static UPPER EntryToUpper(T t) { return t & kUpperMask; }
+
+ // If v is a V and u is an UPPER then you can create an entry by
+ // doing u | v. kHashbits determines where in a K to find the upper
+ // part of the key, and kValuebits determines where in the entry to put
+ // it.
+ static UPPER KeyToUpper(K k) {
+ const int shift = kHashbits - kValuebits;
+ // Assume kHashbits >= kValuebits. It would be easy to lift this assumption.
+ return static_cast<T>(k >> shift) & kUpperMask;
+ }
+
+ // This is roughly the inverse of KeyToUpper(). Some of the key has been
+ // thrown away, since KeyToUpper() masks off the low bits of the key.
+ static K UpperToPartialKey(UPPER u) {
+ DCHECK_EQ(u, u & kUpperMask);
+ const int shift = kHashbits - kValuebits;
+ // Assume kHashbits >= kValuebits. It would be easy to lift this assumption.
+ return static_cast<K>(u) << shift;
+ }
+
+ static size_t Hash(K key) {
+ return static_cast<size_t>(key) & N_ONES_(size_t, kHashbits);
+ }
+
+ // Does the entry's partial key match the relevant part of the given key?
+ static bool KeyMatch(T entry, K key) {
+ return ((KeyToUpper(key) ^ entry) & kUpperMask) == 0;
+ }
+
+ static const size_t kTbits = 8 * sizeof(T);
+ static const int kUpperbits = kKeybits - kHashbits;
+
+ // For masking a K.
+ static const K kKeyMask = N_ONES_(K, kKeybits);
+
+ // For masking a T.
+ static const T kUpperMask = N_ONES_(T, kUpperbits) << kValuebits;
+
+ // For masking a V or a T.
+ static const V kValueMask = N_ONES_(V, kValuebits);
+
+ T array_[1 << kHashbits];
+};
+
+#undef N_ONES_
+
+#endif // TCMALLOC_PACKED_CACHE_INL_H__
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCPageMap.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCPageMap.h
new file mode 100644
index 0000000..3f56c24
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCPageMap.h
@@ -0,0 +1,316 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Sanjay Ghemawat <opensource@google.com>
+//
+// A data structure used by the caching malloc. It maps from page# to
+// a pointer that contains info about that page. We use two
+// representations: one for 32-bit addresses, and another for 64 bit
+// addresses. Both representations provide the same interface. The
+// first representation is implemented as a flat array, the second as
+// a three-level radix tree that strips away approximately one third of
+// the bits at every level.
+//
+// The BITS parameter should be the number of bits required to hold
+// a page number. E.g., with 32 bit pointers and 4K pages (i.e.,
+// page offset fits in lower 12 bits), BITS == 20.
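+//
+// Illustrative usage (editorial sketch; MyAlloc is a hypothetical
+// malloc-like allocator with signature void* (*)(size_t)):
+//
+// TCMalloc_PageMap2<20> map;
+// map.init(&MyAlloc);
+// if (map.Ensure(page_number, 1))
+// map.set(page_number, span_info);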
+
+#ifndef TCMALLOC_PAGEMAP_H__
+#define TCMALLOC_PAGEMAP_H__
+
+#if HAVE(STDINT_H)
+#include <stdint.h>
+#elif HAVE(INTTYPES_H)
+#include <inttypes.h>
+#else
+#include <sys/types.h>
+#endif
+
+#include <string.h>
+#include "Assertions.h"
+
+// Single-level array
+template <int BITS>
+class TCMalloc_PageMap1 {
+ private:
+ void** array_;
+
+ public:
+ typedef uintptr_t Number;
+
+ void init(void* (*allocator)(size_t)) {
+ array_ = reinterpret_cast<void**>((*allocator)(sizeof(void*) << BITS));
+ memset(array_, 0, sizeof(void*) << BITS);
+ }
+
+ // Ensure that the map contains initialized entries "x .. x+n-1".
+ // Returns true if successful, false if we could not allocate memory.
+ bool Ensure(Number x, size_t n) {
+ // Nothing to do since the flat array was allocated at startup.
+ return true;
+ }
+
+ void PreallocateMoreMemory() {}
+
+ // REQUIRES "k" is in range "[0,2^BITS-1]".
+ // REQUIRES "k" has been ensured before.
+ //
+ // Return the current value for KEY. Returns "Value()" if not
+ // yet set.
+ void* get(Number k) const {
+ return array_[k];
+ }
+
+ // REQUIRES "k" is in range "[0,2^BITS-1]".
+ // REQUIRES "k" has been ensured before.
+ //
+ // Sets the value for KEY.
+ void set(Number k, void* v) {
+ array_[k] = v;
+ }
+};
+
+// Two-level radix tree
+template <int BITS>
+class TCMalloc_PageMap2 {
+ private:
+ // Put 32 entries in the root and (2^BITS)/32 entries in each leaf.
+ static const int ROOT_BITS = 5;
+ static const int ROOT_LENGTH = 1 << ROOT_BITS;
+
+ static const int LEAF_BITS = BITS - ROOT_BITS;
+ static const int LEAF_LENGTH = 1 << LEAF_BITS;
+
+ // Leaf node
+ struct Leaf {
+ void* values[LEAF_LENGTH];
+ };
+
+ Leaf* root_[ROOT_LENGTH]; // Pointers to 32 child nodes
+ void* (*allocator_)(size_t); // Memory allocator
+
+ public:
+ typedef uintptr_t Number;
+
+ void init(void* (*allocator)(size_t)) {
+ allocator_ = allocator;
+ memset(root_, 0, sizeof(root_));
+ }
+
+ void* get(Number k) const {
+ ASSERT(k >> BITS == 0);
+ const Number i1 = k >> LEAF_BITS;
+ const Number i2 = k & (LEAF_LENGTH-1);
+ return root_[i1]->values[i2];
+ }
+
+ void set(Number k, void* v) {
+ ASSERT(k >> BITS == 0);
+ const Number i1 = k >> LEAF_BITS;
+ const Number i2 = k & (LEAF_LENGTH-1);
+ root_[i1]->values[i2] = v;
+ }
+
+ bool Ensure(Number start, size_t n) {
+ for (Number key = start; key <= start + n - 1; ) {
+ const Number i1 = key >> LEAF_BITS;
+
+ // Make 2nd level node if necessary
+ if (root_[i1] == NULL) {
+ Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf)));
+ if (leaf == NULL) return false;
+ memset(leaf, 0, sizeof(*leaf));
+ root_[i1] = leaf;
+ }
+
+ // Advance key past whatever is covered by this leaf node
+ key = ((key >> LEAF_BITS) + 1) << LEAF_BITS;
+ }
+ return true;
+ }
+
+ void PreallocateMoreMemory() {
+ // Allocate enough to keep track of all possible pages
+ Ensure(0, 1 << BITS);
+ }
+
+#ifdef WTF_CHANGES
+ template<class Visitor, class MemoryReader>
+ void visitValues(Visitor& visitor, const MemoryReader& reader)
+ {
+ for (int i = 0; i < ROOT_LENGTH; i++) {
+ if (!root_[i])
+ continue;
+
+ Leaf* l = reader(reinterpret_cast<Leaf*>(root_[i]));
+ for (int j = 0; j < LEAF_LENGTH; j += visitor.visit(l->values[j]))
+ ;
+ }
+ }
+
+ template<class Visitor, class MemoryReader>
+ void visitAllocations(Visitor& visitor, const MemoryReader&) {
+ for (int i = 0; i < ROOT_LENGTH; i++) {
+ if (root_[i])
+ visitor.visit(root_[i], sizeof(Leaf));
+ }
+ }
+#endif
+};
+
+// Three-level radix tree
+template <int BITS>
+class TCMalloc_PageMap3 {
+ private:
+ // How many bits should we consume at each interior level
+ static const int INTERIOR_BITS = (BITS + 2) / 3; // Round-up
+ static const int INTERIOR_LENGTH = 1 << INTERIOR_BITS;
+
+ // How many bits should we consume at leaf level
+ static const int LEAF_BITS = BITS - 2*INTERIOR_BITS;
+ static const int LEAF_LENGTH = 1 << LEAF_BITS;
+
+ // Interior node
+ struct Node {
+ Node* ptrs[INTERIOR_LENGTH];
+ };
+
+ // Leaf node
+ struct Leaf {
+ void* values[LEAF_LENGTH];
+ };
+
+ Node* root_; // Root of radix tree
+ void* (*allocator_)(size_t); // Memory allocator
+
+ Node* NewNode() {
+ Node* result = reinterpret_cast<Node*>((*allocator_)(sizeof(Node)));
+ if (result != NULL) {
+ memset(result, 0, sizeof(*result));
+ }
+ return result;
+ }
+
+ public:
+ typedef uintptr_t Number;
+
+ void init(void* (*allocator)(size_t)) {
+ allocator_ = allocator;
+ root_ = NewNode();
+ }
+
+ void* get(Number k) const {
+ ASSERT(k >> BITS == 0);
+ const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS);
+ const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
+ const Number i3 = k & (LEAF_LENGTH-1);
+ return reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2])->values[i3];
+ }
+
+ void set(Number k, void* v) {
+ ASSERT(k >> BITS == 0);
+ const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS);
+ const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
+ const Number i3 = k & (LEAF_LENGTH-1);
+ reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2])->values[i3] = v;
+ }
+
+ bool Ensure(Number start, size_t n) {
+ for (Number key = start; key <= start + n - 1; ) {
+ const Number i1 = key >> (LEAF_BITS + INTERIOR_BITS);
+ const Number i2 = (key >> LEAF_BITS) & (INTERIOR_LENGTH-1);
+
+ // Make 2nd level node if necessary
+ if (root_->ptrs[i1] == NULL) {
+ Node* n = NewNode();
+ if (n == NULL) return false;
+ root_->ptrs[i1] = n;
+ }
+
+ // Make leaf node if necessary
+ if (root_->ptrs[i1]->ptrs[i2] == NULL) {
+ Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf)));
+ if (leaf == NULL) return false;
+ memset(leaf, 0, sizeof(*leaf));
+ root_->ptrs[i1]->ptrs[i2] = reinterpret_cast<Node*>(leaf);
+ }
+
+ // Advance key past whatever is covered by this leaf node
+ key = ((key >> LEAF_BITS) + 1) << LEAF_BITS;
+ }
+ return true;
+ }
+
+ void PreallocateMoreMemory() {
+ }
+
+#ifdef WTF_CHANGES
+ template<class Visitor, class MemoryReader>
+ void visitValues(Visitor& visitor, const MemoryReader& reader) {
+ Node* root = reader(root_);
+ for (int i = 0; i < INTERIOR_LENGTH; i++) {
+ if (!root->ptrs[i])
+ continue;
+
+ Node* n = reader(root->ptrs[i]);
+ for (int j = 0; j < INTERIOR_LENGTH; j++) {
+ if (!n->ptrs[j])
+ continue;
+
+ Leaf* l = reader(reinterpret_cast<Leaf*>(n->ptrs[j]));
+ for (int k = 0; k < LEAF_LENGTH; k += visitor.visit(l->values[k]))
+ ;
+ }
+ }
+ }
+
+ template<class Visitor, class MemoryReader>
+ void visitAllocations(Visitor& visitor, const MemoryReader& reader) {
+ visitor.visit(root_, sizeof(Node));
+
+ Node* root = reader(root_);
+ for (int i = 0; i < INTERIOR_LENGTH; i++) {
+ if (!root->ptrs[i])
+ continue;
+
+ visitor.visit(root->ptrs[i], sizeof(Node));
+ Node* n = reader(root->ptrs[i]);
+ for (int j = 0; j < INTERIOR_LENGTH; j++) {
+ if (!n->ptrs[j])
+ continue;
+
+ visitor.visit(n->ptrs[j], sizeof(Leaf));
+ }
+ }
+ }
+#endif
+};
+
+#endif // TCMALLOC_PAGEMAP_H__
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSpinLock.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSpinLock.h
new file mode 100644
index 0000000..74c02f3
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSpinLock.h
@@ -0,0 +1,239 @@
+// Copyright (c) 2005, 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Sanjay Ghemawat <opensource@google.com>
+
+#ifndef TCMALLOC_INTERNAL_SPINLOCK_H__
+#define TCMALLOC_INTERNAL_SPINLOCK_H__
+
+#if (PLATFORM(X86) || PLATFORM(PPC)) && (COMPILER(GCC) || COMPILER(MSVC))
+
+#include <time.h> /* For nanosleep() */
+
+#if !PLATFORM(WIN_OS)
+#include <sched.h> /* For sched_yield() */
+#endif
+
+#if HAVE(STDINT_H)
+#include <stdint.h>
+#elif HAVE(INTTYPES_H)
+#include <inttypes.h>
+#else
+#include <sys/types.h>
+#endif
+
+#if PLATFORM(WIN_OS)
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include <windows.h>
+#endif
+
+static void TCMalloc_SlowLock(volatile unsigned int* lockword);
+
+// The following is a struct so that it can be initialized at compile time
+struct TCMalloc_SpinLock {
+
+ inline void Lock() {
+ int r;
+#if COMPILER(GCC)
+#if PLATFORM(X86)
+ __asm__ __volatile__
+ ("xchgl %0, %1"
+ : "=r"(r), "=m"(lockword_)
+ : "0"(1), "m"(lockword_)
+ : "memory");
+#else
+ volatile unsigned int *lockword_ptr = &lockword_;
+ __asm__ __volatile__
+ ("1: lwarx %0, 0, %1\n\t"
+ "stwcx. %2, 0, %1\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r" (r), "=r" (lockword_ptr)
+ : "r" (1), "1" (lockword_ptr)
+ : "memory");
+#endif
+#elif COMPILER(MSVC)
+ __asm {
+ mov eax, this ; store &lockword_ (which is this+0) in eax
+ mov ebx, 1 ; store 1 in ebx
+ xchg [eax], ebx ; exchange lockword_ and 1
+ mov r, ebx ; store old value of lockword_ in r
+ }
+#endif
+ if (r) TCMalloc_SlowLock(&lockword_);
+ }
+
+ inline void Unlock() {
+#if COMPILER(GCC)
+#if PLATFORM(X86)
+ __asm__ __volatile__
+ ("movl $0, %0"
+ : "=m"(lockword_)
+ : "m" (lockword_)
+ : "memory");
+#else
+ __asm__ __volatile__
+ ("isync\n\t"
+ "eieio\n\t"
+ "stw %1, %0"
+#if PLATFORM(DARWIN) || PLATFORM(PPC)
+ : "=o" (lockword_)
+#else
+ : "=m" (lockword_)
+#endif
+ : "r" (0)
+ : "memory");
+#endif
+#elif COMPILER(MSVC)
+ __asm {
+ mov eax, this ; store &lockword_ (which is this+0) in eax
+ mov [eax], 0 ; set lockword_ to 0
+ }
+#endif
+ }
+ // Report whether we think the lock may be held by some thread.
+ // When the lock is truly held by the invoking thread,
+ // this always returns true.
+ // Intended to be used as CHECK(lock.IsHeld());
+ inline bool IsHeld() const {
+ return lockword_ != 0;
+ }
+
+ inline void Init() { lockword_ = 0; }
+
+ volatile unsigned int lockword_;
+};
+
+#define SPINLOCK_INITIALIZER { 0 }
+
+static void TCMalloc_SlowLock(volatile unsigned int* lockword) {
+#if !PLATFORM(WIN_OS)
+ sched_yield(); // Yield immediately since fast path failed
+#else
+ SwitchToThread();
+#endif
+ while (true) {
+ int r;
+#if COMPILER(GCC)
+#if PLATFORM(X86)
+ __asm__ __volatile__
+ ("xchgl %0, %1"
+ : "=r"(r), "=m"(*lockword)
+ : "0"(1), "m"(*lockword)
+ : "memory");
+
+#else
+ int tmp = 1;
+ __asm__ __volatile__
+ ("1: lwarx %0, 0, %1\n\t"
+ "stwcx. %2, 0, %1\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r" (r), "=r" (lockword)
+ : "r" (tmp), "1" (lockword)
+ : "memory");
+#endif
+#elif COMPILER(MSVC)
+ __asm {
+ mov eax, lockword ; assign lockword into eax
+ mov ebx, 1 ; assign 1 into ebx
+ xchg [eax], ebx ; exchange *lockword and 1
+ mov r, ebx ; store old value of *lockword in r
+ }
+#endif
+ if (!r) {
+ return;
+ }
+
+ // This code was adapted from the ptmalloc2 implementation of
+ // spinlocks, which would sched_yield() up to 50 times before
+ // sleeping once for a few milliseconds. Mike Burrows suggested
+ // just doing one sched_yield() outside the loop and always
+ // sleeping after that. This change helped a great deal on the
+ // performance of spinlocks under high contention. A test program
+ // with 10 threads on a dual Xeon (four virtual processors) went
+ // from taking 30 seconds to 16 seconds.
+
+ // Sleep for a few milliseconds
+#if PLATFORM(WIN_OS)
+ Sleep(2);
+#else
+ struct timespec tm;
+ tm.tv_sec = 0;
+ tm.tv_nsec = 2000001;
+ nanosleep(&tm, NULL);
+#endif
+ }
+}
+
+#else
+
+#include <pthread.h>
+
+// Portable version
+struct TCMalloc_SpinLock {
+ pthread_mutex_t private_lock_;
+
+ inline void Init() {
+ if (pthread_mutex_init(&private_lock_, NULL) != 0) CRASH();
+ }
+ inline void Finalize() {
+ if (pthread_mutex_destroy(&private_lock_) != 0) CRASH();
+ }
+ inline void Lock() {
+ if (pthread_mutex_lock(&private_lock_) != 0) CRASH();
+ }
+ inline void Unlock() {
+ if (pthread_mutex_unlock(&private_lock_) != 0) CRASH();
+ }
+};
+
+#define SPINLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER }
+
+#endif
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
+class TCMalloc_SpinLockHolder {
+ private:
+ TCMalloc_SpinLock* lock_;
+ public:
+ inline explicit TCMalloc_SpinLockHolder(TCMalloc_SpinLock* l)
+ : lock_(l) { l->Lock(); }
+ inline ~TCMalloc_SpinLockHolder() { lock_->Unlock(); }
+};
+
+// Short-hands for convenient use by tcmalloc.cc
+typedef TCMalloc_SpinLock SpinLock;
+typedef TCMalloc_SpinLockHolder SpinLockHolder;
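+
+// Illustrative usage (editorial sketch, not part of the original file):
+//
+// static SpinLock lock = SPINLOCK_INITIALIZER;
+// void Mutate() {
+//   SpinLockHolder h(&lock); // acquired here ...
+//   // ... critical section ...
+// } // ... and released when h goes out of scope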
+
+#endif // TCMALLOC_INTERNAL_SPINLOCK_H__
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSystemAlloc.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSystemAlloc.cpp
new file mode 100644
index 0000000..478ce63
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSystemAlloc.cpp
@@ -0,0 +1,469 @@
+// Copyright (c) 2005, 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Sanjay Ghemawat
+
+#include "config.h"
+#if HAVE(STDINT_H)
+#include <stdint.h>
+#elif HAVE(INTTYPES_H)
+#include <inttypes.h>
+#else
+#include <sys/types.h>
+#endif
+#if PLATFORM(WIN_OS)
+#include "windows.h"
+#else
+#include <errno.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+#include <fcntl.h>
+#include "Assertions.h"
+#include "TCSystemAlloc.h"
+#include "TCSpinLock.h"
+#include "UnusedParam.h"
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+// Structure for discovering alignment
+union MemoryAligner {
+ void* p;
+ double d;
+ size_t s;
+};
+
+static SpinLock spinlock = SPINLOCK_INITIALIZER;
+
+// Page size is initialized on demand
+static size_t pagesize = 0;
+
+// Configuration parameters.
+//
+// if use_devmem is true, either use_sbrk or use_mmap must also be true.
+// For 2.2 kernels, it looks like the sbrk address space (500MBish) and
+// the mmap address space (1300MBish) are disjoint, so we need both allocators
+// to get as much virtual memory as possible.
+#ifndef WTF_CHANGES
+static bool use_devmem = false;
+#endif
+
+#if HAVE(SBRK)
+static bool use_sbrk = false;
+#endif
+
+#if HAVE(MMAP)
+static bool use_mmap = true;
+#endif
+
+#if HAVE(VIRTUALALLOC)
+static bool use_VirtualAlloc = true;
+#endif
+
+// Flags to keep us from retrying allocators that failed.
+static bool devmem_failure = false;
+static bool sbrk_failure = false;
+static bool mmap_failure = false;
+static bool VirtualAlloc_failure = false;
+
+#ifndef WTF_CHANGES
+DEFINE_int32(malloc_devmem_start, 0,
+ "Physical memory starting location in MB for /dev/mem allocation."
+ " Setting this to 0 disables /dev/mem allocation");
+DEFINE_int32(malloc_devmem_limit, 0,
+ "Physical memory limit location in MB for /dev/mem allocation."
+ " Setting this to 0 means no limit.");
+#else
+static const int32_t FLAGS_malloc_devmem_start = 0;
+static const int32_t FLAGS_malloc_devmem_limit = 0;
+#endif
+
+#if HAVE(SBRK)
+
+static void* TrySbrk(size_t size, size_t *actual_size, size_t alignment) {
+ size = ((size + alignment - 1) / alignment) * alignment;
+
+ // could theoretically return the "extra" bytes here, but this
+ // is simple and correct.
+ if (actual_size)
+ *actual_size = size;
+
+ void* result = sbrk(size);
+ if (result == reinterpret_cast<void*>(-1)) {
+ sbrk_failure = true;
+ return NULL;
+ }
+
+ // Is it aligned?
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
+ if ((ptr & (alignment-1)) == 0) return result;
+
+ // Try to get more memory for alignment
+ size_t extra = alignment - (ptr & (alignment-1));
+ void* r2 = sbrk(extra);
+ if (reinterpret_cast<uintptr_t>(r2) == (ptr + size)) {
+ // Contiguous with previous result
+ return reinterpret_cast<void*>(ptr + extra);
+ }
+
+ // Give up and ask for "size + alignment - 1" bytes so
+ // that we can find an aligned region within it.
+ result = sbrk(size + alignment - 1);
+ if (result == reinterpret_cast<void*>(-1)) {
+ sbrk_failure = true;
+ return NULL;
+ }
+ ptr = reinterpret_cast<uintptr_t>(result);
+ if ((ptr & (alignment-1)) != 0) {
+ ptr += alignment - (ptr & (alignment-1));
+ }
+ return reinterpret_cast<void*>(ptr);
+}
+
+#endif /* HAVE(SBRK) */
+
+#if HAVE(MMAP)
+
+static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) {
+ // Enforce page alignment
+ if (pagesize == 0) pagesize = getpagesize();
+ if (alignment < pagesize) alignment = pagesize;
+ size = ((size + alignment - 1) / alignment) * alignment;
+
+ // could theoretically return the "extra" bytes here, but this
+ // is simple and correct.
+ if (actual_size)
+ *actual_size = size;
+
+ // Ask for extra memory if alignment > pagesize
+ size_t extra = 0;
+ if (alignment > pagesize) {
+ extra = alignment - pagesize;
+ }
+ void* result = mmap(NULL, size + extra,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS,
+ -1, 0);
+ if (result == reinterpret_cast<void*>(MAP_FAILED)) {
+ mmap_failure = true;
+ return NULL;
+ }
+
+ // Adjust the return memory so it is aligned
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
+ size_t adjust = 0;
+ if ((ptr & (alignment - 1)) != 0) {
+ adjust = alignment - (ptr & (alignment - 1));
+ }
+
+ // Return the unused memory to the system
+ if (adjust > 0) {
+ munmap(reinterpret_cast<void*>(ptr), adjust);
+ }
+ if (adjust < extra) {
+ munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
+ }
+
+ ptr += adjust;
+ return reinterpret_cast<void*>(ptr);
+}
+
+#endif /* HAVE(MMAP) */
+
+#if HAVE(VIRTUALALLOC)
+
+static void* TryVirtualAlloc(size_t size, size_t *actual_size, size_t alignment) {
+ // Enforce page alignment
+ if (pagesize == 0) {
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ pagesize = system_info.dwPageSize;
+ }
+
+ if (alignment < pagesize) alignment = pagesize;
+ size = ((size + alignment - 1) / alignment) * alignment;
+
+ // could theoretically return the "extra" bytes here, but this
+ // is simple and correct.
+ if (actual_size)
+ *actual_size = size;
+
+ // Ask for extra memory if alignment > pagesize
+ size_t extra = 0;
+ if (alignment > pagesize) {
+ extra = alignment - pagesize;
+ }
+ void* result = VirtualAlloc(NULL, size + extra,
+ MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
+ PAGE_READWRITE);
+
+ if (result == NULL) {
+ VirtualAlloc_failure = true;
+ return NULL;
+ }
+
+ // Adjust the return memory so it is aligned
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
+ size_t adjust = 0;
+ if ((ptr & (alignment - 1)) != 0) {
+ adjust = alignment - (ptr & (alignment - 1));
+ }
+
+ // Return the unused memory to the system - we'd like to release but the best we can do
+ // is decommit, since Windows only lets you free the whole allocation.
+ if (adjust > 0) {
+ VirtualFree(reinterpret_cast<void*>(ptr), adjust, MEM_DECOMMIT);
+ }
+ if (adjust < extra) {
+ VirtualFree(reinterpret_cast<void*>(ptr + adjust + size), extra-adjust, MEM_DECOMMIT);
+ }
+
+ ptr += adjust;
+ return reinterpret_cast<void*>(ptr);
+}
+
+#endif /* HAVE(VIRTUALALLOC) */
+
+#ifndef WTF_CHANGES
+static void* TryDevMem(size_t size, size_t *actual_size, size_t alignment) {
+ static bool initialized = false;
+ static off_t physmem_base; // next physical memory address to allocate
+ static off_t physmem_limit; // maximum physical address allowed
+ static int physmem_fd; // file descriptor for /dev/mem
+
+ // Check if we should use /dev/mem allocation. Note that it may take
+ // a while to get this flag initialized, so meanwhile we fall back to
+ // the next allocator. (It looks like 7MB gets allocated before
+ // this flag gets initialized -khr.)
+ if (FLAGS_malloc_devmem_start == 0) {
+ // NOTE: not a devmem_failure - we'd like TCMalloc_SystemAlloc to
+ // try us again next time.
+ return NULL;
+ }
+
+ if (!initialized) {
+ physmem_fd = open("/dev/mem", O_RDWR);
+ if (physmem_fd < 0) {
+ devmem_failure = true;
+ return NULL;
+ }
+ physmem_base = FLAGS_malloc_devmem_start*1024LL*1024LL;
+ physmem_limit = FLAGS_malloc_devmem_limit*1024LL*1024LL;
+ initialized = true;
+ }
+
+ // Enforce page alignment
+ if (pagesize == 0) pagesize = getpagesize();
+ if (alignment < pagesize) alignment = pagesize;
+ size = ((size + alignment - 1) / alignment) * alignment;
+
+ // could theoretically return the "extra" bytes here, but this
+ // is simple and correct.
+ if (actual_size)
+ *actual_size = size;
+
+ // Ask for extra memory if alignment > pagesize
+ size_t extra = 0;
+ if (alignment > pagesize) {
+ extra = alignment - pagesize;
+ }
+
+ // check to see if we have any memory left
+ if (physmem_limit != 0 && physmem_base + size + extra > physmem_limit) {
+ devmem_failure = true;
+ return NULL;
+ }
+ void *result = mmap(0, size + extra, PROT_READ | PROT_WRITE,
+ MAP_SHARED, physmem_fd, physmem_base);
+ if (result == reinterpret_cast<void*>(MAP_FAILED)) {
+ devmem_failure = true;
+ return NULL;
+ }
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
+
+ // Adjust the return memory so it is aligned
+ size_t adjust = 0;
+ if ((ptr & (alignment - 1)) != 0) {
+ adjust = alignment - (ptr & (alignment - 1));
+ }
+
+ // Return the unused virtual memory to the system
+ if (adjust > 0) {
+ munmap(reinterpret_cast<void*>(ptr), adjust);
+ }
+ if (adjust < extra) {
+ munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
+ }
+
+ ptr += adjust;
+ physmem_base += adjust + size;
+
+ return reinterpret_cast<void*>(ptr);
+}
+#endif
+
+void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) {
+ // Discard requests that overflow
+ if (size + alignment < size) return NULL;
+
+ SpinLockHolder lock_holder(&spinlock);
+
+ // Enforce minimum alignment
+ if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner);
+
+ // Try twice, once avoiding allocators that failed before, and once
+ // more trying all allocators even if they failed before.
+ for (int i = 0; i < 2; i++) {
+
+#ifndef WTF_CHANGES
+ if (use_devmem && !devmem_failure) {
+ void* result = TryDevMem(size, actual_size, alignment);
+ if (result != NULL) return result;
+ }
+#endif
+
+#if HAVE(SBRK)
+ if (use_sbrk && !sbrk_failure) {
+ void* result = TrySbrk(size, actual_size, alignment);
+ if (result != NULL) return result;
+ }
+#endif
+
+#if HAVE(MMAP)
+ if (use_mmap && !mmap_failure) {
+ void* result = TryMmap(size, actual_size, alignment);
+ if (result != NULL) return result;
+ }
+#endif
+
+#if HAVE(VIRTUALALLOC)
+ if (use_VirtualAlloc && !VirtualAlloc_failure) {
+ void* result = TryVirtualAlloc(size, actual_size, alignment);
+ if (result != NULL) return result;
+ }
+#endif
+
+ // nothing worked - reset failure flags and try again
+ devmem_failure = false;
+ sbrk_failure = false;
+ mmap_failure = false;
+ VirtualAlloc_failure = false;
+ }
+ return NULL;
+}
+
+#if HAVE(MADV_FREE_REUSE)
+
+void TCMalloc_SystemRelease(void* start, size_t length)
+{
+ while (madvise(start, length, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+}
+
+#elif HAVE(MADV_FREE) || HAVE(MADV_DONTNEED)
+
+void TCMalloc_SystemRelease(void* start, size_t length)
+{
+ // MADV_FREE clears the modified bit on pages, which allows
+ // them to be discarded immediately.
+#if HAVE(MADV_FREE)
+ const int advice = MADV_FREE;
+#else
+ const int advice = MADV_DONTNEED;
+#endif
+ if (FLAGS_malloc_devmem_start) {
+ // It's not safe to use MADV_DONTNEED if we've been mapping
+ // /dev/mem for heap memory
+ return;
+ }
+ if (pagesize == 0) pagesize = getpagesize();
+ const size_t pagemask = pagesize - 1;
+
+ size_t new_start = reinterpret_cast<size_t>(start);
+ size_t end = new_start + length;
+ size_t new_end = end;
+
+ // Round up the starting address and round down the ending address
+ // to be page aligned:
+ new_start = (new_start + pagesize - 1) & ~pagemask;
+ new_end = new_end & ~pagemask;
+
+ ASSERT((new_start & pagemask) == 0);
+ ASSERT((new_end & pagemask) == 0);
+ ASSERT(new_start >= reinterpret_cast<size_t>(start));
+ ASSERT(new_end <= end);
+
+ if (new_end > new_start) {
+ // Note -- ignoring most return codes, because if this fails it
+ // doesn't matter...
+ while (madvise(reinterpret_cast<char*>(new_start), new_end - new_start,
+ advice) == -1 &&
+ errno == EAGAIN) {
+ // NOP
+ }
+ }
+}
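+
+// Rounding example (illustrative, assuming a 4KB page): a release request for
+// [0x1234, 0x5678) rounds new_start up to 0x2000 and new_end down to 0x5000,
+// so only the fully covered pages in [0x2000, 0x5000) are advised away; the
+// partial pages at either end stay resident.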
+
+#elif HAVE(MMAP)
+
+void TCMalloc_SystemRelease(void* start, size_t length)
+{
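+    // Remapping fresh anonymous pages over the range with MAP_FIXED atomically
+    // replaces the old pages, letting the kernel reclaim their physical backing
+    // while keeping the virtual range reserved for reuse.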
+ void* newAddress = mmap(start, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ // If the mmap failed then that's ok, we just won't return the memory to the system.
+ ASSERT_UNUSED(newAddress, newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED));
+}
+
+#else
+
+// Platforms that don't support returning memory use an empty inline version of TCMalloc_SystemRelease
+// declared in TCSystemAlloc.h
+
+#endif
+
+#if HAVE(MADV_FREE_REUSE)
+
+void TCMalloc_SystemCommit(void* start, size_t length)
+{
+ while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+}
+
+#elif HAVE(VIRTUALALLOC)
+
+void TCMalloc_SystemCommit(void*, size_t)
+{
+}
+
+#else
+
+// Platforms that don't need to explicitly commit memory use an empty inline version of TCMalloc_SystemCommit
+// declared in TCSystemAlloc.h
+
+#endif
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSystemAlloc.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSystemAlloc.h
new file mode 100644
index 0000000..8e3a01a
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TCSystemAlloc.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2005, 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Sanjay Ghemawat
+//
+// Routine that uses sbrk/mmap to allocate memory from the system.
+// Useful for implementing malloc.
+
+#ifndef TCMALLOC_SYSTEM_ALLOC_H__
+#define TCMALLOC_SYSTEM_ALLOC_H__
+
+// REQUIRES: "alignment" is a power of two or "0" to indicate default alignment
+//
+// Allocate and return "N" bytes of zeroed memory.
+//
+// If actual_bytes is NULL then the returned memory is exactly the
+// requested size. If actual_bytes is non-NULL then the allocator
+// may optionally return more bytes than asked for (i.e. return an
+// entire "huge" page if a huge page allocator is in use).
+//
+// The returned pointer is a multiple of "alignment" if non-zero.
+//
+// Returns NULL when out of memory.
+extern void* TCMalloc_SystemAlloc(size_t bytes, size_t *actual_bytes,
+ size_t alignment = 0);
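+
+// Usage sketch (hypothetical caller; recordSpan and kPageSize are placeholder
+// names, not part of this interface). Since the allocator may round the
+// request up, record the actual_bytes value rather than the requested size:
+//
+//   size_t actual = 0;
+//   void* span = TCMalloc_SystemAlloc(64 * 1024, &actual, kPageSize);
+//   if (span)
+//       recordSpan(span, actual);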
+
+// This call is a hint to the operating system that the pages
+// contained in the specified range of memory will not be used for a
+// while, and can be released for use by other processes or the OS.
+// Pages which are released in this way may be destroyed (zeroed) by
+// the OS. The benefit of this function is that it frees memory for
+// use by the system, the cost is that the pages are faulted back into
+// the address space next time they are touched, which can impact
+// performance. (Only pages fully covered by the memory region will
+// be released; partial pages will not.)
+extern void TCMalloc_SystemRelease(void* start, size_t length);
+
+extern void TCMalloc_SystemCommit(void* start, size_t length);
+
+#if !HAVE(MADV_FREE_REUSE) && !HAVE(MADV_DONTNEED) && !HAVE(MMAP)
+inline void TCMalloc_SystemRelease(void*, size_t) { }
+#endif
+
+#if !HAVE(VIRTUALALLOC) && !HAVE(MADV_FREE_REUSE)
+inline void TCMalloc_SystemCommit(void*, size_t) { }
+#endif
+
+#endif /* TCMALLOC_SYSTEM_ALLOC_H__ */
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadSpecific.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadSpecific.h
new file mode 100644
index 0000000..4d5d2f7
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadSpecific.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Jian Li <jianli@chromium.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Thread local storage is implemented by using either the pthread API or the
+ * Windows native API. There is a subtle semantic discrepancy in the cleanup
+ * function implementation, as noted below:
+ * @ In the pthread implementation, the destructor function will be called
+ *   repeatedly as long as a non-NULL value remains associated with the key.
+ * @ In the Windows native implementation, the destructor function will be
+ *   called only once.
+ * This discrepancy does not cause any problem, because nowhere in WebKit is
+ * the repeated-call behavior relied upon.
+ */
+
+#ifndef WTF_ThreadSpecific_h
+#define WTF_ThreadSpecific_h
+
+#include <wtf/Noncopyable.h>
+
+#if USE(PTHREADS)
+#include <pthread.h>
+#elif PLATFORM(QT)
+#include <QThreadStorage>
+#elif PLATFORM(WIN_OS)
+#include <windows.h>
+#endif
+
+namespace WTF {
+
+#if !USE(PTHREADS) && !PLATFORM(QT) && PLATFORM(WIN_OS)
+// ThreadSpecificThreadExit should be called each time a thread is detached.
+// This is done automatically for threads created with WTF::createThread.
+void ThreadSpecificThreadExit();
+#endif
+
+template<typename T> class ThreadSpecific : public Noncopyable {
+public:
+ ThreadSpecific();
+ T* operator->();
+ operator T*();
+ T& operator*();
+ ~ThreadSpecific();
+
+private:
+#if !USE(PTHREADS) && !PLATFORM(QT) && PLATFORM(WIN_OS)
+ friend void ThreadSpecificThreadExit();
+#endif
+
+ T* get();
+ void set(T*);
+ static void destroy(void* ptr);
+
+#if USE(PTHREADS) || PLATFORM(QT) || PLATFORM(WIN_OS)
+ struct Data : Noncopyable {
+ Data(T* value, ThreadSpecific<T>* owner) : value(value), owner(owner) {}
+
+ T* value;
+ ThreadSpecific<T>* owner;
+#if !USE(PTHREADS)
+ void (*destructor)(void*);
+#endif
+ };
+#endif
+
+#if USE(PTHREADS)
+ pthread_key_t m_key;
+#elif PLATFORM(QT)
+ QThreadStorage<Data*> m_key;
+#elif PLATFORM(WIN_OS)
+ int m_index;
+#endif
+};
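+
+// Usage sketch (hypothetical; ThreadCounter is a placeholder type, not part
+// of this header). Each thread lazily constructs its own instance on first
+// dereference through operator-> or operator*:
+//
+//   static ThreadSpecific<ThreadCounter>* counter = new ThreadSpecific<ThreadCounter>;
+//   (*counter)->increment(); // allocates and constructs this thread's copy on demand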
+
+#if USE(PTHREADS)
+template<typename T>
+inline ThreadSpecific<T>::ThreadSpecific()
+{
+ int error = pthread_key_create(&m_key, destroy);
+ if (error)
+ CRASH();
+}
+
+template<typename T>
+inline ThreadSpecific<T>::~ThreadSpecific()
+{
+ pthread_key_delete(m_key); // Does not invoke destructor functions.
+}
+
+template<typename T>
+inline T* ThreadSpecific<T>::get()
+{
+ Data* data = static_cast<Data*>(pthread_getspecific(m_key));
+ return data ? data->value : 0;
+}
+
+template<typename T>
+inline void ThreadSpecific<T>::set(T* ptr)
+{
+ ASSERT(!get());
+ pthread_setspecific(m_key, new Data(ptr, this));
+}
+
+#elif PLATFORM(QT)
+
+template<typename T>
+inline ThreadSpecific<T>::ThreadSpecific()
+{
+}
+
+template<typename T>
+inline ThreadSpecific<T>::~ThreadSpecific()
+{
+ Data* data = static_cast<Data*>(m_key.localData());
+ if (data)
+ data->destructor(data);
+}
+
+template<typename T>
+inline T* ThreadSpecific<T>::get()
+{
+ Data* data = static_cast<Data*>(m_key.localData());
+ return data ? data->value : 0;
+}
+
+template<typename T>
+inline void ThreadSpecific<T>::set(T* ptr)
+{
+ ASSERT(!get());
+ Data* data = new Data(ptr, this);
+ data->destructor = &ThreadSpecific<T>::destroy;
+ m_key.setLocalData(data);
+}
+
+#elif PLATFORM(WIN_OS)
+
+// The maximum number of TLS keys that can be created. For simplification, we assume that:
+// 1) Once the instance of ThreadSpecific<> is created, it will not be destructed until the program dies.
+// 2) We do not need to hold many instances of ThreadSpecific<> data. This fixed number should be more than enough.
+const int kMaxTlsKeySize = 256;
+
+long& tlsKeyCount();
+DWORD* tlsKeys();
+
+template<typename T>
+inline ThreadSpecific<T>::ThreadSpecific()
+ : m_index(-1)
+{
+ DWORD tls_key = TlsAlloc();
+ if (tls_key == TLS_OUT_OF_INDEXES)
+ CRASH();
+
+ m_index = InterlockedIncrement(&tlsKeyCount()) - 1;
+ if (m_index >= kMaxTlsKeySize)
+ CRASH();
+ tlsKeys()[m_index] = tls_key;
+}
+
+template<typename T>
+inline ThreadSpecific<T>::~ThreadSpecific()
+{
+ // Does not invoke destructor functions. They will be called from ThreadSpecificThreadExit when the thread is detached.
+ TlsFree(tlsKeys()[m_index]);
+}
+
+template<typename T>
+inline T* ThreadSpecific<T>::get()
+{
+ Data* data = static_cast<Data*>(TlsGetValue(tlsKeys()[m_index]));
+ return data ? data->value : 0;
+}
+
+template<typename T>
+inline void ThreadSpecific<T>::set(T* ptr)
+{
+ ASSERT(!get());
+ Data* data = new Data(ptr, this);
+ data->destructor = &ThreadSpecific<T>::destroy;
+ TlsSetValue(tlsKeys()[m_index], data);
+}
+
+#else
+#error ThreadSpecific is not implemented for this platform.
+#endif
+
+template<typename T>
+inline void ThreadSpecific<T>::destroy(void* ptr)
+{
+ Data* data = static_cast<Data*>(ptr);
+
+#if USE(PTHREADS)
+ // We want get() to keep working while data destructor works, because it can be called indirectly by the destructor.
+ // Some pthreads implementations zero out the pointer before calling destroy(), so we temporarily reset it.
+ pthread_setspecific(data->owner->m_key, ptr);
+#endif
+
+ data->value->~T();
+ fastFree(data->value);
+
+#if USE(PTHREADS)
+ pthread_setspecific(data->owner->m_key, 0);
+#elif PLATFORM(QT)
+ data->owner->m_key.setLocalData(0);
+#elif PLATFORM(WIN_OS)
+ TlsSetValue(tlsKeys()[data->owner->m_index], 0);
+#else
+#error ThreadSpecific is not implemented for this platform.
+#endif
+
+ delete data;
+}
+
+template<typename T>
+inline ThreadSpecific<T>::operator T*()
+{
+ T* ptr = static_cast<T*>(get());
+ if (!ptr) {
+ // Set up the thread-specific value's memory pointer before invoking the constructor,
+ // in case any function the constructor calls needs to access the value; this avoids infinite recursion.
+ ptr = static_cast<T*>(fastMalloc(sizeof(T)));
+ set(ptr);
+ new (ptr) T;
+ }
+ return ptr;
+}
+
+template<typename T>
+inline T* ThreadSpecific<T>::operator->()
+{
+ return operator T*();
+}
+
+template<typename T>
+inline T& ThreadSpecific<T>::operator*()
+{
+ return *operator T*();
+}
+
+}
+
+#endif
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadSpecificWin.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadSpecificWin.cpp
new file mode 100644
index 0000000..f2c0cad
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadSpecificWin.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2009 Jian Li <jianli@chromium.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "config.h"
+
+#include "ThreadSpecific.h"
+#include <wtf/Noncopyable.h>
+
+#if USE(PTHREADS)
+#error This file should not be compiled by ports that do not use Windows native ThreadSpecific implementation.
+#endif
+
+namespace WTF {
+
+long& tlsKeyCount()
+{
+ static long count;
+ return count;
+}
+
+DWORD* tlsKeys()
+{
+ static DWORD keys[kMaxTlsKeySize];
+ return keys;
+}
+
+void ThreadSpecificThreadExit()
+{
+ for (long i = 0; i < tlsKeyCount(); i++) {
+ // The layout of ThreadSpecific<T>::Data does not depend on T, so it is safe to cast to ThreadSpecific<int>::Data in order to access its members.
+ ThreadSpecific<int>::Data* data = static_cast<ThreadSpecific<int>::Data*>(TlsGetValue(tlsKeys()[i]));
+ if (data)
+ data->destructor(data);
+ }
+}
+
+} // namespace WTF
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Threading.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Threading.cpp
new file mode 100644
index 0000000..56bf438
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Threading.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Threading.h"
+
+#include <string.h>
+
+namespace WTF {
+
+struct NewThreadContext : FastAllocBase {
+ NewThreadContext(ThreadFunction entryPoint, void* data, const char* name)
+ : entryPoint(entryPoint)
+ , data(data)
+ , name(name)
+ {
+ }
+
+ ThreadFunction entryPoint;
+ void* data;
+ const char* name;
+
+ Mutex creationMutex;
+};
+
+static void* threadEntryPoint(void* contextData)
+{
+ NewThreadContext* context = reinterpret_cast<NewThreadContext*>(contextData);
+
+ setThreadNameInternal(context->name);
+
+ // Block until our creating thread has completed any extra setup work
+ {
+ MutexLocker locker(context->creationMutex);
+ }
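+ // (Taking and immediately dropping creationMutex is a pure synchronization
+ // barrier: the lock cannot be acquired until createThread has finished its
+ // setup and its own MutexLocker has released it.)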
+
+ // Grab the info that we need out of the context, then deallocate it.
+ ThreadFunction entryPoint = context->entryPoint;
+ void* data = context->data;
+ delete context;
+
+ return entryPoint(data);
+}
+
+ThreadIdentifier createThread(ThreadFunction entryPoint, void* data, const char* name)
+{
+ // Visual Studio has a 31-character limit on thread names. Longer names will
+ // be truncated silently, but we'd like callers to know about the limit.
+#if !LOG_DISABLED
+ if (strlen(name) > 31)
+ LOG_ERROR("Thread name \"%s\" is longer than 31 characters and will be truncated by Visual Studio", name);
+#endif
+
+ NewThreadContext* context = new NewThreadContext(entryPoint, data, name);
+
+ // Prevent the thread body from executing until we've established the thread identifier
+ MutexLocker locker(context->creationMutex);
+
+ return createThreadInternal(threadEntryPoint, context, name);
+}
+
+#if PLATFORM(MAC) || PLATFORM(WIN)
+
+// This function is deprecated but needs to be kept around for backward
+// compatibility. Use the 3-argument version of createThread above.
+
+ThreadIdentifier createThread(ThreadFunction entryPoint, void* data);
+
+ThreadIdentifier createThread(ThreadFunction entryPoint, void* data)
+{
+ return createThread(entryPoint, data, 0);
+}
+#endif
+
+} // namespace WTF
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Threading.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Threading.h
new file mode 100644
index 0000000..6578151
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Threading.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Note: The implementations of InterlockedIncrement and InterlockedDecrement are based
+ * on atomic_increment and atomic_exchange_and_add from the Boost C++ Library. The license
+ * is virtually identical to the Apple license above but is included here for completeness.
+ *
+ * Boost Software License - Version 1.0 - August 17th, 2003
+ *
+ * Permission is hereby granted, free of charge, to any person or organization
+ * obtaining a copy of the software and accompanying documentation covered by
+ * this license (the "Software") to use, reproduce, display, distribute,
+ * execute, and transmit the Software, and to prepare derivative works of the
+ * Software, and to permit third-parties to whom the Software is furnished to
+ * do so, all subject to the following:
+ *
+ * The copyright notices in the Software and this entire statement, including
+ * the above license grant, this restriction and the following disclaimer,
+ * must be included in all copies of the Software, in whole or in part, and
+ * all derivative works of the Software, unless such copies or derivative
+ * works are solely in the form of machine-executable object code generated by
+ * a source language processor.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+ * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef Threading_h
+#define Threading_h
+
+#if PLATFORM(WINCE)
+#include <windows.h>
+#endif
+
+#include <wtf/Assertions.h>
+#include <wtf/Locker.h>
+#include <wtf/Noncopyable.h>
+
+#if PLATFORM(WIN_OS) && !PLATFORM(WINCE)
+#include <windows.h>
+#elif PLATFORM(DARWIN)
+#include <libkern/OSAtomic.h>
+#elif COMPILER(GCC)
+#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 2))
+#include <ext/atomicity.h>
+#else
+#include <bits/atomicity.h>
+#endif
+#endif
+
+#if USE(PTHREADS)
+#include <pthread.h>
+#elif PLATFORM(GTK)
+#include <wtf/GOwnPtr.h>
+typedef struct _GMutex GMutex;
+typedef struct _GCond GCond;
+#endif
+
+#if PLATFORM(QT)
+#include <qglobal.h>
+QT_BEGIN_NAMESPACE
+class QMutex;
+class QWaitCondition;
+QT_END_NAMESPACE
+#endif
+
+#include <stdint.h>
+
+// For portability, we do not use thread-safe statics natively supported by some compilers (e.g. gcc).
+#define AtomicallyInitializedStatic(T, name) \
+ WTF::lockAtomicallyInitializedStaticMutex(); \
+ static T name; \
+ WTF::unlockAtomicallyInitializedStaticMutex();
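+
+// Usage sketch (hypothetical call site): the macro expands to a lock, the
+// static definition, and an unlock, so
+//   AtomicallyInitializedStatic(Mutex, mutex);
+// defines "static Mutex mutex;" under the initialization lock.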
+
+namespace WTF {
+
+typedef uint32_t ThreadIdentifier;
+typedef void* (*ThreadFunction)(void* argument);
+
+// Returns 0 if thread creation failed.
+// The thread name must be a literal since on some platforms it's passed in to the thread.
+ThreadIdentifier createThread(ThreadFunction, void*, const char* threadName);
+
+// Internal platform-specific createThread implementation.
+ThreadIdentifier createThreadInternal(ThreadFunction, void*, const char* threadName);
+
+// Called in the thread during initialization.
+// Helpful for platforms where the thread name must be set from within the thread.
+void setThreadNameInternal(const char* threadName);
+
+ThreadIdentifier currentThread();
+bool isMainThread();
+int waitForThreadCompletion(ThreadIdentifier, void**);
+void detachThread(ThreadIdentifier);
+
+#if USE(PTHREADS)
+typedef pthread_mutex_t PlatformMutex;
+typedef pthread_rwlock_t PlatformReadWriteLock;
+typedef pthread_cond_t PlatformCondition;
+#elif PLATFORM(GTK)
+typedef GOwnPtr<GMutex> PlatformMutex;
+typedef void* PlatformReadWriteLock; // FIXME: Implement.
+typedef GOwnPtr<GCond> PlatformCondition;
+#elif PLATFORM(QT)
+typedef QT_PREPEND_NAMESPACE(QMutex)* PlatformMutex;
+typedef void* PlatformReadWriteLock; // FIXME: Implement.
+typedef QT_PREPEND_NAMESPACE(QWaitCondition)* PlatformCondition;
+#elif PLATFORM(WIN_OS)
+struct PlatformMutex {
+ CRITICAL_SECTION m_internalMutex;
+ size_t m_recursionCount;
+};
+typedef void* PlatformReadWriteLock; // FIXME: Implement.
+struct PlatformCondition {
+ size_t m_waitersGone;
+ size_t m_waitersBlocked;
+ size_t m_waitersToUnblock;
+ HANDLE m_blockLock;
+ HANDLE m_blockQueue;
+ HANDLE m_unblockLock;
+
+ bool timedWait(PlatformMutex&, DWORD durationMilliseconds);
+ void signal(bool unblockAll);
+};
+#else
+typedef void* PlatformMutex;
+typedef void* PlatformReadWriteLock;
+typedef void* PlatformCondition;
+#endif
+
+class Mutex : public Noncopyable {
+public:
+ Mutex();
+ ~Mutex();
+
+ void lock();
+ bool tryLock();
+ void unlock();
+
+ PlatformMutex& impl() { return m_mutex; }
+private:
+ PlatformMutex m_mutex;
+};
+
+typedef Locker<Mutex> MutexLocker;
+
+class ReadWriteLock : public Noncopyable {
+public:
+ ReadWriteLock();
+ ~ReadWriteLock();
+
+ void readLock();
+ bool tryReadLock();
+
+ void writeLock();
+ bool tryWriteLock();
+
+ void unlock();
+
+private:
+ PlatformReadWriteLock m_readWriteLock;
+};
+
+class ThreadCondition : public Noncopyable {
+public:
+ ThreadCondition();
+ ~ThreadCondition();
+
+ void wait(Mutex& mutex);
+ // Returns true if the condition was signaled before absoluteTime, false if the absoluteTime was reached or is in the past.
+ // The absoluteTime is in seconds, starting on January 1, 1970. The time is assumed to use the same time zone as WTF::currentTime().
+ bool timedWait(Mutex&, double absoluteTime);
+ void signal();
+ void broadcast();
+
+private:
+ PlatformCondition m_condition;
+};
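+
+// timedWait usage sketch (hypothetical): the deadline is absolute, so a
+// relative timeout is added to the current time first, e.g.
+//   bool signaled = condition.timedWait(mutex, WTF::currentTime() + 0.5);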
+
+#if PLATFORM(WIN_OS)
+#define WTF_USE_LOCKFREE_THREADSAFESHARED 1
+
+#if COMPILER(MINGW) || COMPILER(MSVC7) || PLATFORM(WINCE)
+inline void atomicIncrement(int* addend) { InterlockedIncrement(reinterpret_cast<long*>(addend)); }
+inline int atomicDecrement(int* addend) { return InterlockedDecrement(reinterpret_cast<long*>(addend)); }
+#else
+inline void atomicIncrement(int volatile* addend) { InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); }
+inline int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); }
+#endif
+
+#elif PLATFORM(DARWIN)
+#define WTF_USE_LOCKFREE_THREADSAFESHARED 1
+
+inline void atomicIncrement(int volatile* addend) { OSAtomicIncrement32Barrier(const_cast<int*>(addend)); }
+inline int atomicDecrement(int volatile* addend) { return OSAtomicDecrement32Barrier(const_cast<int*>(addend)); }
+
+#elif COMPILER(GCC)
+#define WTF_USE_LOCKFREE_THREADSAFESHARED 1
+
+inline void atomicIncrement(int volatile* addend) { __gnu_cxx::__atomic_add(addend, 1); }
+inline int atomicDecrement(int volatile* addend) { return __gnu_cxx::__exchange_and_add(addend, -1) - 1; }
+
+#endif
+
+class ThreadSafeSharedBase : public Noncopyable {
+public:
+ ThreadSafeSharedBase(int initialRefCount = 1)
+ : m_refCount(initialRefCount)
+ {
+ }
+
+ void ref()
+ {
+#if USE(LOCKFREE_THREADSAFESHARED)
+ atomicIncrement(&m_refCount);
+#else
+ MutexLocker locker(m_mutex);
+ ++m_refCount;
+#endif
+ }
+
+ bool hasOneRef()
+ {
+ return refCount() == 1;
+ }
+
+ int refCount() const
+ {
+#if !USE(LOCKFREE_THREADSAFESHARED)
+ MutexLocker locker(m_mutex);
+#endif
+ return static_cast<int const volatile &>(m_refCount);
+ }
+
+protected:
+ // Returns whether the pointer should be freed or not.
+ bool derefBase()
+ {
+#if USE(LOCKFREE_THREADSAFESHARED)
+ if (atomicDecrement(&m_refCount) <= 0)
+ return true;
+#else
+ int refCount;
+ {
+ MutexLocker locker(m_mutex);
+ --m_refCount;
+ refCount = m_refCount;
+ }
+ if (refCount <= 0)
+ return true;
+#endif
+ return false;
+ }
+
+private:
+ template<class T>
+ friend class CrossThreadRefCounted;
+
+ int m_refCount;
+#if !USE(LOCKFREE_THREADSAFESHARED)
+ mutable Mutex m_mutex;
+#endif
+};
+
+template<class T> class ThreadSafeShared : public ThreadSafeSharedBase {
+public:
+ ThreadSafeShared(int initialRefCount = 1)
+ : ThreadSafeSharedBase(initialRefCount)
+ {
+ }
+
+ void deref()
+ {
+ if (derefBase())
+ delete static_cast<T*>(this);
+ }
+};
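+
+// Usage sketch (hypothetical; SharedThing is a placeholder name): a class
+// derives from ThreadSafeShared with itself as the template argument (CRTP)
+// so that deref() can delete the most-derived type:
+//
+//   class SharedThing : public ThreadSafeShared<SharedThing> { /* ... */ };
+//   thing->ref();   // safe from any thread
+//   thing->deref(); // deletes "thing" when the count drops to zero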
+
+// This function must be called from the main thread. It is safe to call it repeatedly.
+// Darwin is an exception to this rule: it is OK to call it from any thread, the only requirement is that the calls are not reentrant.
+void initializeThreading();
+
+void lockAtomicallyInitializedStaticMutex();
+void unlockAtomicallyInitializedStaticMutex();
+
+} // namespace WTF
+
+using WTF::Mutex;
+using WTF::MutexLocker;
+using WTF::ThreadCondition;
+using WTF::ThreadIdentifier;
+using WTF::ThreadSafeShared;
+
+using WTF::createThread;
+using WTF::currentThread;
+using WTF::isMainThread;
+using WTF::detachThread;
+using WTF::waitForThreadCompletion;
+
+#endif // Threading_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingNone.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingNone.cpp
new file mode 100644
index 0000000..46f23d2
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingNone.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Threading.h"
+
+namespace WTF {
+
+void initializeThreading() { }
+ThreadIdentifier createThreadInternal(ThreadFunction, void*, const char*) { return ThreadIdentifier(); }
+void setThreadNameInternal(const char*) { }
+int waitForThreadCompletion(ThreadIdentifier, void**) { return 0; }
+void detachThread(ThreadIdentifier) { }
+ThreadIdentifier currentThread() { return ThreadIdentifier(); }
+bool isMainThread() { return true; }
+
+Mutex::Mutex() { }
+Mutex::~Mutex() { }
+void Mutex::lock() { }
+bool Mutex::tryLock() { return false; }
+void Mutex::unlock() { }
+
+ThreadCondition::ThreadCondition() { }
+ThreadCondition::~ThreadCondition() { }
+void ThreadCondition::wait(Mutex&) { }
+bool ThreadCondition::timedWait(Mutex&, double) { return false; }
+void ThreadCondition::signal() { }
+void ThreadCondition::broadcast() { }
+
+void lockAtomicallyInitializedStaticMutex() { }
+void unlockAtomicallyInitializedStaticMutex() { }
+
+} // namespace WTF
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingPthreads.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingPthreads.cpp
new file mode 100644
index 0000000..d0e6df8
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingPthreads.cpp
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2007, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Threading.h"
+
+#if USE(PTHREADS)
+
+#include "CurrentTime.h"
+#include "HashMap.h"
+#include "MainThread.h"
+#include "RandomNumberSeed.h"
+#include "StdLibExtras.h"
+#include "UnusedParam.h"
+#include <errno.h>
+#include <limits.h>
+#include <sys/time.h>
+
+#if PLATFORM(ANDROID)
+#include "jni_utility.h"
+#endif
+
+namespace WTF {
+
+typedef HashMap<ThreadIdentifier, pthread_t> ThreadMap;
+
+static Mutex* atomicallyInitializedStaticMutex;
+
+#if !PLATFORM(DARWIN) || PLATFORM(CHROMIUM)
+static ThreadIdentifier mainThreadIdentifier; // The thread that was the first to call initializeThreading(), which must be the main thread.
+#endif
+
+static Mutex& threadMapMutex()
+{
+ DEFINE_STATIC_LOCAL(Mutex, mutex, ());
+ return mutex;
+}
+
+void initializeThreading()
+{
+ if (!atomicallyInitializedStaticMutex) {
+ atomicallyInitializedStaticMutex = new Mutex;
+ threadMapMutex();
+ initializeRandomNumberGenerator();
+#if !PLATFORM(DARWIN) || PLATFORM(CHROMIUM)
+ mainThreadIdentifier = currentThread();
+#endif
+ initializeMainThread();
+ }
+}
+
+void lockAtomicallyInitializedStaticMutex()
+{
+ ASSERT(atomicallyInitializedStaticMutex);
+ atomicallyInitializedStaticMutex->lock();
+}
+
+void unlockAtomicallyInitializedStaticMutex()
+{
+ atomicallyInitializedStaticMutex->unlock();
+}
+
+static ThreadMap& threadMap()
+{
+ DEFINE_STATIC_LOCAL(ThreadMap, map, ());
+ return map;
+}
+
+static ThreadIdentifier identifierByPthreadHandle(const pthread_t& pthreadHandle)
+{
+ MutexLocker locker(threadMapMutex());
+
+ ThreadMap::iterator i = threadMap().begin();
+ for (; i != threadMap().end(); ++i) {
+ if (pthread_equal(i->second, pthreadHandle))
+ return i->first;
+ }
+
+ return 0;
+}
+
+static ThreadIdentifier establishIdentifierForPthreadHandle(pthread_t& pthreadHandle)
+{
+ ASSERT(!identifierByPthreadHandle(pthreadHandle));
+
+ MutexLocker locker(threadMapMutex());
+
+ static ThreadIdentifier identifierCount = 1;
+
+ threadMap().add(identifierCount, pthreadHandle);
+
+ return identifierCount++;
+}
+
+static pthread_t pthreadHandleForIdentifier(ThreadIdentifier id)
+{
+ MutexLocker locker(threadMapMutex());
+
+ return threadMap().get(id);
+}
+
+static void clearPthreadHandleForIdentifier(ThreadIdentifier id)
+{
+ MutexLocker locker(threadMapMutex());
+
+ ASSERT(threadMap().contains(id));
+
+ threadMap().remove(id);
+}
+
+#if PLATFORM(ANDROID)
+// On the Android platform, threads must be registered with the VM before they run.
+struct ThreadData {
+ ThreadFunction entryPoint;
+ void* arg;
+};
+
+static void* runThreadWithRegistration(void* arg)
+{
+ ThreadData* data = static_cast<ThreadData*>(arg);
+ JavaVM* vm = JSC::Bindings::getJavaVM();
+ JNIEnv* env;
+ void* ret = 0;
+ if (vm->AttachCurrentThread(&env, 0) == JNI_OK) {
+ ret = data->entryPoint(data->arg);
+ vm->DetachCurrentThread();
+ }
+ delete data;
+ return ret;
+}
+
+ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char*)
+{
+ pthread_t threadHandle;
+ ThreadData* threadData = new ThreadData();
+ threadData->entryPoint = entryPoint;
+ threadData->arg = data;
+
+ if (pthread_create(&threadHandle, 0, runThreadWithRegistration, static_cast<void*>(threadData))) {
+ LOG_ERROR("Failed to create pthread at entry point %p with data %p", entryPoint, data);
+ return 0;
+ }
+ return establishIdentifierForPthreadHandle(threadHandle);
+}
+#else
+ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char*)
+{
+ pthread_t threadHandle;
+ if (pthread_create(&threadHandle, 0, entryPoint, data)) {
+ LOG_ERROR("Failed to create pthread at entry point %p with data %p", entryPoint, data);
+ return 0;
+ }
+
+ return establishIdentifierForPthreadHandle(threadHandle);
+}
+#endif
+
+void setThreadNameInternal(const char* threadName)
+{
+#if PLATFORM(DARWIN) && !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !PLATFORM(IPHONE)
+ pthread_setname_np(threadName);
+#else
+ UNUSED_PARAM(threadName);
+#endif
+}
+
+int waitForThreadCompletion(ThreadIdentifier threadID, void** result)
+{
+ ASSERT(threadID);
+
+ pthread_t pthreadHandle = pthreadHandleForIdentifier(threadID);
+
+ int joinResult = pthread_join(pthreadHandle, result);
+ if (joinResult == EDEADLK)
+ LOG_ERROR("ThreadIdentifier %u was found to be deadlocked trying to quit", threadID);
+
+ clearPthreadHandleForIdentifier(threadID);
+ return joinResult;
+}
+
+void detachThread(ThreadIdentifier threadID)
+{
+ ASSERT(threadID);
+
+ pthread_t pthreadHandle = pthreadHandleForIdentifier(threadID);
+
+ pthread_detach(pthreadHandle);
+
+ clearPthreadHandleForIdentifier(threadID);
+}
+
+ThreadIdentifier currentThread()
+{
+ pthread_t currentThread = pthread_self();
+ if (ThreadIdentifier id = identifierByPthreadHandle(currentThread))
+ return id;
+ return establishIdentifierForPthreadHandle(currentThread);
+}
+
+bool isMainThread()
+{
+#if PLATFORM(DARWIN) && !PLATFORM(CHROMIUM)
+ return pthread_main_np();
+#else
+ return currentThread() == mainThreadIdentifier;
+#endif
+}
+
+Mutex::Mutex()
+{
+ pthread_mutex_init(&m_mutex, NULL);
+}
+
+Mutex::~Mutex()
+{
+ pthread_mutex_destroy(&m_mutex);
+}
+
+void Mutex::lock()
+{
+ int result = pthread_mutex_lock(&m_mutex);
+ ASSERT_UNUSED(result, !result);
+}
+
+bool Mutex::tryLock()
+{
+ int result = pthread_mutex_trylock(&m_mutex);
+
+ if (result == 0)
+ return true;
+ if (result == EBUSY)
+ return false;
+
+ ASSERT_NOT_REACHED();
+ return false;
+}
+
+void Mutex::unlock()
+{
+ int result = pthread_mutex_unlock(&m_mutex);
+ ASSERT_UNUSED(result, !result);
+}
+
+
+ReadWriteLock::ReadWriteLock()
+{
+ pthread_rwlock_init(&m_readWriteLock, NULL);
+}
+
+ReadWriteLock::~ReadWriteLock()
+{
+ pthread_rwlock_destroy(&m_readWriteLock);
+}
+
+void ReadWriteLock::readLock()
+{
+ int result = pthread_rwlock_rdlock(&m_readWriteLock);
+ ASSERT_UNUSED(result, !result);
+}
+
+bool ReadWriteLock::tryReadLock()
+{
+ int result = pthread_rwlock_tryrdlock(&m_readWriteLock);
+
+ if (result == 0)
+ return true;
+ if (result == EBUSY || result == EAGAIN)
+ return false;
+
+ ASSERT_NOT_REACHED();
+ return false;
+}
+
+void ReadWriteLock::writeLock()
+{
+ int result = pthread_rwlock_wrlock(&m_readWriteLock);
+ ASSERT_UNUSED(result, !result);
+}
+
+bool ReadWriteLock::tryWriteLock()
+{
+ int result = pthread_rwlock_trywrlock(&m_readWriteLock);
+
+ if (result == 0)
+ return true;
+ if (result == EBUSY || result == EAGAIN)
+ return false;
+
+ ASSERT_NOT_REACHED();
+ return false;
+}
+
+void ReadWriteLock::unlock()
+{
+ int result = pthread_rwlock_unlock(&m_readWriteLock);
+ ASSERT_UNUSED(result, !result);
+}
+
+ThreadCondition::ThreadCondition()
+{
+ pthread_cond_init(&m_condition, NULL);
+}
+
+ThreadCondition::~ThreadCondition()
+{
+ pthread_cond_destroy(&m_condition);
+}
+
+void ThreadCondition::wait(Mutex& mutex)
+{
+ int result = pthread_cond_wait(&m_condition, &mutex.impl());
+ ASSERT_UNUSED(result, !result);
+}
+
+bool ThreadCondition::timedWait(Mutex& mutex, double absoluteTime)
+{
+ if (absoluteTime < currentTime())
+ return false;
+
+ if (absoluteTime > INT_MAX) {
+ wait(mutex);
+ return true;
+ }
+
+ int timeSeconds = static_cast<int>(absoluteTime);
+ int timeNanoseconds = static_cast<int>((absoluteTime - timeSeconds) * 1E9);
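+ // Illustrative split: absoluteTime = 1234567890.25 yields
+ // timeSeconds = 1234567890 and timeNanoseconds = 250000000.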
+
+ timespec targetTime;
+ targetTime.tv_sec = timeSeconds;
+ targetTime.tv_nsec = timeNanoseconds;
+
+ return pthread_cond_timedwait(&m_condition, &mutex.impl(), &targetTime) == 0;
+}
+
+void ThreadCondition::signal()
+{
+ int result = pthread_cond_signal(&m_condition);
+ ASSERT_UNUSED(result, !result);
+}
+
+void ThreadCondition::broadcast()
+{
+ int result = pthread_cond_broadcast(&m_condition);
+ ASSERT_UNUSED(result, !result);
+}
+
+} // namespace WTF
+
+#endif // USE(PTHREADS)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingWin.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingWin.cpp
new file mode 100644
index 0000000..cccbda1
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/ThreadingWin.cpp
@@ -0,0 +1,493 @@
+/*
+ * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ * Copyright (C) 2009 Torch Mobile, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * There are numerous academic and practical works on how to implement pthread_cond_wait/pthread_cond_signal/pthread_cond_broadcast
+ * functions on Win32. Here is one example: http://www.cs.wustl.edu/~schmidt/win32-cv-1.html which is widely credited as a 'starting point'
+ * of modern attempts. There are several more or less proven implementations, one in Boost C++ library (http://www.boost.org) and another
+ * in pthreads-win32 (http://sourceware.org/pthreads-win32/).
+ *
+ * The number of articles and discussions is evidence of the significant difficulty of implementing these primitives correctly.
+ * A brief search of revisions, ChangeLog entries, discussions in comp.programming.threads and other places clearly documents
+ * numerous pitfalls and performance problems the authors had to overcome to arrive at suitable implementations.
+ * Optimally, WebKit would use one of those supported/tested libraries directly. Rolling out our own implementation is impractical,
+ * if only for the lack of sufficient testing. However, a faithful reproduction of the code from one of the popular supported
+ * libraries seems to be a good compromise.
+ *
+ * The early Boost implementation (http://www.boxbackup.org/trac/browser/box/nick/win/lib/win32/boost_1_32_0/libs/thread/src/condition.cpp?rev=30)
+ * is identical to pthreads-win32 (http://sourceware.org/cgi-bin/cvsweb.cgi/pthreads/pthread_cond_wait.c?rev=1.10&content-type=text/x-cvsweb-markup&cvsroot=pthreads-win32).
+ * Current Boost uses yet another (although seemingly equivalent) algorithm which came from their 'thread rewrite' effort.
+ *
+ * This file includes timedWait/signal/broadcast implementations translated to WebKit coding style from the latest algorithm by
+ * Alexander Terekhov and Louis Thomas, as captured here: http://sourceware.org/cgi-bin/cvsweb.cgi/pthreads/pthread_cond_wait.c?rev=1.10&content-type=text/x-cvsweb-markup&cvsroot=pthreads-win32
+ * It replaces the implementation of their previous algorithm, also documented in the same source above.
+ * The naming and comments are left very close to the original to enable easy cross-checking.
+ *
+ * The corresponding Pthreads-win32 license is included below, and the CONTRIBUTORS file it refers to is added to the
+ * source directory (as CONTRIBUTORS.pthreads-win32).
+ */
+
+/*
+ * Pthreads-win32 - POSIX Threads Library for Win32
+ * Copyright(C) 1998 John E. Bossom
+ * Copyright(C) 1999,2005 Pthreads-win32 contributors
+ *
+ * Contact Email: rpj@callisto.canberra.edu.au
+ *
+ * The current list of contributors is contained
+ * in the file CONTRIBUTORS included with the source
+ * code distribution. The list can also be seen at the
+ * following World Wide Web location:
+ * http://sources.redhat.com/pthreads-win32/contributors.html
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library in the file COPYING.LIB;
+ * if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "config.h"
+#include "Threading.h"
+
+#include "MainThread.h"
+#if !USE(PTHREADS) && PLATFORM(WIN_OS)
+#include "ThreadSpecific.h"
+#endif
+#if !PLATFORM(WINCE)
+#include <process.h>
+#endif
+#if HAVE(ERRNO_H)
+#include <errno.h>
+#else
+#define NO_ERRNO
+#endif
+#include <windows.h>
+#include <wtf/CurrentTime.h>
+#include <wtf/HashMap.h>
+#include <wtf/MathExtras.h>
+#include <wtf/RandomNumberSeed.h>
+
+namespace WTF {
+
+// MS_VC_EXCEPTION, THREADNAME_INFO, and setThreadNameInternal all come from <http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx>.
+static const DWORD MS_VC_EXCEPTION = 0x406D1388;
+
+#pragma pack(push, 8)
+typedef struct tagTHREADNAME_INFO {
+ DWORD dwType; // must be 0x1000
+ LPCSTR szName; // pointer to name (in user addr space)
+ DWORD dwThreadID; // thread ID (-1=caller thread)
+ DWORD dwFlags; // reserved for future use, must be zero
+} THREADNAME_INFO;
+#pragma pack(pop)
+
+void setThreadNameInternal(const char* szThreadName)
+{
+ THREADNAME_INFO info;
+ info.dwType = 0x1000;
+ info.szName = szThreadName;
+ info.dwThreadID = GetCurrentThreadId();
+ info.dwFlags = 0;
+
+ __try {
+ RaiseException(MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), reinterpret_cast<ULONG_PTR*>(&info));
+ } __except (EXCEPTION_CONTINUE_EXECUTION) {
+ }
+}
+
+static Mutex* atomicallyInitializedStaticMutex;
+
+void lockAtomicallyInitializedStaticMutex()
+{
+ ASSERT(atomicallyInitializedStaticMutex);
+ atomicallyInitializedStaticMutex->lock();
+}
+
+void unlockAtomicallyInitializedStaticMutex()
+{
+ atomicallyInitializedStaticMutex->unlock();
+}
+
+static ThreadIdentifier mainThreadIdentifier;
+
+static Mutex& threadMapMutex()
+{
+ static Mutex mutex;
+ return mutex;
+}
+
+void initializeThreading()
+{
+ if (!atomicallyInitializedStaticMutex) {
+ atomicallyInitializedStaticMutex = new Mutex;
+ threadMapMutex();
+ initializeRandomNumberGenerator();
+ initializeMainThread();
+ mainThreadIdentifier = currentThread();
+ setThreadNameInternal("Main Thread");
+ }
+}
+
+static HashMap<DWORD, HANDLE>& threadMap()
+{
+ static HashMap<DWORD, HANDLE> map;
+ return map;
+}
+
+static void storeThreadHandleByIdentifier(DWORD threadID, HANDLE threadHandle)
+{
+ MutexLocker locker(threadMapMutex());
+ ASSERT(!threadMap().contains(threadID));
+ threadMap().add(threadID, threadHandle);
+}
+
+static HANDLE threadHandleForIdentifier(ThreadIdentifier id)
+{
+ MutexLocker locker(threadMapMutex());
+ return threadMap().get(id);
+}
+
+static void clearThreadHandleForIdentifier(ThreadIdentifier id)
+{
+ MutexLocker locker(threadMapMutex());
+ ASSERT(threadMap().contains(id));
+ threadMap().remove(id);
+}
+
+struct ThreadFunctionInvocation {
+ ThreadFunctionInvocation(ThreadFunction function, void* data) : function(function), data(data) {}
+
+ ThreadFunction function;
+ void* data;
+};
+
+static unsigned __stdcall wtfThreadEntryPoint(void* param)
+{
+ ThreadFunctionInvocation invocation = *static_cast<ThreadFunctionInvocation*>(param);
+ delete static_cast<ThreadFunctionInvocation*>(param);
+
+ void* result = invocation.function(invocation.data);
+
+#if !USE(PTHREADS) && PLATFORM(WIN_OS)
+ // Do the TLS cleanup.
+ ThreadSpecificThreadExit();
+#endif
+
+ return reinterpret_cast<unsigned>(result);
+}
+
+ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char* threadName)
+{
+ unsigned threadIdentifier = 0;
+ ThreadIdentifier threadID = 0;
+ ThreadFunctionInvocation* invocation = new ThreadFunctionInvocation(entryPoint, data);
+#if PLATFORM(WINCE)
+ // This is safe on WINCE, since CRT is in the core and innately multithreaded.
+ // On desktop Windows, we need to use _beginthreadex (not available on WinCE) when using any CRT functions.
+ HANDLE threadHandle = CreateThread(0, 0, (LPTHREAD_START_ROUTINE)wtfThreadEntryPoint, invocation, 0, (LPDWORD)&threadIdentifier);
+#else
+ HANDLE threadHandle = reinterpret_cast<HANDLE>(_beginthreadex(0, 0, wtfThreadEntryPoint, invocation, 0, &threadIdentifier));
+#endif
+ if (!threadHandle) {
+#if PLATFORM(WINCE)
+ LOG_ERROR("Failed to create thread at entry point %p with data %p: %ld", entryPoint, data, ::GetLastError());
+#elif defined(NO_ERRNO)
+ LOG_ERROR("Failed to create thread at entry point %p with data %p.", entryPoint, data);
+#else
+ LOG_ERROR("Failed to create thread at entry point %p with data %p: %ld", entryPoint, data, errno);
+#endif
+ return 0;
+ }
+
+ threadID = static_cast<ThreadIdentifier>(threadIdentifier);
+ storeThreadHandleByIdentifier(threadIdentifier, threadHandle);
+
+ return threadID;
+}
+
+int waitForThreadCompletion(ThreadIdentifier threadID, void** result)
+{
+ ASSERT(threadID);
+
+ HANDLE threadHandle = threadHandleForIdentifier(threadID);
+ if (!threadHandle)
+ LOG_ERROR("ThreadIdentifier %u did not correspond to an active thread when trying to quit", threadID);
+
+ DWORD joinResult = WaitForSingleObject(threadHandle, INFINITE);
+ if (joinResult == WAIT_FAILED)
+ LOG_ERROR("ThreadIdentifier %u was found to be deadlocked trying to quit", threadID);
+
+ CloseHandle(threadHandle);
+ clearThreadHandleForIdentifier(threadID);
+
+ return joinResult;
+}
+
+void detachThread(ThreadIdentifier threadID)
+{
+ ASSERT(threadID);
+
+ HANDLE threadHandle = threadHandleForIdentifier(threadID);
+ if (threadHandle)
+ CloseHandle(threadHandle);
+ clearThreadHandleForIdentifier(threadID);
+}
+
+ThreadIdentifier currentThread()
+{
+ return static_cast<ThreadIdentifier>(GetCurrentThreadId());
+}
+
+bool isMainThread()
+{
+ return currentThread() == mainThreadIdentifier;
+}
+
+Mutex::Mutex()
+{
+ m_mutex.m_recursionCount = 0;
+ InitializeCriticalSection(&m_mutex.m_internalMutex);
+}
+
+Mutex::~Mutex()
+{
+ DeleteCriticalSection(&m_mutex.m_internalMutex);
+}
+
+void Mutex::lock()
+{
+ EnterCriticalSection(&m_mutex.m_internalMutex);
+ ++m_mutex.m_recursionCount;
+}
+
+bool Mutex::tryLock()
+{
+    // This method is modeled on pthread_mutex_trylock, which returns an error
+    // if the lock is already owned by the calling thread. The Win32 primitive
+    // TryEnterCriticalSection instead treats recursive acquisition as success,
+    // which would change the behavior of several WebKit tests that check
+    // whether the current thread already owns this mutex (see e.g.
+    // IconDatabase::getOrCreateIconRecord).
+ DWORD result = TryEnterCriticalSection(&m_mutex.m_internalMutex);
+
+ if (result != 0) { // We got the lock
+ // If this thread already had the lock, we must unlock and
+ // return false so that we mimic the behavior of POSIX's
+ // pthread_mutex_trylock:
+ if (m_mutex.m_recursionCount > 0) {
+ LeaveCriticalSection(&m_mutex.m_internalMutex);
+ return false;
+ }
+
+ ++m_mutex.m_recursionCount;
+ return true;
+ }
+
+ return false;
+}
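+
+// A minimal sketch of the semantics above (illustrative only; 'm' is a
+// hypothetical Mutex instance):
+//
+//     Mutex m;
+//     bool first = m.tryLock();  // true - the lock was free
+//     bool again = m.tryLock();  // false - re-entry by the owning thread
+//                                // fails, matching pthread_mutex_trylock
+//     if (first)
+//         m.unlock();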
+
+void Mutex::unlock()
+{
+ --m_mutex.m_recursionCount;
+ LeaveCriticalSection(&m_mutex.m_internalMutex);
+}
+
+bool PlatformCondition::timedWait(PlatformMutex& mutex, DWORD durationMilliseconds)
+{
+ // Enter the wait state.
+ DWORD res = WaitForSingleObject(m_blockLock, INFINITE);
+ ASSERT(res == WAIT_OBJECT_0);
+ ++m_waitersBlocked;
+ res = ReleaseSemaphore(m_blockLock, 1, 0);
+ ASSERT(res);
+
+ LeaveCriticalSection(&mutex.m_internalMutex);
+
+ // Main wait - use timeout.
+ bool timedOut = (WaitForSingleObject(m_blockQueue, durationMilliseconds) == WAIT_TIMEOUT);
+
+ res = WaitForSingleObject(m_unblockLock, INFINITE);
+ ASSERT(res == WAIT_OBJECT_0);
+
+ int signalsLeft = m_waitersToUnblock;
+
+ if (m_waitersToUnblock)
+ --m_waitersToUnblock;
+    else if (++m_waitersGone == (INT_MAX / 2)) { // Timeout/canceled or spurious semaphore.
+        // A timeout or spurious wakeup occurred; normalize the m_waitersGone count.
+        // This can happen if many timed waits are made and no notify_* call
+        // ever follows.
+ res = WaitForSingleObject(m_blockLock, INFINITE);
+ ASSERT(res == WAIT_OBJECT_0);
+ m_waitersBlocked -= m_waitersGone;
+ res = ReleaseSemaphore(m_blockLock, 1, 0);
+ ASSERT(res);
+ m_waitersGone = 0;
+ }
+
+ res = ReleaseMutex(m_unblockLock);
+ ASSERT(res);
+
+ if (signalsLeft == 1) {
+ res = ReleaseSemaphore(m_blockLock, 1, 0); // Open the gate.
+ ASSERT(res);
+ }
+
+    EnterCriticalSection(&mutex.m_internalMutex);
+
+ return !timedOut;
+}
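+
+// The wait/signal machinery above follows the semaphore-based condition
+// variable emulation from pthreads-win32 (see CONTRIBUTORS.pthreads-win32):
+// m_blockLock gates the arrival of new waiters, m_blockQueue is the semaphore
+// the waiters actually sleep on, and m_unblockLock serializes updates to the
+// m_waitersBlocked / m_waitersGone / m_waitersToUnblock bookkeeping.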
+
+void PlatformCondition::signal(bool unblockAll)
+{
+ unsigned signalsToIssue = 0;
+
+ DWORD res = WaitForSingleObject(m_unblockLock, INFINITE);
+ ASSERT(res == WAIT_OBJECT_0);
+
+ if (m_waitersToUnblock) { // the gate is already closed
+ if (!m_waitersBlocked) { // no-op
+ res = ReleaseMutex(m_unblockLock);
+ ASSERT(res);
+ return;
+ }
+
+ if (unblockAll) {
+ signalsToIssue = m_waitersBlocked;
+ m_waitersToUnblock += m_waitersBlocked;
+ m_waitersBlocked = 0;
+ } else {
+ signalsToIssue = 1;
+ ++m_waitersToUnblock;
+ --m_waitersBlocked;
+ }
+ } else if (m_waitersBlocked > m_waitersGone) {
+ res = WaitForSingleObject(m_blockLock, INFINITE); // Close the gate.
+ ASSERT(res == WAIT_OBJECT_0);
+ if (m_waitersGone != 0) {
+ m_waitersBlocked -= m_waitersGone;
+ m_waitersGone = 0;
+ }
+ if (unblockAll) {
+ signalsToIssue = m_waitersBlocked;
+ m_waitersToUnblock = m_waitersBlocked;
+ m_waitersBlocked = 0;
+ } else {
+ signalsToIssue = 1;
+ m_waitersToUnblock = 1;
+ --m_waitersBlocked;
+ }
+ } else { // No-op.
+ res = ReleaseMutex(m_unblockLock);
+ ASSERT(res);
+ return;
+ }
+
+ res = ReleaseMutex(m_unblockLock);
+ ASSERT(res);
+
+ if (signalsToIssue) {
+ res = ReleaseSemaphore(m_blockQueue, signalsToIssue, 0);
+ ASSERT(res);
+ }
+}
+
+static const long MaxSemaphoreCount = static_cast<long>(~0UL >> 1);
+
+ThreadCondition::ThreadCondition()
+{
+ m_condition.m_waitersGone = 0;
+ m_condition.m_waitersBlocked = 0;
+ m_condition.m_waitersToUnblock = 0;
+ m_condition.m_blockLock = CreateSemaphore(0, 1, 1, 0);
+ m_condition.m_blockQueue = CreateSemaphore(0, 0, MaxSemaphoreCount, 0);
+ m_condition.m_unblockLock = CreateMutex(0, 0, 0);
+
+ if (!m_condition.m_blockLock || !m_condition.m_blockQueue || !m_condition.m_unblockLock) {
+ if (m_condition.m_blockLock)
+ CloseHandle(m_condition.m_blockLock);
+ if (m_condition.m_blockQueue)
+ CloseHandle(m_condition.m_blockQueue);
+ if (m_condition.m_unblockLock)
+ CloseHandle(m_condition.m_unblockLock);
+ }
+}
+
+ThreadCondition::~ThreadCondition()
+{
+ CloseHandle(m_condition.m_blockLock);
+ CloseHandle(m_condition.m_blockQueue);
+ CloseHandle(m_condition.m_unblockLock);
+}
+
+void ThreadCondition::wait(Mutex& mutex)
+{
+ m_condition.timedWait(mutex.impl(), INFINITE);
+}
+
+bool ThreadCondition::timedWait(Mutex& mutex, double absoluteTime)
+{
+ double currentTime = WTF::currentTime();
+
+ // Time is in the past - return immediately.
+ if (absoluteTime < currentTime)
+ return false;
+
+    // Time is too far in the future (the millisecond interval would overflow the wait timeout) - wait forever.
+ if (absoluteTime - currentTime > static_cast<double>(INT_MAX) / 1000.0) {
+ wait(mutex);
+ return true;
+ }
+
+ double intervalMilliseconds = (absoluteTime - currentTime) * 1000.0;
+ return m_condition.timedWait(mutex.impl(), static_cast<unsigned long>(intervalMilliseconds));
+}
+
+void ThreadCondition::signal()
+{
+ m_condition.signal(false); // Unblock only 1 thread.
+}
+
+void ThreadCondition::broadcast()
+{
+ m_condition.signal(true); // Unblock all threads.
+}
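+
+// A minimal producer/consumer sketch for these primitives (illustrative only;
+// 'queue', 'queueMutex', 'queueCondition' and 'Task' are hypothetical):
+//
+//     // Consumer:
+//     queueMutex.lock();
+//     while (queue.isEmpty())
+//         queueCondition.wait(queueMutex); // atomically releases and reacquires
+//     Task task = queue.first();
+//     queue.remove(0);
+//     queueMutex.unlock();
+//
+//     // Producer:
+//     queueMutex.lock();
+//     queue.append(task);
+//     queueCondition.signal(); // or broadcast() to wake all waiters
+//     queueMutex.unlock();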
+
+} // namespace WTF
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TypeTraits.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TypeTraits.cpp
new file mode 100644
index 0000000..36fc6c6
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TypeTraits.cpp
@@ -0,0 +1,120 @@
+ /*
+ * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "config.h"
+#include "TypeTraits.h"
+
+#include "Assertions.h"
+
+namespace WTF {
+
+COMPILE_ASSERT(IsInteger<bool>::value, WTF_IsInteger_bool_true);
+COMPILE_ASSERT(IsInteger<char>::value, WTF_IsInteger_char_true);
+COMPILE_ASSERT(IsInteger<signed char>::value, WTF_IsInteger_signed_char_true);
+COMPILE_ASSERT(IsInteger<unsigned char>::value, WTF_IsInteger_unsigned_char_true);
+COMPILE_ASSERT(IsInteger<short>::value, WTF_IsInteger_short_true);
+COMPILE_ASSERT(IsInteger<unsigned short>::value, WTF_IsInteger_unsigned_short_true);
+COMPILE_ASSERT(IsInteger<int>::value, WTF_IsInteger_int_true);
+COMPILE_ASSERT(IsInteger<unsigned int>::value, WTF_IsInteger_unsigned_int_true);
+COMPILE_ASSERT(IsInteger<long>::value, WTF_IsInteger_long_true);
+COMPILE_ASSERT(IsInteger<unsigned long>::value, WTF_IsInteger_unsigned_long_true);
+COMPILE_ASSERT(IsInteger<long long>::value, WTF_IsInteger_long_long_true);
+COMPILE_ASSERT(IsInteger<unsigned long long>::value, WTF_IsInteger_unsigned_long_long_true);
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+COMPILE_ASSERT(IsInteger<wchar_t>::value, WTF_IsInteger_wchar_t_true);
+#endif
+COMPILE_ASSERT(!IsInteger<char*>::value, WTF_IsInteger_char_pointer_false);
+COMPILE_ASSERT(!IsInteger<const char*>::value, WTF_IsInteger_const_char_pointer_false);
+COMPILE_ASSERT(!IsInteger<volatile char*>::value, WTF_IsInteger_volatile_char_pointer_false);
+COMPILE_ASSERT(!IsInteger<double>::value, WTF_IsInteger_double_false);
+COMPILE_ASSERT(!IsInteger<float>::value, WTF_IsInteger_float_false);
+
+COMPILE_ASSERT(IsPod<bool>::value, WTF_IsPod_bool_true);
+COMPILE_ASSERT(IsPod<char>::value, WTF_IsPod_char_true);
+COMPILE_ASSERT(IsPod<signed char>::value, WTF_IsPod_signed_char_true);
+COMPILE_ASSERT(IsPod<unsigned char>::value, WTF_IsPod_unsigned_char_true);
+COMPILE_ASSERT(IsPod<short>::value, WTF_IsPod_short_true);
+COMPILE_ASSERT(IsPod<unsigned short>::value, WTF_IsPod_unsigned_short_true);
+COMPILE_ASSERT(IsPod<int>::value, WTF_IsPod_int_true);
+COMPILE_ASSERT(IsPod<unsigned int>::value, WTF_IsPod_unsigned_int_true);
+COMPILE_ASSERT(IsPod<long>::value, WTF_IsPod_long_true);
+COMPILE_ASSERT(IsPod<unsigned long>::value, WTF_IsPod_unsigned_long_true);
+COMPILE_ASSERT(IsPod<long long>::value, WTF_IsPod_long_long_true);
+COMPILE_ASSERT(IsPod<unsigned long long>::value, WTF_IsPod_unsigned_long_long_true);
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+COMPILE_ASSERT(IsPod<wchar_t>::value, WTF_IsPod_wchar_t_true);
+#endif
+COMPILE_ASSERT(IsPod<char*>::value, WTF_IsPod_char_pointer_true);
+COMPILE_ASSERT(IsPod<const char*>::value, WTF_IsPod_const_char_pointer_true);
+COMPILE_ASSERT(IsPod<volatile char*>::value, WTF_IsPod_volatile_char_pointer_true);
+COMPILE_ASSERT(IsPod<double>::value, WTF_IsPod_double_true);
+COMPILE_ASSERT(IsPod<long double>::value, WTF_IsPod_long_double_true);
+COMPILE_ASSERT(IsPod<float>::value, WTF_IsPod_float_true);
+COMPILE_ASSERT(!IsPod<IsPod<bool> >::value, WTF_IsPod_struct_false);
+
+enum IsConvertibleToIntegerCheck { };
+COMPILE_ASSERT(IsConvertibleToInteger<IsConvertibleToIntegerCheck>::value, WTF_IsConvertibleToInteger_enum_true);
+COMPILE_ASSERT(IsConvertibleToInteger<bool>::value, WTF_IsConvertibleToInteger_bool_true);
+COMPILE_ASSERT(IsConvertibleToInteger<char>::value, WTF_IsConvertibleToInteger_char_true);
+COMPILE_ASSERT(IsConvertibleToInteger<signed char>::value, WTF_IsConvertibleToInteger_signed_char_true);
+COMPILE_ASSERT(IsConvertibleToInteger<unsigned char>::value, WTF_IsConvertibleToInteger_unsigned_char_true);
+COMPILE_ASSERT(IsConvertibleToInteger<short>::value, WTF_IsConvertibleToInteger_short_true);
+COMPILE_ASSERT(IsConvertibleToInteger<unsigned short>::value, WTF_IsConvertibleToInteger_unsigned_short_true);
+COMPILE_ASSERT(IsConvertibleToInteger<int>::value, WTF_IsConvertibleToInteger_int_true);
+COMPILE_ASSERT(IsConvertibleToInteger<unsigned int>::value, WTF_IsConvertibleToInteger_unsigned_int_true);
+COMPILE_ASSERT(IsConvertibleToInteger<long>::value, WTF_IsConvertibleToInteger_long_true);
+COMPILE_ASSERT(IsConvertibleToInteger<unsigned long>::value, WTF_IsConvertibleToInteger_unsigned_long_true);
+COMPILE_ASSERT(IsConvertibleToInteger<long long>::value, WTF_IsConvertibleToInteger_long_long_true);
+COMPILE_ASSERT(IsConvertibleToInteger<unsigned long long>::value, WTF_IsConvertibleToInteger_unsigned_long_long_true);
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+COMPILE_ASSERT(IsConvertibleToInteger<wchar_t>::value, WTF_IsConvertibleToInteger_wchar_t_true);
+#endif
+COMPILE_ASSERT(IsConvertibleToInteger<double>::value, WTF_IsConvertibleToInteger_double_true);
+COMPILE_ASSERT(IsConvertibleToInteger<long double>::value, WTF_IsConvertibleToInteger_long_double_true);
+COMPILE_ASSERT(IsConvertibleToInteger<float>::value, WTF_IsConvertibleToInteger_float_true);
+COMPILE_ASSERT(!IsConvertibleToInteger<char*>::value, WTF_IsConvertibleToInteger_char_pointer_false);
+COMPILE_ASSERT(!IsConvertibleToInteger<const char*>::value, WTF_IsConvertibleToInteger_const_char_pointer_false);
+COMPILE_ASSERT(!IsConvertibleToInteger<volatile char*>::value, WTF_IsConvertibleToInteger_volatile_char_pointer_false);
+COMPILE_ASSERT(!IsConvertibleToInteger<IsConvertibleToInteger<bool> >::value, WTF_IsConvertibleToInteger_struct_false);
+
+COMPILE_ASSERT((IsSameType<bool, bool>::value), WTF_IsSameType_bool_true);
+COMPILE_ASSERT((IsSameType<int*, int*>::value), WTF_IsSameType_int_pointer_true);
+COMPILE_ASSERT((!IsSameType<int, int*>::value), WTF_IsSameType_int_int_pointer_false);
+COMPILE_ASSERT((!IsSameType<bool, const bool>::value), WTF_IsSameType_const_change_false);
+COMPILE_ASSERT((!IsSameType<bool, volatile bool>::value), WTF_IsSameType_volatile_change_false);
+
+COMPILE_ASSERT((IsSameType<bool, RemoveConst<const bool>::Type>::value), WTF_test_RemoveConst_const_bool);
+COMPILE_ASSERT((!IsSameType<bool, RemoveConst<volatile bool>::Type>::value), WTF_test_RemoveConst_volatile_bool);
+
+COMPILE_ASSERT((IsSameType<bool, RemoveVolatile<bool>::Type>::value), WTF_test_RemoveVolatile_bool);
+COMPILE_ASSERT((!IsSameType<bool, RemoveVolatile<const bool>::Type>::value), WTF_test_RemoveVolatile_const_bool);
+COMPILE_ASSERT((IsSameType<bool, RemoveVolatile<volatile bool>::Type>::value), WTF_test_RemoveVolatile_volatile_bool);
+
+COMPILE_ASSERT((IsSameType<bool, RemoveConstVolatile<bool>::Type>::value), WTF_test_RemoveConstVolatile_bool);
+COMPILE_ASSERT((IsSameType<bool, RemoveConstVolatile<const bool>::Type>::value), WTF_test_RemoveConstVolatile_const_bool);
+COMPILE_ASSERT((IsSameType<bool, RemoveConstVolatile<volatile bool>::Type>::value), WTF_test_RemoveConstVolatile_volatile_bool);
+COMPILE_ASSERT((IsSameType<bool, RemoveConstVolatile<const volatile bool>::Type>::value), WTF_test_RemoveConstVolatile_const_volatile_bool);
+
+COMPILE_ASSERT((IsSameType<int, RemovePointer<int>::Type>::value), WTF_Test_RemovePointer_int);
+COMPILE_ASSERT((IsSameType<int, RemovePointer<int*>::Type>::value), WTF_Test_RemovePointer_int_pointer);
+COMPILE_ASSERT((!IsSameType<int, RemovePointer<int**>::Type>::value), WTF_Test_RemovePointer_int_pointer_pointer);
+
+} // namespace WTF
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TypeTraits.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TypeTraits.h
new file mode 100644
index 0000000..56659a8
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/TypeTraits.h
@@ -0,0 +1,339 @@
+ /*
+ * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Google Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef TypeTraits_h
+#define TypeTraits_h
+
+#include "Platform.h"
+
+#if (defined(__GLIBCXX__) && (__GLIBCXX__ >= 20070724) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || (defined(_MSC_VER) && (_MSC_VER >= 1600))
+#include <type_traits>
+#endif
+
+namespace WTF {
+
+ // The following are provided in this file:
+ //
+ // IsInteger<T>::value
+ // IsPod<T>::value, see the definition for a note about its limitations
+ // IsConvertibleToInteger<T>::value
+ //
+ // IsSameType<T, U>::value
+ //
+ // RemovePointer<T>::Type
+ // RemoveConst<T>::Type
+ // RemoveVolatile<T>::Type
+ // RemoveConstVolatile<T>::Type
+ //
+ // COMPILE_ASSERT's in TypeTraits.cpp illustrate their usage and what they do.
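+    //
+    // For example (a compile-time sketch in the style of TypeTraits.cpp):
+    //
+    //     COMPILE_ASSERT(IsInteger<int>::value, int_is_an_integer);
+    //     COMPILE_ASSERT(!IsPod<IsPod<bool> >::value, structs_are_not_detected_as_pod);
+    //     COMPILE_ASSERT((IsSameType<int, RemovePointer<int*>::Type>::value), one_pointer_level_removed);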
+
+ template<typename T> struct IsInteger { static const bool value = false; };
+ template<> struct IsInteger<bool> { static const bool value = true; };
+ template<> struct IsInteger<char> { static const bool value = true; };
+ template<> struct IsInteger<signed char> { static const bool value = true; };
+ template<> struct IsInteger<unsigned char> { static const bool value = true; };
+ template<> struct IsInteger<short> { static const bool value = true; };
+ template<> struct IsInteger<unsigned short> { static const bool value = true; };
+ template<> struct IsInteger<int> { static const bool value = true; };
+ template<> struct IsInteger<unsigned int> { static const bool value = true; };
+ template<> struct IsInteger<long> { static const bool value = true; };
+ template<> struct IsInteger<unsigned long> { static const bool value = true; };
+ template<> struct IsInteger<long long> { static const bool value = true; };
+ template<> struct IsInteger<unsigned long long> { static const bool value = true; };
+#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED)
+ template<> struct IsInteger<wchar_t> { static const bool value = true; };
+#endif
+
+ // IsPod is misnamed as it doesn't cover all plain old data (pod) types.
+ // Specifically, it doesn't allow for enums or for structs.
+ template <typename T> struct IsPod { static const bool value = IsInteger<T>::value; };
+ template <> struct IsPod<float> { static const bool value = true; };
+ template <> struct IsPod<double> { static const bool value = true; };
+ template <> struct IsPod<long double> { static const bool value = true; };
+ template <typename P> struct IsPod<P*> { static const bool value = true; };
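+
+    // For instance (illustrative; 'Point' is a hypothetical aggregate):
+    //
+    //     struct Point { int x; int y; }; // POD in the language sense,
+    //     COMPILE_ASSERT(!IsPod<Point>::value, but_not_reported_as_such_here);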
+
+ // Avoid "possible loss of data" warning when using Microsoft's C++ compiler
+ // by not converting int's to doubles.
+ template<bool performCheck, typename U> class CheckedIsConvertibleToDouble;
+ template<typename U> class CheckedIsConvertibleToDouble<false, U> {
+ public:
+ static const bool value = false;
+ };
+
+ template<typename U> class CheckedIsConvertibleToDouble<true, U> {
+ typedef char YesType;
+ struct NoType {
+ char padding[8];
+ };
+
+ static YesType floatCheck(long double);
+ static NoType floatCheck(...);
+ static U& t;
+ public:
+ static const bool value = sizeof(floatCheck(t)) == sizeof(YesType);
+ };
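+
+    // The test above is the usual sizeof-based overload trick: floatCheck(t)
+    // is never executed, but overload resolution prefers the long double
+    // overload exactly when U is implicitly convertible to an arithmetic type,
+    // and comparing the sizes of the two distinct return types records the
+    // winner at compile time.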
+
+ template<typename T> class IsConvertibleToInteger {
+ public:
+ static const bool value = IsInteger<T>::value || CheckedIsConvertibleToDouble<!IsInteger<T>::value, T>::value;
+ };
+
+ template <typename T, typename U> struct IsSameType {
+ static const bool value = false;
+ };
+
+ template <typename T> struct IsSameType<T, T> {
+ static const bool value = true;
+ };
+
+ template <typename T> struct RemoveConst {
+ typedef T Type;
+ };
+
+ template <typename T> struct RemoveConst<const T> {
+ typedef T Type;
+ };
+
+ template <typename T> struct RemoveVolatile {
+ typedef T Type;
+ };
+
+ template <typename T> struct RemoveVolatile<volatile T> {
+ typedef T Type;
+ };
+
+ template <typename T> struct RemoveConstVolatile {
+ typedef typename RemoveVolatile<typename RemoveConst<T>::Type>::Type Type;
+ };
+
+ template <typename T> struct RemovePointer {
+ typedef T Type;
+ };
+
+ template <typename T> struct RemovePointer<T*> {
+ typedef T Type;
+ };
+
+#if (defined(__GLIBCXX__) && (__GLIBCXX__ >= 20070724) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || (defined(_MSC_VER) && (_MSC_VER >= 1600))
+
+ // GCC's libstdc++ 20070724 and later supports C++ TR1 type_traits in the std namespace.
+ // VC10 (VS2010) and later support C++ TR1 type_traits in the std::tr1 namespace.
+ template<typename T> struct HasTrivialConstructor : public std::tr1::has_trivial_constructor<T> { };
+ template<typename T> struct HasTrivialDestructor : public std::tr1::has_trivial_destructor<T> { };
+
+#else
+
+ // This compiler doesn't provide type traits, so we provide basic HasTrivialConstructor
+ // and HasTrivialDestructor definitions. The definitions here include most built-in
+    // scalar types but do not include POD structs and classes. For the intended purposes of
+    // type_traits this results in correct, but potentially less efficient, code.
+ template <typename T, T v>
+ struct IntegralConstant {
+ static const T value = v;
+ typedef T value_type;
+ typedef IntegralConstant<T, v> type;
+ };
+
+ typedef IntegralConstant<bool, true> true_type;
+ typedef IntegralConstant<bool, false> false_type;
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1400)
+ // VC8 (VS2005) and later have built-in compiler support for HasTrivialConstructor / HasTrivialDestructor,
+ // but for some unexplained reason it doesn't work on built-in types.
+ template <typename T> struct HasTrivialConstructor : public IntegralConstant<bool, __has_trivial_constructor(T)>{ };
+ template <typename T> struct HasTrivialDestructor : public IntegralConstant<bool, __has_trivial_destructor(T)>{ };
+#else
+ template <typename T> struct HasTrivialConstructor : public false_type{ };
+ template <typename T> struct HasTrivialDestructor : public false_type{ };
+#endif
+
+ template <typename T> struct HasTrivialConstructor<T*> : public true_type{ };
+ template <typename T> struct HasTrivialDestructor<T*> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<float> : public true_type{ };
+ template <> struct HasTrivialConstructor<const float> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile float> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile float> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<double> : public true_type{ };
+ template <> struct HasTrivialConstructor<const double> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile double> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile double> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<long double> : public true_type{ };
+ template <> struct HasTrivialConstructor<const long double> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile long double> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile long double> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<unsigned char> : public true_type{ };
+ template <> struct HasTrivialConstructor<const unsigned char> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile unsigned char> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile unsigned char> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<unsigned short> : public true_type{ };
+ template <> struct HasTrivialConstructor<const unsigned short> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile unsigned short> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile unsigned short> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<unsigned int> : public true_type{ };
+ template <> struct HasTrivialConstructor<const unsigned int> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile unsigned int> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile unsigned int> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<unsigned long> : public true_type{ };
+ template <> struct HasTrivialConstructor<const unsigned long> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile unsigned long> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile unsigned long> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<unsigned long long> : public true_type{ };
+ template <> struct HasTrivialConstructor<const unsigned long long> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile unsigned long long> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile unsigned long long> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<signed char> : public true_type{ };
+ template <> struct HasTrivialConstructor<const signed char> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile signed char> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile signed char> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<signed short> : public true_type{ };
+ template <> struct HasTrivialConstructor<const signed short> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile signed short> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile signed short> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<signed int> : public true_type{ };
+ template <> struct HasTrivialConstructor<const signed int> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile signed int> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile signed int> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<signed long> : public true_type{ };
+ template <> struct HasTrivialConstructor<const signed long> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile signed long> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile signed long> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<signed long long> : public true_type{ };
+ template <> struct HasTrivialConstructor<const signed long long> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile signed long long> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile signed long long> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<bool> : public true_type{ };
+ template <> struct HasTrivialConstructor<const bool> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile bool> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile bool> : public true_type{ };
+
+ template <> struct HasTrivialConstructor<char> : public true_type{ };
+ template <> struct HasTrivialConstructor<const char> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile char> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile char> : public true_type{ };
+
+ #if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+ template <> struct HasTrivialConstructor<wchar_t> : public true_type{ };
+ template <> struct HasTrivialConstructor<const wchar_t> : public true_type{ };
+ template <> struct HasTrivialConstructor<volatile wchar_t> : public true_type{ };
+ template <> struct HasTrivialConstructor<const volatile wchar_t> : public true_type{ };
+ #endif
+
+ template <> struct HasTrivialDestructor<float> : public true_type{ };
+ template <> struct HasTrivialDestructor<const float> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile float> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile float> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<double> : public true_type{ };
+ template <> struct HasTrivialDestructor<const double> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile double> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile double> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<long double> : public true_type{ };
+ template <> struct HasTrivialDestructor<const long double> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile long double> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile long double> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<unsigned char> : public true_type{ };
+ template <> struct HasTrivialDestructor<const unsigned char> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile unsigned char> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile unsigned char> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<unsigned short> : public true_type{ };
+ template <> struct HasTrivialDestructor<const unsigned short> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile unsigned short> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile unsigned short> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<unsigned int> : public true_type{ };
+ template <> struct HasTrivialDestructor<const unsigned int> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile unsigned int> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile unsigned int> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<unsigned long> : public true_type{ };
+ template <> struct HasTrivialDestructor<const unsigned long> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile unsigned long> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile unsigned long> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<unsigned long long> : public true_type{ };
+ template <> struct HasTrivialDestructor<const unsigned long long> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile unsigned long long> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile unsigned long long> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<signed char> : public true_type{ };
+ template <> struct HasTrivialDestructor<const signed char> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile signed char> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile signed char> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<signed short> : public true_type{ };
+ template <> struct HasTrivialDestructor<const signed short> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile signed short> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile signed short> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<signed int> : public true_type{ };
+ template <> struct HasTrivialDestructor<const signed int> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile signed int> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile signed int> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<signed long> : public true_type{ };
+ template <> struct HasTrivialDestructor<const signed long> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile signed long> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile signed long> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<signed long long> : public true_type{ };
+ template <> struct HasTrivialDestructor<const signed long long> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile signed long long> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile signed long long> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<bool> : public true_type{ };
+ template <> struct HasTrivialDestructor<const bool> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile bool> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile bool> : public true_type{ };
+
+ template <> struct HasTrivialDestructor<char> : public true_type{ };
+ template <> struct HasTrivialDestructor<const char> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile char> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile char> : public true_type{ };
+
+ #if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+ template <> struct HasTrivialDestructor<wchar_t> : public true_type{ };
+ template <> struct HasTrivialDestructor<const wchar_t> : public true_type{ };
+ template <> struct HasTrivialDestructor<volatile wchar_t> : public true_type{ };
+ template <> struct HasTrivialDestructor<const volatile wchar_t> : public true_type{ };
+ #endif
+
+#endif // __GLIBCXX__, etc.
+
+} // namespace WTF
+
+#endif // TypeTraits_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/UnusedParam.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/UnusedParam.h
new file mode 100644
index 0000000..996f5c8
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/UnusedParam.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2006 Apple Computer, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_UnusedParam_h
+#define WTF_UnusedParam_h
+
+/* Don't use this for C++; it should only be used in plain C files or
+   ObjC methods, where leaving off the parameter name is not allowed. */
+
+#define UNUSED_PARAM(x) (void)x
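+
+/* Example (a minimal sketch; 'reserved' and 'handleValue' are hypothetical):
+
+       static void callback(int value, void* reserved)
+       {
+           UNUSED_PARAM(reserved);
+           handleValue(value);
+       }
+*/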
+
+#endif /* WTF_UnusedParam_h */
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/VMTags.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/VMTags.h
new file mode 100644
index 0000000..519f518
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/VMTags.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VMTags_h
+#define VMTags_h
+
+#include <wtf/Platform.h>
+
+// On Mac OS X, the VM subsystem allows tagging memory requested from mmap and vm_map
+// in order to aid tools that inspect system memory use.
+#if PLATFORM(DARWIN) && !defined(BUILDING_ON_TIGER)
+
+#include <mach/vm_statistics.h>
+
+#if defined(VM_MEMORY_JAVASCRIPT_CORE) && defined(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE) && defined(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
+#define VM_TAG_FOR_COLLECTOR_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_CORE)
+#define VM_TAG_FOR_REGISTERFILE_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE)
+#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
+#else
+#define VM_TAG_FOR_COLLECTOR_MEMORY VM_MAKE_TAG(63)
+#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY VM_MAKE_TAG(64)
+#define VM_TAG_FOR_REGISTERFILE_MEMORY VM_MAKE_TAG(65)
+#endif // defined(VM_MEMORY_JAVASCRIPT_CORE) && defined(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE) && defined(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
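+
+// On Darwin the tag is passed in place of the file descriptor when mapping
+// anonymous memory, e.g. (an illustrative sketch):
+//
+//     void* base = mmap(0, size, PROT_READ | PROT_WRITE,
+//         MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_COLLECTOR_MEMORY, 0);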
+
+#else // PLATFORM(DARWIN) && !defined(BUILDING_ON_TIGER)
+
+#define VM_TAG_FOR_COLLECTOR_MEMORY -1
+#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY -1
+#define VM_TAG_FOR_REGISTERFILE_MEMORY -1
+
+#endif // PLATFORM(DARWIN) && !defined(BUILDING_ON_TIGER)
+
+#endif // VMTags_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Vector.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Vector.h
new file mode 100644
index 0000000..7decc4a
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/Vector.h
@@ -0,0 +1,1014 @@
+/*
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_Vector_h
+#define WTF_Vector_h
+
+#include "FastAllocBase.h"
+#include "Noncopyable.h"
+#include "NotFound.h"
+#include "VectorTraits.h"
+#include <limits>
+#include <utility>
+
+#if PLATFORM(QT)
+#include <QDataStream>
+#endif
+
+namespace WTF {
+
+ using std::min;
+ using std::max;
+
+ // WTF_ALIGN_OF / WTF_ALIGNED
+ #if COMPILER(GCC) || COMPILER(MINGW) || COMPILER(RVCT) || COMPILER(WINSCW)
+ #define WTF_ALIGN_OF(type) __alignof__(type)
+ #define WTF_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((__aligned__(n)))
+ #elif COMPILER(MSVC)
+ #define WTF_ALIGN_OF(type) __alignof(type)
+ #define WTF_ALIGNED(variable_type, variable, n) __declspec(align(n)) variable_type variable
+ #else
+ #define WTF_ALIGN_OF(type) 0
+ #endif
+
+ #if COMPILER(GCC) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303)
+ typedef char __attribute__((__may_alias__)) AlignedBufferChar;
+ #else
+ typedef char AlignedBufferChar;
+ #endif
+
+ #ifdef WTF_ALIGNED
+ template <size_t size, size_t alignment> struct AlignedBuffer;
+ template <size_t size> struct AlignedBuffer<size, 1> { AlignedBufferChar buffer[size]; };
+ template <size_t size> struct AlignedBuffer<size, 2> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 2); };
+ template <size_t size> struct AlignedBuffer<size, 4> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 4); };
+ template <size_t size> struct AlignedBuffer<size, 8> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 8); };
+ template <size_t size> struct AlignedBuffer<size, 16> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 16); };
+ template <size_t size> struct AlignedBuffer<size, 32> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 32); };
+ template <size_t size> struct AlignedBuffer<size, 64> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 64); };
+ #else
+ template <size_t size, size_t> struct AlignedBuffer
+ {
+ AlignedBufferChar oversizebuffer[size + 64];
+ AlignedBufferChar *buffer()
+ {
+ AlignedBufferChar *ptr = oversizebuffer;
+ ptr += 64 - (reinterpret_cast<size_t>(ptr) & 0x3f);
+ return ptr;
+ }
+ };
+ #endif
+
+ template <bool needsDestruction, typename T>
+ class VectorDestructor;
+
+ template<typename T>
+ struct VectorDestructor<false, T>
+ {
+ static void destruct(T*, T*) {}
+ };
+
+ template<typename T>
+ struct VectorDestructor<true, T>
+ {
+ static void destruct(T* begin, T* end)
+ {
+ for (T* cur = begin; cur != end; ++cur)
+ cur->~T();
+ }
+ };
+
+ template <bool needsInitialization, bool canInitializeWithMemset, typename T>
+ class VectorInitializer;
+
+ template<bool ignore, typename T>
+ struct VectorInitializer<false, ignore, T>
+ {
+ static void initialize(T*, T*) {}
+ };
+
+ template<typename T>
+ struct VectorInitializer<true, false, T>
+ {
+ static void initialize(T* begin, T* end)
+ {
+ for (T* cur = begin; cur != end; ++cur)
+ new (cur) T;
+ }
+ };
+
+ template<typename T>
+ struct VectorInitializer<true, true, T>
+ {
+ static void initialize(T* begin, T* end)
+ {
+ memset(begin, 0, reinterpret_cast<char*>(end) - reinterpret_cast<char*>(begin));
+ }
+ };
+
+ template <bool canMoveWithMemcpy, typename T>
+ class VectorMover;
+
+ template<typename T>
+ struct VectorMover<false, T>
+ {
+ static void move(T* src, const T* srcEnd, T* dst)
+ {
+ while (src != srcEnd) {
+ new (dst) T(*src);
+ src->~T();
+ ++dst;
+ ++src;
+ }
+ }
+ static void moveOverlapping(T* src, const T* srcEnd, T* dst)
+ {
+ if (src > dst)
+ move(src, srcEnd, dst);
+ else {
+ T* dstEnd = dst + (srcEnd - src);
+ while (src != srcEnd) {
+ --srcEnd;
+ --dstEnd;
+ new (dstEnd) T(*srcEnd);
+ srcEnd->~T();
+ }
+ }
+ }
+ };
+
+ template<typename T>
+ struct VectorMover<true, T>
+ {
+ static void move(T* src, const T* srcEnd, T* dst)
+ {
+ memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - reinterpret_cast<const char*>(src));
+ }
+ static void moveOverlapping(T* src, const T* srcEnd, T* dst)
+ {
+ memmove(dst, src, reinterpret_cast<const char*>(srcEnd) - reinterpret_cast<const char*>(src));
+ }
+ };
+
+ template <bool canCopyWithMemcpy, typename T>
+ class VectorCopier;
+
+ template<typename T>
+ struct VectorCopier<false, T>
+ {
+ static void uninitializedCopy(const T* src, const T* srcEnd, T* dst)
+ {
+ while (src != srcEnd) {
+ new (dst) T(*src);
+ ++dst;
+ ++src;
+ }
+ }
+ };
+
+ template<typename T>
+ struct VectorCopier<true, T>
+ {
+ static void uninitializedCopy(const T* src, const T* srcEnd, T* dst)
+ {
+ memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - reinterpret_cast<const char*>(src));
+ }
+ };
+
+ template <bool canFillWithMemset, typename T>
+ class VectorFiller;
+
+ template<typename T>
+ struct VectorFiller<false, T>
+ {
+ static void uninitializedFill(T* dst, T* dstEnd, const T& val)
+ {
+ while (dst != dstEnd) {
+ new (dst) T(val);
+ ++dst;
+ }
+ }
+ };
+
+ template<typename T>
+ struct VectorFiller<true, T>
+ {
+ static void uninitializedFill(T* dst, T* dstEnd, const T& val)
+ {
+ ASSERT(sizeof(T) == sizeof(char));
+ memset(dst, val, dstEnd - dst);
+ }
+ };
+
+ template<bool canCompareWithMemcmp, typename T>
+ class VectorComparer;
+
+ template<typename T>
+ struct VectorComparer<false, T>
+ {
+ static bool compare(const T* a, const T* b, size_t size)
+ {
+ for (size_t i = 0; i < size; ++i)
+ if (a[i] != b[i])
+ return false;
+ return true;
+ }
+ };
+
+ template<typename T>
+ struct VectorComparer<true, T>
+ {
+ static bool compare(const T* a, const T* b, size_t size)
+ {
+ return memcmp(a, b, sizeof(T) * size) == 0;
+ }
+ };
+
+ template<typename T>
+ struct VectorTypeOperations
+ {
+ static void destruct(T* begin, T* end)
+ {
+ VectorDestructor<VectorTraits<T>::needsDestruction, T>::destruct(begin, end);
+ }
+
+ static void initialize(T* begin, T* end)
+ {
+ VectorInitializer<VectorTraits<T>::needsInitialization, VectorTraits<T>::canInitializeWithMemset, T>::initialize(begin, end);
+ }
+
+ static void move(T* src, const T* srcEnd, T* dst)
+ {
+ VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::move(src, srcEnd, dst);
+ }
+
+ static void moveOverlapping(T* src, const T* srcEnd, T* dst)
+ {
+ VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::moveOverlapping(src, srcEnd, dst);
+ }
+
+ static void uninitializedCopy(const T* src, const T* srcEnd, T* dst)
+ {
+ VectorCopier<VectorTraits<T>::canCopyWithMemcpy, T>::uninitializedCopy(src, srcEnd, dst);
+ }
+
+ static void uninitializedFill(T* dst, T* dstEnd, const T& val)
+ {
+ VectorFiller<VectorTraits<T>::canFillWithMemset, T>::uninitializedFill(dst, dstEnd, val);
+ }
+
+ static bool compare(const T* a, const T* b, size_t size)
+ {
+ return VectorComparer<VectorTraits<T>::canCompareWithMemcmp, T>::compare(a, b, size);
+ }
+ };
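+
+    // Taken together, VectorTypeOperations dispatches each operation at
+    // compile time via the VectorTraits<T> flags: trivially copyable types get
+    // raw memset/memcpy/memmove/memcmp, everything else gets an element-wise
+    // loop of constructor, destructor and operator!= calls.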
+
+ template<typename T>
+ class VectorBufferBase : public Noncopyable {
+ public:
+ void allocateBuffer(size_t newCapacity)
+ {
+ m_capacity = newCapacity;
+ if (newCapacity > std::numeric_limits<size_t>::max() / sizeof(T))
+ CRASH();
+ m_buffer = static_cast<T*>(fastMalloc(newCapacity * sizeof(T)));
+ }
+
+ void deallocateBuffer(T* bufferToDeallocate)
+ {
+ if (m_buffer == bufferToDeallocate) {
+ m_buffer = 0;
+ m_capacity = 0;
+ }
+ fastFree(bufferToDeallocate);
+ }
+
+ T* buffer() { return m_buffer; }
+ const T* buffer() const { return m_buffer; }
+ T** bufferSlot() { return &m_buffer; }
+ size_t capacity() const { return m_capacity; }
+
+ T* releaseBuffer()
+ {
+ T* buffer = m_buffer;
+ m_buffer = 0;
+ m_capacity = 0;
+ return buffer;
+ }
+
+ protected:
+ VectorBufferBase()
+ : m_buffer(0)
+ , m_capacity(0)
+ {
+ }
+
+ VectorBufferBase(T* buffer, size_t capacity)
+ : m_buffer(buffer)
+ , m_capacity(capacity)
+ {
+ }
+
+ ~VectorBufferBase()
+ {
+ // FIXME: It would be nice to find a way to ASSERT that m_buffer hasn't leaked here.
+ }
+
+ T* m_buffer;
+ size_t m_capacity;
+ };
+
+ template<typename T, size_t inlineCapacity>
+ class VectorBuffer;
+
+ template<typename T>
+ class VectorBuffer<T, 0> : private VectorBufferBase<T> {
+ private:
+ typedef VectorBufferBase<T> Base;
+ public:
+ VectorBuffer()
+ {
+ }
+
+ VectorBuffer(size_t capacity)
+ {
+ allocateBuffer(capacity);
+ }
+
+ ~VectorBuffer()
+ {
+ deallocateBuffer(buffer());
+ }
+
+ void swap(VectorBuffer<T, 0>& other)
+ {
+ std::swap(m_buffer, other.m_buffer);
+ std::swap(m_capacity, other.m_capacity);
+ }
+
+ void restoreInlineBufferIfNeeded() { }
+
+ using Base::allocateBuffer;
+ using Base::deallocateBuffer;
+
+ using Base::buffer;
+ using Base::bufferSlot;
+ using Base::capacity;
+
+ using Base::releaseBuffer;
+ private:
+ using Base::m_buffer;
+ using Base::m_capacity;
+ };
+
+ template<typename T, size_t inlineCapacity>
+ class VectorBuffer : private VectorBufferBase<T> {
+ private:
+ typedef VectorBufferBase<T> Base;
+ public:
+ VectorBuffer()
+ : Base(inlineBuffer(), inlineCapacity)
+ {
+ }
+
+ VectorBuffer(size_t capacity)
+ : Base(inlineBuffer(), inlineCapacity)
+ {
+ if (capacity > inlineCapacity)
+ Base::allocateBuffer(capacity);
+ }
+
+ ~VectorBuffer()
+ {
+ deallocateBuffer(buffer());
+ }
+
+ void allocateBuffer(size_t newCapacity)
+ {
+ if (newCapacity > inlineCapacity)
+ Base::allocateBuffer(newCapacity);
+ else {
+ m_buffer = inlineBuffer();
+ m_capacity = inlineCapacity;
+ }
+ }
+
+ void deallocateBuffer(T* bufferToDeallocate)
+ {
+ if (bufferToDeallocate == inlineBuffer())
+ return;
+ Base::deallocateBuffer(bufferToDeallocate);
+ }
+
+ void restoreInlineBufferIfNeeded()
+ {
+ if (m_buffer)
+ return;
+ m_buffer = inlineBuffer();
+ m_capacity = inlineCapacity;
+ }
+
+ using Base::buffer;
+ using Base::bufferSlot;
+ using Base::capacity;
+
+ T* releaseBuffer()
+ {
+ if (buffer() == inlineBuffer())
+ return 0;
+ return Base::releaseBuffer();
+ }
+
+ private:
+ using Base::m_buffer;
+ using Base::m_capacity;
+
+ static const size_t m_inlineBufferSize = inlineCapacity * sizeof(T);
+ #ifdef WTF_ALIGNED
+ T* inlineBuffer() { return reinterpret_cast<T*>(m_inlineBuffer.buffer); }
+ #else
+ T* inlineBuffer() { return reinterpret_cast<T*>(m_inlineBuffer.buffer()); }
+ #endif
+
+ AlignedBuffer<m_inlineBufferSize, WTF_ALIGN_OF(T)> m_inlineBuffer;
+ };
+
+ template<typename T, size_t inlineCapacity = 0>
+ class Vector : public FastAllocBase {
+ private:
+ typedef VectorBuffer<T, inlineCapacity> Buffer;
+ typedef VectorTypeOperations<T> TypeOperations;
+
+ public:
+ typedef T ValueType;
+
+ typedef T* iterator;
+ typedef const T* const_iterator;
+
+ Vector()
+ : m_size(0)
+ {
+ }
+
+ explicit Vector(size_t size)
+ : m_size(size)
+ , m_buffer(size)
+ {
+ if (begin())
+ TypeOperations::initialize(begin(), end());
+ }
+
+ ~Vector()
+ {
+ if (m_size) shrink(0);
+ }
+
+ Vector(const Vector&);
+ template<size_t otherCapacity>
+ Vector(const Vector<T, otherCapacity>&);
+
+ Vector& operator=(const Vector&);
+ template<size_t otherCapacity>
+ Vector& operator=(const Vector<T, otherCapacity>&);
+
+ size_t size() const { return m_size; }
+ size_t capacity() const { return m_buffer.capacity(); }
+ bool isEmpty() const { return !size(); }
+
+ T& at(size_t i)
+ {
+ ASSERT(i < size());
+ return m_buffer.buffer()[i];
+ }
+ const T& at(size_t i) const
+ {
+ ASSERT(i < size());
+ return m_buffer.buffer()[i];
+ }
+
+ T& operator[](size_t i) { return at(i); }
+ const T& operator[](size_t i) const { return at(i); }
+
+ T* data() { return m_buffer.buffer(); }
+ const T* data() const { return m_buffer.buffer(); }
+ T** dataSlot() { return m_buffer.bufferSlot(); }
+
+ iterator begin() { return data(); }
+ iterator end() { return begin() + m_size; }
+ const_iterator begin() const { return data(); }
+ const_iterator end() const { return begin() + m_size; }
+
+ T& first() { return at(0); }
+ const T& first() const { return at(0); }
+ T& last() { return at(size() - 1); }
+ const T& last() const { return at(size() - 1); }
+
+ template<typename U> size_t find(const U&) const;
+
+ void shrink(size_t size);
+ void grow(size_t size);
+ void resize(size_t size);
+ void reserveCapacity(size_t newCapacity);
+ void reserveInitialCapacity(size_t initialCapacity);
+ void shrinkCapacity(size_t newCapacity);
+ void shrinkToFit() { shrinkCapacity(size()); }
+
+ void clear() { shrinkCapacity(0); }
+
+ template<typename U> void append(const U*, size_t);
+ template<typename U> void append(const U&);
+ template<typename U> void uncheckedAppend(const U& val);
+ template<size_t otherCapacity> void append(const Vector<T, otherCapacity>&);
+
+ template<typename U> void insert(size_t position, const U*, size_t);
+ template<typename U> void insert(size_t position, const U&);
+ template<typename U, size_t c> void insert(size_t position, const Vector<U, c>&);
+
+ template<typename U> void prepend(const U*, size_t);
+ template<typename U> void prepend(const U&);
+ template<typename U, size_t c> void prepend(const Vector<U, c>&);
+
+ void remove(size_t position);
+ void remove(size_t position, size_t length);
+
+ void removeLast()
+ {
+ ASSERT(!isEmpty());
+ shrink(size() - 1);
+ }
+
+ Vector(size_t size, const T& val)
+ : m_size(size)
+ , m_buffer(size)
+ {
+ if (begin())
+ TypeOperations::uninitializedFill(begin(), end(), val);
+ }
+
+ void fill(const T&, size_t);
+ void fill(const T& val) { fill(val, size()); }
+
+ template<typename Iterator> void appendRange(Iterator start, Iterator end);
+
+ T* releaseBuffer();
+
+ void swap(Vector<T, inlineCapacity>& other)
+ {
+ std::swap(m_size, other.m_size);
+ m_buffer.swap(other.m_buffer);
+ }
+
+ private:
+ void expandCapacity(size_t newMinCapacity);
+ const T* expandCapacity(size_t newMinCapacity, const T*);
+ template<typename U> U* expandCapacity(size_t newMinCapacity, U*);
+
+ size_t m_size;
+ Buffer m_buffer;
+ };
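+
+    // A minimal usage sketch (illustrative only; 'process' is hypothetical):
+    //
+    //     Vector<int, 16> v;        // first 16 elements use the inline buffer
+    //     v.append(1);
+    //     v.append(2);
+    //     for (Vector<int, 16>::const_iterator it = v.begin(); it != v.end(); ++it)
+    //         process(*it);
+    //     v.shrinkToFit();          // drop any excess heap capacity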
+
+#if PLATFORM(QT)
+ QT_USE_NAMESPACE
+ template<typename T>
+ QDataStream& operator<<(QDataStream& stream, const Vector<T>& data)
+ {
+ stream << qint64(data.size());
+ foreach (const T& i, data)
+ stream << i;
+ return stream;
+ }
+
+ template<typename T>
+ QDataStream& operator>>(QDataStream& stream, Vector<T>& data)
+ {
+ data.clear();
+ qint64 count;
+ T item;
+ stream >> count;
+ data.reserveCapacity(count);
+ for (qint64 i = 0; i < count; ++i) {
+ stream >> item;
+ data.append(item);
+ }
+ return stream;
+ }
+#endif
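+
+    // These operators let a WTF::Vector round-trip through Qt serialization,
+    // e.g. (a sketch; 'v' is a Vector<int> to be saved):
+    //
+    //     QByteArray bytes;
+    //     QDataStream out(&bytes, QIODevice::WriteOnly);
+    //     out << v;                 // writes a qint64 count, then each element
+    //     QDataStream in(bytes);    // read-only stream over the same bytes
+    //     Vector<int> restored;
+    //     in >> restored;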
+
+ template<typename T, size_t inlineCapacity>
+ Vector<T, inlineCapacity>::Vector(const Vector& other)
+ : m_size(other.size())
+ , m_buffer(other.capacity())
+ {
+ if (begin())
+ TypeOperations::uninitializedCopy(other.begin(), other.end(), begin());
+ }
+
+ template<typename T, size_t inlineCapacity>
+ template<size_t otherCapacity>
+ Vector<T, inlineCapacity>::Vector(const Vector<T, otherCapacity>& other)
+ : m_size(other.size())
+ , m_buffer(other.capacity())
+ {
+ if (begin())
+ TypeOperations::uninitializedCopy(other.begin(), other.end(), begin());
+ }
+
+ template<typename T, size_t inlineCapacity>
+ Vector<T, inlineCapacity>& Vector<T, inlineCapacity>::operator=(const Vector<T, inlineCapacity>& other)
+ {
+ if (&other == this)
+ return *this;
+
+ if (size() > other.size())
+ shrink(other.size());
+ else if (other.size() > capacity()) {
+ clear();
+ reserveCapacity(other.size());
+ if (!begin())
+ return *this;
+ }
+
+ std::copy(other.begin(), other.begin() + size(), begin());
+ TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end());
+ m_size = other.size();
+
+ return *this;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ template<size_t otherCapacity>
+ Vector<T, inlineCapacity>& Vector<T, inlineCapacity>::operator=(const Vector<T, otherCapacity>& other)
+ {
+ if (&other == this)
+ return *this;
+
+ if (size() > other.size())
+ shrink(other.size());
+ else if (other.size() > capacity()) {
+ clear();
+ reserveCapacity(other.size());
+ if (!begin())
+ return *this;
+ }
+
+ std::copy(other.begin(), other.begin() + size(), begin());
+ TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end());
+ m_size = other.size();
+
+ return *this;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ template<typename U>
+ size_t Vector<T, inlineCapacity>::find(const U& value) const
+ {
+ for (size_t i = 0; i < size(); ++i) {
+ if (at(i) == value)
+ return i;
+ }
+ return notFound;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ void Vector<T, inlineCapacity>::fill(const T& val, size_t newSize)
+ {
+ if (size() > newSize)
+ shrink(newSize);
+ else if (newSize > capacity()) {
+ clear();
+ reserveCapacity(newSize);
+ if (!begin())
+ return;
+ }
+
+ std::fill(begin(), end(), val);
+ TypeOperations::uninitializedFill(end(), begin() + newSize, val);
+ m_size = newSize;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ template<typename Iterator>
+ void Vector<T, inlineCapacity>::appendRange(Iterator start, Iterator end)
+ {
+ for (Iterator it = start; it != end; ++it)
+ append(*it);
+ }
+
+ template<typename T, size_t inlineCapacity>
+ void Vector<T, inlineCapacity>::expandCapacity(size_t newMinCapacity)
+ {
+ reserveCapacity(max(newMinCapacity, max(static_cast<size_t>(16), capacity() + capacity() / 4 + 1)));
+ }
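+
+    // Growth policy: never less than 16 slots, otherwise roughly 1.25x the
+    // current capacity, giving amortized constant-time append().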
+
+ template<typename T, size_t inlineCapacity>
+ const T* Vector<T, inlineCapacity>::expandCapacity(size_t newMinCapacity, const T* ptr)
+ {
+ if (ptr < begin() || ptr >= end()) {
+ expandCapacity(newMinCapacity);
+ return ptr;
+ }
+ size_t index = ptr - begin();
+ expandCapacity(newMinCapacity);
+ return begin() + index;
+ }
+
+ template<typename T, size_t inlineCapacity> template<typename U>
+ inline U* Vector<T, inlineCapacity>::expandCapacity(size_t newMinCapacity, U* ptr)
+ {
+ expandCapacity(newMinCapacity);
+ return ptr;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ inline void Vector<T, inlineCapacity>::resize(size_t size)
+ {
+ if (size <= m_size)
+ TypeOperations::destruct(begin() + size, end());
+ else {
+ if (size > capacity())
+ expandCapacity(size);
+ if (begin())
+ TypeOperations::initialize(end(), begin() + size);
+ }
+
+ m_size = size;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ void Vector<T, inlineCapacity>::shrink(size_t size)
+ {
+ ASSERT(size <= m_size);
+ TypeOperations::destruct(begin() + size, end());
+ m_size = size;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ void Vector<T, inlineCapacity>::grow(size_t size)
+ {
+ ASSERT(size >= m_size);
+ if (size > capacity())
+ expandCapacity(size);
+ if (begin())
+ TypeOperations::initialize(end(), begin() + size);
+ m_size = size;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ void Vector<T, inlineCapacity>::reserveCapacity(size_t newCapacity)
+ {
+ if (newCapacity <= capacity())
+ return;
+ T* oldBuffer = begin();
+ T* oldEnd = end();
+ m_buffer.allocateBuffer(newCapacity);
+ if (begin())
+ TypeOperations::move(oldBuffer, oldEnd, begin());
+ m_buffer.deallocateBuffer(oldBuffer);
+ }
+
+ template<typename T, size_t inlineCapacity>
+ inline void Vector<T, inlineCapacity>::reserveInitialCapacity(size_t initialCapacity)
+ {
+ ASSERT(!m_size);
+ ASSERT(capacity() == inlineCapacity);
+ if (initialCapacity > inlineCapacity)
+ m_buffer.allocateBuffer(initialCapacity);
+ }
+
+ template<typename T, size_t inlineCapacity>
+ void Vector<T, inlineCapacity>::shrinkCapacity(size_t newCapacity)
+ {
+ if (newCapacity >= capacity())
+ return;
+
+ if (newCapacity < size())
+ shrink(newCapacity);
+
+ T* oldBuffer = begin();
+ if (newCapacity > 0) {
+ T* oldEnd = end();
+ m_buffer.allocateBuffer(newCapacity);
+ if (begin() != oldBuffer)
+ TypeOperations::move(oldBuffer, oldEnd, begin());
+ }
+
+ m_buffer.deallocateBuffer(oldBuffer);
+ m_buffer.restoreInlineBufferIfNeeded();
+ }
+
+ // Templatizing these is better than just letting the conversion happen implicitly,
+ // because for instance it allows a PassRefPtr to be appended to a RefPtr vector
+ // without refcount thrash.
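+ //
+ // An illustrative sketch of what this enables (Node is a hypothetical
+ // RefCounted type, not something defined in this header):
+ //
+ //     Vector<RefPtr<Node> > nodes;
+ //     nodes.append(adoptRef(new Node)); // PassRefPtr goes straight into the
+ //                                       // new RefPtr; no extra ref()/deref()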
+
+ template<typename T, size_t inlineCapacity> template<typename U>
+ void Vector<T, inlineCapacity>::append(const U* data, size_t dataSize)
+ {
+ size_t newSize = m_size + dataSize;
+ if (newSize > capacity()) {
+ data = expandCapacity(newSize, data);
+ if (!begin())
+ return;
+ }
+ if (newSize < m_size)
+ CRASH();
+ T* dest = end();
+ for (size_t i = 0; i < dataSize; ++i)
+ new (&dest[i]) T(data[i]);
+ m_size = newSize;
+ }
+
+ template<typename T, size_t inlineCapacity> template<typename U>
+ ALWAYS_INLINE void Vector<T, inlineCapacity>::append(const U& val)
+ {
+ const U* ptr = &val;
+ if (size() == capacity()) {
+ ptr = expandCapacity(size() + 1, ptr);
+ if (!begin())
+ return;
+ }
+
+#if COMPILER(MSVC7)
+ // FIXME: MSVC7 generates compilation errors when trying to assign
+ // a pointer to a Vector of its base class (i.e. can't downcast). So far
+ // I've been unable to determine any logical reason for this, so I can
+ // only assume it is a bug with the compiler. Casting is a bad solution,
+ // however, because it subverts implicit conversions, so a better
+ // one is needed.
+ new (end()) T(static_cast<T>(*ptr));
+#else
+ new (end()) T(*ptr);
+#endif
+ ++m_size;
+ }
+
+ // This version of append saves a branch in the case where you know that the
+ // vector's capacity is large enough for the append to succeed.
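+ //
+ // For example (an illustrative sketch; count and values are assumed inputs),
+ // a caller that has already reserved enough space can hoist the capacity
+ // check out of a hot loop:
+ //
+ //     Vector<int> v;
+ //     v.reserveCapacity(count);
+ //     for (size_t i = 0; i < count; ++i)
+ //         v.uncheckedAppend(values[i]);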
+
+ template<typename T, size_t inlineCapacity> template<typename U>
+ inline void Vector<T, inlineCapacity>::uncheckedAppend(const U& val)
+ {
+ ASSERT(size() < capacity());
+ const U* ptr = &val;
+ new (end()) T(*ptr);
+ ++m_size;
+ }
+
+ // This method should not be called append; a better name would be appendElements.
+ // It could also be eliminated entirely, and call sites could just use
+ // appendRange(val.begin(), val.end()).
+ template<typename T, size_t inlineCapacity> template<size_t otherCapacity>
+ inline void Vector<T, inlineCapacity>::append(const Vector<T, otherCapacity>& val)
+ {
+ append(val.begin(), val.size());
+ }
+
+ template<typename T, size_t inlineCapacity> template<typename U>
+ void Vector<T, inlineCapacity>::insert(size_t position, const U* data, size_t dataSize)
+ {
+ ASSERT(position <= size());
+ size_t newSize = m_size + dataSize;
+ if (newSize > capacity()) {
+ data = expandCapacity(newSize, data);
+ if (!begin())
+ return;
+ }
+ if (newSize < m_size)
+ CRASH();
+ T* spot = begin() + position;
+ TypeOperations::moveOverlapping(spot, end(), spot + dataSize);
+ for (size_t i = 0; i < dataSize; ++i)
+ new (&spot[i]) T(data[i]);
+ m_size = newSize;
+ }
+
+ template<typename T, size_t inlineCapacity> template<typename U>
+ inline void Vector<T, inlineCapacity>::insert(size_t position, const U& val)
+ {
+ ASSERT(position <= size());
+ const U* data = &val;
+ if (size() == capacity()) {
+ data = expandCapacity(size() + 1, data);
+ if (!begin())
+ return;
+ }
+ T* spot = begin() + position;
+ TypeOperations::moveOverlapping(spot, end(), spot + 1);
+ new (spot) T(*data);
+ ++m_size;
+ }
+
+ template<typename T, size_t inlineCapacity> template<typename U, size_t c>
+ inline void Vector<T, inlineCapacity>::insert(size_t position, const Vector<U, c>& val)
+ {
+ insert(position, val.begin(), val.size());
+ }
+
+ template<typename T, size_t inlineCapacity> template<typename U>
+ void Vector<T, inlineCapacity>::prepend(const U* data, size_t dataSize)
+ {
+ insert(0, data, dataSize);
+ }
+
+ template<typename T, size_t inlineCapacity> template<typename U>
+ inline void Vector<T, inlineCapacity>::prepend(const U& val)
+ {
+ insert(0, val);
+ }
+
+ template<typename T, size_t inlineCapacity> template<typename U, size_t c>
+ inline void Vector<T, inlineCapacity>::prepend(const Vector<U, c>& val)
+ {
+ insert(0, val.begin(), val.size());
+ }
+
+ template<typename T, size_t inlineCapacity>
+ inline void Vector<T, inlineCapacity>::remove(size_t position)
+ {
+ ASSERT(position < size());
+ T* spot = begin() + position;
+ spot->~T();
+ TypeOperations::moveOverlapping(spot + 1, end(), spot);
+ --m_size;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ inline void Vector<T, inlineCapacity>::remove(size_t position, size_t length)
+ {
+ ASSERT(position < size());
+ ASSERT(position + length <= size());
+ T* beginSpot = begin() + position;
+ T* endSpot = beginSpot + length;
+ TypeOperations::destruct(beginSpot, endSpot);
+ TypeOperations::moveOverlapping(endSpot, end(), beginSpot);
+ m_size -= length;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ inline T* Vector<T, inlineCapacity>::releaseBuffer()
+ {
+ T* buffer = m_buffer.releaseBuffer();
+ if (inlineCapacity && !buffer && m_size) {
+ // If the vector had some data, but no buffer to release,
+ // that means it was using the inline buffer. In that case,
+ // we create a brand new buffer so the caller always gets one.
+ size_t bytes = m_size * sizeof(T);
+ buffer = static_cast<T*>(fastMalloc(bytes));
+ memcpy(buffer, data(), bytes);
+ }
+ m_size = 0;
+ return buffer;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ void deleteAllValues(const Vector<T, inlineCapacity>& collection)
+ {
+ typedef typename Vector<T, inlineCapacity>::const_iterator iterator;
+ iterator end = collection.end();
+ for (iterator it = collection.begin(); it != end; ++it)
+ delete *it;
+ }
+
+ template<typename T, size_t inlineCapacity>
+ inline void swap(Vector<T, inlineCapacity>& a, Vector<T, inlineCapacity>& b)
+ {
+ a.swap(b);
+ }
+
+ template<typename T, size_t inlineCapacity>
+ bool operator==(const Vector<T, inlineCapacity>& a, const Vector<T, inlineCapacity>& b)
+ {
+ if (a.size() != b.size())
+ return false;
+
+ return VectorTypeOperations<T>::compare(a.data(), b.data(), a.size());
+ }
+
+ template<typename T, size_t inlineCapacity>
+ inline bool operator!=(const Vector<T, inlineCapacity>& a, const Vector<T, inlineCapacity>& b)
+ {
+ return !(a == b);
+ }
+
+
+} // namespace WTF
+
+using WTF::Vector;
+
+#endif // WTF_Vector_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/VectorTraits.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/VectorTraits.h
new file mode 100644
index 0000000..7974b9a
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/VectorTraits.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_VectorTraits_h
+#define WTF_VectorTraits_h
+
+#include "RefPtr.h"
+#include "TypeTraits.h"
+#include <utility>
+#include <memory>
+
+using std::pair;
+
+namespace WTF {
+
+ template<bool isPod, typename T>
+ class VectorTraitsBase;
+
+ template<typename T>
+ struct VectorTraitsBase<false, T>
+ {
+ static const bool needsDestruction = true;
+ static const bool needsInitialization = true;
+ static const bool canInitializeWithMemset = false;
+ static const bool canMoveWithMemcpy = false;
+ static const bool canCopyWithMemcpy = false;
+ static const bool canFillWithMemset = false;
+ static const bool canCompareWithMemcmp = false;
+ };
+
+ template<typename T>
+ struct VectorTraitsBase<true, T>
+ {
+ static const bool needsDestruction = false;
+ static const bool needsInitialization = false;
+ static const bool canInitializeWithMemset = false;
+ static const bool canMoveWithMemcpy = true;
+ static const bool canCopyWithMemcpy = true;
+ static const bool canFillWithMemset = sizeof(T) == sizeof(char);
+ static const bool canCompareWithMemcmp = true;
+ };
+
+ template<typename T>
+ struct VectorTraits : VectorTraitsBase<IsPod<T>::value, T> { };
+
+ struct SimpleClassVectorTraits
+ {
+ static const bool needsDestruction = true;
+ static const bool needsInitialization = true;
+ static const bool canInitializeWithMemset = true;
+ static const bool canMoveWithMemcpy = true;
+ static const bool canCopyWithMemcpy = false;
+ static const bool canFillWithMemset = false;
+ static const bool canCompareWithMemcmp = true;
+ };
+
+ // We know RefPtr is simple enough that initializing to 0 and moving with memcpy
+ // (and then not destructing the original) works correctly.
+ template<typename P>
+ struct VectorTraits<RefPtr<P> > : SimpleClassVectorTraits { };
+
+ template<typename P>
+ struct VectorTraits<std::auto_ptr<P> > : SimpleClassVectorTraits { };
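+
+ // A user-defined type that is known to be safely memmovable can opt in the
+ // same way (an illustrative sketch; Widget is a hypothetical type):
+ //
+ // template<> struct VectorTraits<Widget> : SimpleClassVectorTraits { };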
+
+ template<typename First, typename Second>
+ struct VectorTraits<pair<First, Second> >
+ {
+ typedef VectorTraits<First> FirstTraits;
+ typedef VectorTraits<Second> SecondTraits;
+
+ static const bool needsDestruction = FirstTraits::needsDestruction || SecondTraits::needsDestruction;
+ static const bool needsInitialization = FirstTraits::needsInitialization || SecondTraits::needsInitialization;
+ static const bool canInitializeWithMemset = FirstTraits::canInitializeWithMemset && SecondTraits::canInitializeWithMemset;
+ static const bool canMoveWithMemcpy = FirstTraits::canMoveWithMemcpy && SecondTraits::canMoveWithMemcpy;
+ static const bool canCopyWithMemcpy = FirstTraits::canCopyWithMemcpy && SecondTraits::canCopyWithMemcpy;
+ static const bool canFillWithMemset = false;
+ static const bool canCompareWithMemcmp = FirstTraits::canCompareWithMemcmp && SecondTraits::canCompareWithMemcmp;
+ };
+
+} // namespace WTF
+
+using WTF::VectorTraits;
+using WTF::SimpleClassVectorTraits;
+
+#endif // WTF_VectorTraits_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/dtoa.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/dtoa.cpp
new file mode 100644
index 0000000..d75c17a
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/dtoa.cpp
@@ -0,0 +1,2379 @@
+/****************************************************************
+ *
+ * The author of this software is David M. Gay.
+ *
+ * Copyright (c) 1991, 2000, 2001 by Lucent Technologies.
+ * Copyright (C) 2002, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ ***************************************************************/
+
+/* Please send bug reports to
+ David M. Gay
+ Bell Laboratories, Room 2C-463
+ 600 Mountain Avenue
+ Murray Hill, NJ 07974-0636
+ U.S.A.
+ dmg@bell-labs.com
+ */
+
+/* On a machine with IEEE extended-precision registers, it is
+ * necessary to specify double-precision (53-bit) rounding precision
+ * before invoking strtod or dtoa. If the machine uses (the equivalent
+ * of) Intel 80x87 arithmetic, the call
+ * _control87(PC_53, MCW_PC);
+ * does this with many compilers. Whether this or another call is
+ * appropriate depends on the compiler; for this to work, it may be
+ * necessary to #include "float.h" or another system-dependent header
+ * file.
+ */
+
+/* strtod for IEEE-arithmetic machines.
+ *
+ * This strtod returns a nearest machine number to the input decimal
+ * string (or sets errno to ERANGE). With IEEE arithmetic, ties are
+ * broken by the IEEE round-even rule. Otherwise ties are broken by
+ * biased rounding (add half and chop).
+ *
+ * Inspired loosely by William D. Clinger's paper "How to Read Floating
+ * Point Numbers Accurately" [Proc. ACM SIGPLAN '90, pp. 92-101].
+ *
+ * Modifications:
+ *
+ * 1. We only require IEEE.
+ * 2. We get by with floating-point arithmetic in a case that
+ * Clinger missed -- when we're computing d * 10^n
+ * for a small integer d and the integer n is not too
+ * much larger than 22 (the maximum integer k for which
+ * we can represent 10^k exactly), we may be able to
+ * compute (d*10^k) * 10^(e-k) with just one roundoff.
+ * 3. Rather than a bit-at-a-time adjustment of the binary
+ * result in the hard case, we use floating-point
+ * arithmetic to determine the adjustment to within
+ * one bit; only in really hard cases do we need to
+ * compute a second residual.
+ * 4. Because of 3., we don't need a large table of powers of 10
+ * for ten-to-e (just some small tables, e.g. of 10^k
+ * for 0 <= k <= 22).
+ */
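+
+/* For example (illustrative): for "12345e25", 10^25 is not an exact double,
+ * but (12345 * 10^3) * 10^22 can be computed with a single roundoff, since
+ * both 12345 * 10^3 and 10^22 are exactly representable (modification 2
+ * above). */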
+
+/*
+ * #define IEEE_8087 for IEEE-arithmetic machines where the least
+ * significant byte has the lowest address.
+ * #define IEEE_MC68k for IEEE-arithmetic machines where the most
+ * significant byte has the lowest address.
+ * #define No_leftright to omit left-right logic in fast floating-point
+ * computation of dtoa.
+ * #define Check_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3
+ * and Honor_FLT_ROUNDS is not #defined.
+ * #define Inaccurate_Divide for IEEE-format with correctly rounded
+ * products but inaccurate quotients, e.g., for Intel i860.
+ * #define USE_LONG_LONG on machines that have a "long long"
+ * integer type (of >= 64 bits), and performance testing shows that
+ * it is faster than 32-bit fallback (which is often not the case
+ * on 32-bit machines). On such machines, you can #define Just_16
+ * to store 16 bits per 32-bit int32_t when doing high-precision integer
+ * arithmetic. Whether this speeds things up or slows things down
+ * depends on the machine and the number being converted.
+ * #define Bad_float_h if your system lacks a float.h or if it does not
+ * define some or all of DBL_DIG, DBL_MAX_10_EXP, DBL_MAX_EXP,
+ * FLT_RADIX, FLT_ROUNDS, and DBL_MAX.
+ * #define INFNAN_CHECK on IEEE systems to cause strtod to check for
+ * Infinity and NaN (case insensitively). On some systems (e.g.,
+ * some HP systems), it may be necessary to #define NAN_WORD0
+ * appropriately -- to the most significant word of a quiet NaN.
+ * (On HP Series 700/800 machines, -DNAN_WORD0=0x7ff40000 works.)
+ * When INFNAN_CHECK is #defined and No_Hex_NaN is not #defined,
+ * strtod also accepts (case insensitively) strings of the form
+ * NaN(x), where x is a string of hexadecimal digits and spaces;
+ * if there is only one string of hexadecimal digits, it is taken
+ * for the 52 fraction bits of the resulting NaN; if there are two
+ * or more strings of hex digits, the first is for the high 20 bits,
+ * the second and subsequent for the low 32 bits, with intervening
+ * white space ignored; but if this results in none of the 52
+ * fraction bits being on (an IEEE Infinity symbol), then NAN_WORD0
+ * and NAN_WORD1 are used instead.
+ * #define NO_IEEE_Scale to disable new (Feb. 1997) logic in strtod that
+ * avoids underflows on inputs whose result does not underflow.
+ * If you #define NO_IEEE_Scale on a machine that uses IEEE-format
+ * floating-point numbers and flushes underflows to zero rather
+ * than implementing gradual underflow, then you must also #define
+ * Sudden_Underflow.
+ * #define YES_ALIAS to permit aliasing certain double values with
+ * arrays of ULongs. This leads to slightly better code with
+ * some compilers and was always used prior to 19990916, but it
+ * is not strictly legal and can cause trouble with aggressively
+ * optimizing compilers (e.g., gcc 2.95.1 under -O2).
+ * #define SET_INEXACT if IEEE arithmetic is being used and extra
+ * computation should be done to set the inexact flag when the
+ * result is inexact and avoid setting inexact when the result
+ * is exact. In this case, dtoa.c must be compiled in
+ * an environment, perhaps provided by #include "dtoa.c" in a
+ * suitable wrapper, that defines two functions,
+ * int get_inexact(void);
+ * void clear_inexact(void);
+ * such that get_inexact() returns a nonzero value if the
+ * inexact bit is already set, and clear_inexact() sets the
+ * inexact bit to 0. When SET_INEXACT is #defined, strtod
+ * also does extra computations to set the underflow and overflow
+ * flags when appropriate (i.e., when the result is tiny and
+ * inexact or when it is a numeric value rounded to +-infinity).
+ * #define NO_ERRNO if strtod should not assign errno = ERANGE when
+ * the result overflows to +-Infinity or underflows to 0.
+ */
+
+#include "config.h"
+#include "dtoa.h"
+
+#if HAVE(ERRNO_H)
+#include <errno.h>
+#else
+#define NO_ERRNO
+#endif
+#include <float.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <wtf/AlwaysInline.h>
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Vector.h>
+#include <wtf/Threading.h>
+
+#include <stdio.h>
+
+#if COMPILER(MSVC)
+#pragma warning(disable: 4244)
+#pragma warning(disable: 4245)
+#pragma warning(disable: 4554)
+#endif
+
+#if PLATFORM(BIG_ENDIAN)
+#define IEEE_MC68k
+#elif PLATFORM(MIDDLE_ENDIAN)
+#define IEEE_ARM
+#else
+#define IEEE_8087
+#endif
+
+#define INFNAN_CHECK
+
+#if defined(IEEE_8087) + defined(IEEE_MC68k) + defined(IEEE_ARM) != 1
+Exactly one of IEEE_8087, IEEE_ARM or IEEE_MC68k should be defined.
+#endif
+
+namespace WTF {
+
+#if ENABLE(JSC_MULTIPLE_THREADS)
+Mutex* s_dtoaP5Mutex;
+#endif
+
+typedef union { double d; uint32_t L[2]; } U;
+
+#ifdef YES_ALIAS
+#define dval(x) x
+#ifdef IEEE_8087
+#define word0(x) ((uint32_t*)&x)[1]
+#define word1(x) ((uint32_t*)&x)[0]
+#else
+#define word0(x) ((uint32_t*)&x)[0]
+#define word1(x) ((uint32_t*)&x)[1]
+#endif
+#else
+#ifdef IEEE_8087
+#define word0(x) (x)->L[1]
+#define word1(x) (x)->L[0]
+#else
+#define word0(x) (x)->L[0]
+#define word1(x) (x)->L[1]
+#endif
+#define dval(x) (x)->d
+#endif
+
+/* The following definition of Storeinc is appropriate for MIPS processors.
+ * An alternative that might be better on some machines is
+ * #define Storeinc(a,b,c) (*a++ = b << 16 | c & 0xffff)
+ */
+#if defined(IEEE_8087) || defined(IEEE_ARM)
+#define Storeinc(a,b,c) (((unsigned short*)a)[1] = (unsigned short)b, ((unsigned short*)a)[0] = (unsigned short)c, a++)
+#else
+#define Storeinc(a,b,c) (((unsigned short*)a)[0] = (unsigned short)b, ((unsigned short*)a)[1] = (unsigned short)c, a++)
+#endif
+
+#define Exp_shift 20
+#define Exp_shift1 20
+#define Exp_msk1 0x100000
+#define Exp_msk11 0x100000
+#define Exp_mask 0x7ff00000
+#define P 53
+#define Bias 1023
+#define Emin (-1022)
+#define Exp_1 0x3ff00000
+#define Exp_11 0x3ff00000
+#define Ebits 11
+#define Frac_mask 0xfffff
+#define Frac_mask1 0xfffff
+#define Ten_pmax 22
+#define Bletch 0x10
+#define Bndry_mask 0xfffff
+#define Bndry_mask1 0xfffff
+#define LSB 1
+#define Sign_bit 0x80000000
+#define Log2P 1
+#define Tiny0 0
+#define Tiny1 1
+#define Quick_max 14
+#define Int_max 14
+
+#if !defined(NO_IEEE_Scale)
+#undef Avoid_Underflow
+#define Avoid_Underflow
+#endif
+
+#if !defined(Flt_Rounds)
+#if defined(FLT_ROUNDS)
+#define Flt_Rounds FLT_ROUNDS
+#else
+#define Flt_Rounds 1
+#endif
+#endif /*Flt_Rounds*/
+
+
+#define rounded_product(a,b) a *= b
+#define rounded_quotient(a,b) a /= b
+
+#define Big0 (Frac_mask1 | Exp_msk1 * (DBL_MAX_EXP + Bias - 1))
+#define Big1 0xffffffff
+
+
+// FIXME: we should remove non-Pack_32 mode since it is unused and unmaintained
+#ifndef Pack_32
+#define Pack_32
+#endif
+
+#if PLATFORM(PPC64) || PLATFORM(X86_64)
+// 64-bit emulation provided by the compiler is likely to be slower than dtoa's own code on 32-bit hardware.
+#define USE_LONG_LONG
+#endif
+
+#ifndef USE_LONG_LONG
+#ifdef Just_16
+#undef Pack_32
+/* When Pack_32 is not defined, we store 16 bits per 32-bit int32_t.
+ * This makes some inner loops simpler and sometimes saves work
+ * during multiplications, but it often seems to make things slightly
+ * slower. Hence the default is now to store 32 bits per int32_t.
+ */
+#endif
+#endif
+
+#define Kmax 15
+
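+// BigInt is a simple arbitrary-precision integer: a sign flag plus a
+// little-endian array of 32-bit words held in a Vector with inline capacity,
+// so that small values need no heap allocation.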
+struct BigInt {
+ BigInt() : sign(0) { }
+ int sign;
+
+ void clear()
+ {
+ sign = 0;
+ m_words.clear();
+ }
+
+ size_t size() const
+ {
+ return m_words.size();
+ }
+
+ void resize(size_t s)
+ {
+ m_words.resize(s);
+ }
+
+ uint32_t* words()
+ {
+ return m_words.data();
+ }
+
+ const uint32_t* words() const
+ {
+ return m_words.data();
+ }
+
+ void append(uint32_t w)
+ {
+ m_words.append(w);
+ }
+
+ Vector<uint32_t, 16> m_words;
+};
+
+static void multadd(BigInt& b, int m, int a) /* multiply by m and add a */
+{
+#ifdef USE_LONG_LONG
+ unsigned long long carry;
+#else
+ uint32_t carry;
+#endif
+
+ int wds = b.size();
+ uint32_t* x = b.words();
+ int i = 0;
+ carry = a;
+ do {
+#ifdef USE_LONG_LONG
+ unsigned long long y = *x * (unsigned long long)m + carry;
+ carry = y >> 32;
+ *x++ = (uint32_t)y & 0xffffffffUL;
+#else
+#ifdef Pack_32
+ uint32_t xi = *x;
+ uint32_t y = (xi & 0xffff) * m + carry;
+ uint32_t z = (xi >> 16) * m + (y >> 16);
+ carry = z >> 16;
+ *x++ = (z << 16) + (y & 0xffff);
+#else
+ uint32_t y = *x * m + carry;
+ carry = y >> 16;
+ *x++ = y & 0xffff;
+#endif
+#endif
+ } while (++i < wds);
+
+ if (carry)
+ b.append((uint32_t)carry);
+}
+
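+/* s2b: convert the decimal digit string s (nd digits in total, nd0 of them
+ * before the decimal point) into the BigInt b; y9 is the value of the leading
+ * (up to nine) digits, already accumulated by the caller. */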
+static void s2b(BigInt& b, const char* s, int nd0, int nd, uint32_t y9)
+{
+ int k;
+ int32_t y;
+ int32_t x = (nd + 8) / 9;
+
+ for (k = 0, y = 1; x > y; y <<= 1, k++) { }
+#ifdef Pack_32
+ b.sign = 0;
+ b.resize(1);
+ b.words()[0] = y9;
+#else
+ b.sign = 0;
+ b.resize((y9 >> 16) ? 2 : 1);
+ b.words()[0] = y9 & 0xffff;
+ if (b.size() == 2)
+ b.words()[1] = y9 >> 16;
+#endif
+
+ int i = 9;
+ if (9 < nd0) {
+ s += 9;
+ do {
+ multadd(b, 10, *s++ - '0');
+ } while (++i < nd0);
+ s++;
+ } else
+ s += 10;
+ for (; i < nd; i++)
+ multadd(b, 10, *s++ - '0');
+}
+
+static int hi0bits(uint32_t x)
+{
+ int k = 0;
+
+ if (!(x & 0xffff0000)) {
+ k = 16;
+ x <<= 16;
+ }
+ if (!(x & 0xff000000)) {
+ k += 8;
+ x <<= 8;
+ }
+ if (!(x & 0xf0000000)) {
+ k += 4;
+ x <<= 4;
+ }
+ if (!(x & 0xc0000000)) {
+ k += 2;
+ x <<= 2;
+ }
+ if (!(x & 0x80000000)) {
+ k++;
+ if (!(x & 0x40000000))
+ return 32;
+ }
+ return k;
+}
+
+static int lo0bits(uint32_t* y)
+{
+ int k;
+ uint32_t x = *y;
+
+ if (x & 7) {
+ if (x & 1)
+ return 0;
+ if (x & 2) {
+ *y = x >> 1;
+ return 1;
+ }
+ *y = x >> 2;
+ return 2;
+ }
+ k = 0;
+ if (!(x & 0xffff)) {
+ k = 16;
+ x >>= 16;
+ }
+ if (!(x & 0xff)) {
+ k += 8;
+ x >>= 8;
+ }
+ if (!(x & 0xf)) {
+ k += 4;
+ x >>= 4;
+ }
+ if (!(x & 0x3)) {
+ k += 2;
+ x >>= 2;
+ }
+ if (!(x & 1)) {
+ k++;
+ x >>= 1;
+ if (!x)
+ return 32;
+ }
+ *y = x;
+ return k;
+}
+
+static void i2b(BigInt& b, int i)
+{
+ b.sign = 0;
+ b.resize(1);
+ b.words()[0] = i;
+}
+
+static void mult(BigInt& aRef, const BigInt& bRef)
+{
+ const BigInt* a = &aRef;
+ const BigInt* b = &bRef;
+ BigInt c;
+ int wa, wb, wc;
+ const uint32_t *x = 0, *xa, *xb, *xae, *xbe;
+ uint32_t *xc, *xc0;
+ uint32_t y;
+#ifdef USE_LONG_LONG
+ unsigned long long carry, z;
+#else
+ uint32_t carry, z;
+#endif
+
+ if (a->size() < b->size()) {
+ const BigInt* tmp = a;
+ a = b;
+ b = tmp;
+ }
+
+ wa = a->size();
+ wb = b->size();
+ wc = wa + wb;
+ c.resize(wc);
+
+ for (xc = c.words(), xa = xc + wc; xc < xa; xc++)
+ *xc = 0;
+ xa = a->words();
+ xae = xa + wa;
+ xb = b->words();
+ xbe = xb + wb;
+ xc0 = c.words();
+#ifdef USE_LONG_LONG
+ for (; xb < xbe; xc0++) {
+ if ((y = *xb++)) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = *x++ * (unsigned long long)y + *xc + carry;
+ carry = z >> 32;
+ *xc++ = (uint32_t)z & 0xffffffffUL;
+ } while (x < xae);
+ *xc = (uint32_t)carry;
+ }
+ }
+#else
+#ifdef Pack_32
+ for (; xb < xbe; xb++, xc0++) {
+ if ((y = *xb & 0xffff)) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = (*x & 0xffff) * y + (*xc & 0xffff) + carry;
+ carry = z >> 16;
+ uint32_t z2 = (*x++ >> 16) * y + (*xc >> 16) + carry;
+ carry = z2 >> 16;
+ Storeinc(xc, z2, z);
+ } while (x < xae);
+ *xc = carry;
+ }
+ if ((y = *xb >> 16)) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ uint32_t z2 = *xc;
+ do {
+ z = (*x & 0xffff) * y + (*xc >> 16) + carry;
+ carry = z >> 16;
+ Storeinc(xc, z, z2);
+ z2 = (*x++ >> 16) * y + (*xc & 0xffff) + carry;
+ carry = z2 >> 16;
+ } while (x < xae);
+ *xc = z2;
+ }
+ }
+#else
+ for (; xb < xbe; xc0++) {
+ if ((y = *xb++)) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = *x++ * y + *xc + carry;
+ carry = z >> 16;
+ *xc++ = z & 0xffff;
+ } while (x < xae);
+ *xc = carry;
+ }
+ }
+#endif
+#endif
+ for (xc0 = c.words(), xc = xc0 + wc; wc > 0 && !*--xc; --wc) { }
+ c.resize(wc);
+ aRef = c;
+}
+
+struct P5Node {
+ BigInt val;
+ P5Node* next;
+};
+
+static P5Node* p5s;
+static int p5s_count;
+
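+/* pow5mult: multiply b by 5^k. The low two bits of k are handled with the
+ * small p05 table; the rest uses repeated squarings (5^4, 5^8, 5^16, ...)
+ * cached in the shared p5s list, guarded by s_dtoaP5Mutex when JSC runs
+ * with multiple threads. */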
+static ALWAYS_INLINE void pow5mult(BigInt& b, int k)
+{
+ static int p05[3] = { 5, 25, 125 };
+
+ if (int i = k & 3)
+ multadd(b, p05[i - 1], 0);
+
+ if (!(k >>= 2))
+ return;
+
+#if ENABLE(JSC_MULTIPLE_THREADS)
+ s_dtoaP5Mutex->lock();
+#endif
+ P5Node* p5 = p5s;
+
+ if (!p5) {
+ /* first time */
+ p5 = new P5Node;
+ i2b(p5->val, 625);
+ p5->next = 0;
+ p5s = p5;
+ p5s_count = 1;
+ }
+
+ int p5s_count_local = p5s_count;
+#if ENABLE(JSC_MULTIPLE_THREADS)
+ s_dtoaP5Mutex->unlock();
+#endif
+ int p5s_used = 0;
+
+ for (;;) {
+ if (k & 1)
+ mult(b, p5->val);
+
+ if (!(k >>= 1))
+ break;
+
+ if (++p5s_used == p5s_count_local) {
+#if ENABLE(JSC_MULTIPLE_THREADS)
+ s_dtoaP5Mutex->lock();
+#endif
+ if (p5s_used == p5s_count) {
+ ASSERT(!p5->next);
+ p5->next = new P5Node;
+ p5->next->next = 0;
+ p5->next->val = p5->val;
+ mult(p5->next->val, p5->next->val);
+ ++p5s_count;
+ }
+
+ p5s_count_local = p5s_count;
+#if ENABLE(JSC_MULTIPLE_THREADS)
+ s_dtoaP5Mutex->unlock();
+#endif
+ }
+ p5 = p5->next;
+ }
+}
+
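+/* lshift: shift b left by k bits, growing its word array as needed. */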
+static ALWAYS_INLINE void lshift(BigInt& b, int k)
+{
+#ifdef Pack_32
+ int n = k >> 5;
+#else
+ int n = k >> 4;
+#endif
+
+ int origSize = b.size();
+ int n1 = n + origSize + 1;
+
+ if (k &= 0x1f)
+ b.resize(b.size() + n + 1);
+ else
+ b.resize(b.size() + n);
+
+ const uint32_t* srcStart = b.words();
+ uint32_t* dstStart = b.words();
+ const uint32_t* src = srcStart + origSize - 1;
+ uint32_t* dst = dstStart + n1 - 1;
+#ifdef Pack_32
+ if (k) {
+ uint32_t hiSubword = 0;
+ int s = 32 - k;
+ for (; src >= srcStart; --src) {
+ *dst-- = hiSubword | *src >> s;
+ hiSubword = *src << k;
+ }
+ *dst = hiSubword;
+ ASSERT(dst == dstStart + n);
+
+ b.resize(origSize + n + (b.words()[n1 - 1] != 0));
+ }
+#else
+ if (k &= 0xf) {
+ uint32_t hiSubword = 0;
+ int s = 16 - k;
+ for (; src >= srcStart; --src) {
+ *dst-- = hiSubword | *src >> s;
+ hiSubword = (*src << k) & 0xffff;
+ }
+ *dst = hiSubword;
+ ASSERT(dst == dstStart + n);
+ b.resize(origSize + n + (b.words()[n1 - 1] != 0));
+ }
+#endif
+ else {
+ do {
+ *--dst = *src--;
+ } while (src >= srcStart);
+ }
+ for (dst = dstStart + n; dst != dstStart; )
+ *--dst = 0;
+
+ ASSERT(b.size() <= 1 || b.words()[b.size() - 1]);
+}
+
+static int cmp(const BigInt& a, const BigInt& b)
+{
+ const uint32_t *xa, *xa0, *xb, *xb0;
+ int i, j;
+
+ i = a.size();
+ j = b.size();
+ ASSERT(i <= 1 || a.words()[i - 1]);
+ ASSERT(j <= 1 || b.words()[j - 1]);
+ if (i -= j)
+ return i;
+ xa0 = a.words();
+ xa = xa0 + j;
+ xb0 = b.words();
+ xb = xb0 + j;
+ for (;;) {
+ if (*--xa != *--xb)
+ return *xa < *xb ? -1 : 1;
+ if (xa <= xa0)
+ break;
+ }
+ return 0;
+}
+
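+/* diff: compute c = |a - b|, setting c.sign when a < b. */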
+static ALWAYS_INLINE void diff(BigInt& c, const BigInt& aRef, const BigInt& bRef)
+{
+ const BigInt* a = &aRef;
+ const BigInt* b = &bRef;
+ int i, wa, wb;
+ uint32_t *xc;
+
+ i = cmp(*a, *b);
+ if (!i) {
+ c.sign = 0;
+ c.resize(1);
+ c.words()[0] = 0;
+ return;
+ }
+ if (i < 0) {
+ const BigInt* tmp = a;
+ a = b;
+ b = tmp;
+ i = 1;
+ } else
+ i = 0;
+
+ wa = a->size();
+ const uint32_t* xa = a->words();
+ const uint32_t* xae = xa + wa;
+ wb = b->size();
+ const uint32_t* xb = b->words();
+ const uint32_t* xbe = xb + wb;
+
+ c.resize(wa);
+ c.sign = i;
+ xc = c.words();
+#ifdef USE_LONG_LONG
+ unsigned long long borrow = 0;
+ do {
+ unsigned long long y = (unsigned long long)*xa++ - *xb++ - borrow;
+ borrow = y >> 32 & (uint32_t)1;
+ *xc++ = (uint32_t)y & 0xffffffffUL;
+ } while (xb < xbe);
+ while (xa < xae) {
+ unsigned long long y = *xa++ - borrow;
+ borrow = y >> 32 & (uint32_t)1;
+ *xc++ = (uint32_t)y & 0xffffffffUL;
+ }
+#else
+ uint32_t borrow = 0;
+#ifdef Pack_32
+ do {
+ uint32_t y = (*xa & 0xffff) - (*xb & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ uint32_t z = (*xa++ >> 16) - (*xb++ >> 16) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(xc, z, y);
+ } while (xb < xbe);
+ while (xa < xae) {
+ uint32_t y = (*xa & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ uint32_t z = (*xa++ >> 16) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(xc, z, y);
+ }
+#else
+ do {
+ uint32_t y = *xa++ - *xb++ - borrow;
+ borrow = (y & 0x10000) >> 16;
+ *xc++ = y & 0xffff;
+ } while (xb < xbe);
+ while (xa < xae) {
+ uint32_t y = *xa++ - borrow;
+ borrow = (y & 0x10000) >> 16;
+ *xc++ = y & 0xffff;
+ }
+#endif
+#endif
+ while (!*--xc)
+ wa--;
+ c.resize(wa);
+}
+
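+/* ulp: return one unit in the last place of *x, i.e. the spacing between
+ * *x and the adjacent representable double. */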
+static double ulp(U* x)
+{
+ register int32_t L;
+ U u;
+
+ L = (word0(x) & Exp_mask) - (P - 1) * Exp_msk1;
+#ifndef Avoid_Underflow
+#ifndef Sudden_Underflow
+ if (L > 0) {
+#endif
+#endif
+ word0(&u) = L;
+ word1(&u) = 0;
+#ifndef Avoid_Underflow
+#ifndef Sudden_Underflow
+ } else {
+ L = -L >> Exp_shift;
+ if (L < Exp_shift) {
+ word0(&u) = 0x80000 >> L;
+ word1(&u) = 0;
+ } else {
+ word0(&u) = 0;
+ L -= Exp_shift;
+ word1(&u) = L >= 31 ? 1 : 1 << (31 - L);
+ }
+ }
+#endif
+#endif
+ return dval(&u);
+}
+
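+/* b2d: convert the most-significant bits of a into a double in [1, 2);
+ * *e receives the number of significant bits in a's top word. */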
+static double b2d(const BigInt& a, int* e)
+{
+ const uint32_t* xa;
+ const uint32_t* xa0;
+ uint32_t w;
+ uint32_t y;
+ uint32_t z;
+ int k;
+ U d;
+
+#define d0 word0(&d)
+#define d1 word1(&d)
+
+ xa0 = a.words();
+ xa = xa0 + a.size();
+ y = *--xa;
+ ASSERT(y);
+ k = hi0bits(y);
+ *e = 32 - k;
+#ifdef Pack_32
+ if (k < Ebits) {
+ d0 = Exp_1 | (y >> (Ebits - k));
+ w = xa > xa0 ? *--xa : 0;
+ d1 = (y << (32 - Ebits + k)) | (w >> (Ebits - k));
+ goto ret_d;
+ }
+ z = xa > xa0 ? *--xa : 0;
+ if (k -= Ebits) {
+ d0 = Exp_1 | (y << k) | (z >> (32 - k));
+ y = xa > xa0 ? *--xa : 0;
+ d1 = (z << k) | (y >> (32 - k));
+ } else {
+ d0 = Exp_1 | y;
+ d1 = z;
+ }
+#else
+ if (k < Ebits + 16) {
+ z = xa > xa0 ? *--xa : 0;
+ d0 = Exp_1 | y << k - Ebits | z >> Ebits + 16 - k;
+ w = xa > xa0 ? *--xa : 0;
+ y = xa > xa0 ? *--xa : 0;
+ d1 = z << k + 16 - Ebits | w << k - Ebits | y >> 16 + Ebits - k;
+ goto ret_d;
+ }
+ z = xa > xa0 ? *--xa : 0;
+ w = xa > xa0 ? *--xa : 0;
+ k -= Ebits + 16;
+ d0 = Exp_1 | y << k + 16 | z << k | w >> 16 - k;
+ y = xa > xa0 ? *--xa : 0;
+ d1 = w << k + 16 | y << k;
+#endif
+ret_d:
+#undef d0
+#undef d1
+ return dval(&d);
+}
+
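+/* d2b: decompose *d into b * 2^*e with b an integer; *bits receives the
+ * number of significant bits in b (compare the "rv = bb * 2^bbe" use in
+ * strtod below). */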
+static ALWAYS_INLINE void d2b(BigInt& b, U* d, int* e, int* bits)
+{
+ int de, k;
+ uint32_t *x, y, z;
+#ifndef Sudden_Underflow
+ int i;
+#endif
+#define d0 word0(d)
+#define d1 word1(d)
+
+ b.sign = 0;
+#ifdef Pack_32
+ b.resize(1);
+#else
+ b.resize(2);
+#endif
+ x = b.words();
+
+ z = d0 & Frac_mask;
+ d0 &= 0x7fffffff; /* clear sign bit, which we ignore */
+#ifdef Sudden_Underflow
+ de = (int)(d0 >> Exp_shift);
+#else
+ if ((de = (int)(d0 >> Exp_shift)))
+ z |= Exp_msk1;
+#endif
+#ifdef Pack_32
+ if ((y = d1)) {
+ if ((k = lo0bits(&y))) {
+ x[0] = y | (z << (32 - k));
+ z >>= k;
+ } else
+ x[0] = y;
+ if (z) {
+ b.resize(2);
+ x[1] = z;
+ }
+
+#ifndef Sudden_Underflow
+ i = b.size();
+#endif
+ } else {
+ k = lo0bits(&z);
+ x[0] = z;
+#ifndef Sudden_Underflow
+ i = 1;
+#endif
+ b.resize(1);
+ k += 32;
+ }
+#else
+ if ((y = d1)) {
+ if ((k = lo0bits(&y))) {
+ if (k >= 16) {
+ x[0] = y | z << 32 - k & 0xffff;
+ x[1] = z >> k - 16 & 0xffff;
+ x[2] = z >> k;
+ i = 2;
+ } else {
+ x[0] = y & 0xffff;
+ x[1] = y >> 16 | z << 16 - k & 0xffff;
+ x[2] = z >> k & 0xffff;
+ x[3] = z >> k + 16;
+ i = 3;
+ }
+ } else {
+ x[0] = y & 0xffff;
+ x[1] = y >> 16;
+ x[2] = z & 0xffff;
+ x[3] = z >> 16;
+ i = 3;
+ }
+ } else {
+ k = lo0bits(&z);
+ if (k >= 16) {
+ x[0] = z;
+ i = 0;
+ } else {
+ x[0] = z & 0xffff;
+ x[1] = z >> 16;
+ i = 1;
+ }
+ k += 32;
+ }
+ while (!x[i])
+ --i;
+ b.resize(i + 1);
+#endif
+#ifndef Sudden_Underflow
+ if (de) {
+#endif
+ *e = de - Bias - (P - 1) + k;
+ *bits = P - k;
+#ifndef Sudden_Underflow
+ } else {
+ *e = de - Bias - (P - 1) + 1 + k;
+#ifdef Pack_32
+ *bits = (32 * i) - hi0bits(x[i - 1]);
+#else
+ *bits = (i + 2) * 16 - hi0bits(x[i]);
+#endif
+ }
+#endif
+}
+#undef d0
+#undef d1
+
+static double ratio(const BigInt& a, const BigInt& b)
+{
+ U da, db;
+ int k, ka, kb;
+
+ dval(&da) = b2d(a, &ka);
+ dval(&db) = b2d(b, &kb);
+#ifdef Pack_32
+ k = ka - kb + 32 * (a.size() - b.size());
+#else
+ k = ka - kb + 16 * (a.size() - b.size());
+#endif
+ if (k > 0)
+ word0(&da) += k * Exp_msk1;
+ else {
+ k = -k;
+ word0(&db) += k * Exp_msk1;
+ }
+ return dval(&da) / dval(&db);
+}
+
+static const double tens[] = {
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22
+};
+
+static const double bigtens[] = { 1e16, 1e32, 1e64, 1e128, 1e256 };
+static const double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
+#ifdef Avoid_Underflow
+ 9007199254740992. * 9007199254740992.e-256
+ /* = 2^106 * 1e-53 */
+#else
+ 1e-256
+#endif
+};
+
+/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */
+/* flag unnecessarily. It leads to a song and dance at the end of strtod. */
+#define Scale_Bit 0x10
+#define n_bigtens 5
+
+#if defined(INFNAN_CHECK)
+
+#ifndef NAN_WORD0
+#define NAN_WORD0 0x7ff80000
+#endif
+
+#ifndef NAN_WORD1
+#define NAN_WORD1 0
+#endif
+
+static int match(const char** sp, const char* t)
+{
+ int c, d;
+ const char* s = *sp;
+
+ while ((d = *t++)) {
+ if ((c = *++s) >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+ if (c != d)
+ return 0;
+ }
+ *sp = s + 1;
+ return 1;
+}
+
+#ifndef No_Hex_NaN
+static void hexnan(U* rvp, const char** sp)
+{
+ uint32_t c, x[2];
+ const char* s;
+ int havedig, udx0, xshift;
+
+ x[0] = x[1] = 0;
+ havedig = xshift = 0;
+ udx0 = 1;
+ s = *sp;
+ while ((c = *(const unsigned char*)++s)) {
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'a' && c <= 'f')
+ c += 10 - 'a';
+ else if (c >= 'A' && c <= 'F')
+ c += 10 - 'A';
+ else if (c <= ' ') {
+ if (udx0 && havedig) {
+ udx0 = 0;
+ xshift = 1;
+ }
+ continue;
+ } else if (/*(*/ c == ')' && havedig) {
+ *sp = s + 1;
+ break;
+ } else
+ return; /* invalid form: don't change *sp */
+ havedig = 1;
+ if (xshift) {
+ xshift = 0;
+ x[0] = x[1];
+ x[1] = 0;
+ }
+ if (udx0)
+ x[0] = (x[0] << 4) | (x[1] >> 28);
+ x[1] = (x[1] << 4) | c;
+ }
+ if ((x[0] &= 0xfffff) || x[1]) {
+ word0(rvp) = Exp_mask | x[0];
+ word1(rvp) = x[1];
+ }
+}
+#endif /*No_Hex_NaN*/
+#endif /* INFNAN_CHECK */
+
+double strtod(const char* s00, char** se)
+{
+#ifdef Avoid_Underflow
+ int scale;
+#endif
+ int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign,
+ e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign;
+ const char *s, *s0, *s1;
+ double aadj, aadj1;
+ U aadj2, adj, rv, rv0;
+ int32_t L;
+ uint32_t y, z;
+ BigInt bb, bb1, bd, bd0, bs, delta;
+#ifdef SET_INEXACT
+ int inexact, oldinexact;
+#endif
+
+ sign = nz0 = nz = 0;
+ dval(&rv) = 0;
+ for (s = s00; ; s++)
+ switch (*s) {
+ case '-':
+ sign = 1;
+ /* no break */
+ case '+':
+ if (*++s)
+ goto break2;
+ /* no break */
+ case 0:
+ goto ret0;
+ case '\t':
+ case '\n':
+ case '\v':
+ case '\f':
+ case '\r':
+ case ' ':
+ continue;
+ default:
+ goto break2;
+ }
+break2:
+ if (*s == '0') {
+ nz0 = 1;
+ while (*++s == '0') { }
+ if (!*s)
+ goto ret;
+ }
+ s0 = s;
+ y = z = 0;
+ for (nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++)
+ if (nd < 9)
+ y = (10 * y) + c - '0';
+ else if (nd < 16)
+ z = (10 * z) + c - '0';
+ nd0 = nd;
+ if (c == '.') {
+ c = *++s;
+ if (!nd) {
+ for (; c == '0'; c = *++s)
+ nz++;
+ if (c > '0' && c <= '9') {
+ s0 = s;
+ nf += nz;
+ nz = 0;
+ goto have_dig;
+ }
+ goto dig_done;
+ }
+ for (; c >= '0' && c <= '9'; c = *++s) {
+have_dig:
+ nz++;
+ if (c -= '0') {
+ nf += nz;
+ for (i = 1; i < nz; i++)
+ if (nd++ < 9)
+ y *= 10;
+ else if (nd <= DBL_DIG + 1)
+ z *= 10;
+ if (nd++ < 9)
+ y = (10 * y) + c;
+ else if (nd <= DBL_DIG + 1)
+ z = (10 * z) + c;
+ nz = 0;
+ }
+ }
+ }
+dig_done:
+ e = 0;
+ if (c == 'e' || c == 'E') {
+ if (!nd && !nz && !nz0) {
+ goto ret0;
+ }
+ s00 = s;
+ esign = 0;
+ switch (c = *++s) {
+ case '-':
+ esign = 1;
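+ /* no break */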
+ case '+':
+ c = *++s;
+ }
+ if (c >= '0' && c <= '9') {
+ while (c == '0')
+ c = *++s;
+ if (c > '0' && c <= '9') {
+ L = c - '0';
+ s1 = s;
+ while ((c = *++s) >= '0' && c <= '9')
+ L = (10 * L) + c - '0';
+ if (s - s1 > 8 || L > 19999)
+ /* Avoid confusion from exponents
+ * so large that e might overflow.
+ */
+ e = 19999; /* safe for 16 bit ints */
+ else
+ e = (int)L;
+ if (esign)
+ e = -e;
+ } else
+ e = 0;
+ } else
+ s = s00;
+ }
+ if (!nd) {
+ if (!nz && !nz0) {
+#ifdef INFNAN_CHECK
+ /* Check for Nan and Infinity */
+ switch(c) {
+ case 'i':
+ case 'I':
+ if (match(&s,"nf")) {
+ --s;
+ if (!match(&s,"inity"))
+ ++s;
+ word0(&rv) = 0x7ff00000;
+ word1(&rv) = 0;
+ goto ret;
+ }
+ break;
+ case 'n':
+ case 'N':
+ if (match(&s, "an")) {
+ word0(&rv) = NAN_WORD0;
+ word1(&rv) = NAN_WORD1;
+#ifndef No_Hex_NaN
+ if (*s == '(') /*)*/
+ hexnan(&rv, &s);
+#endif
+ goto ret;
+ }
+ }
+#endif /* INFNAN_CHECK */
+ret0:
+ s = s00;
+ sign = 0;
+ }
+ goto ret;
+ }
+ e1 = e -= nf;
+
+ /* Now we have nd0 digits, starting at s0, followed by a
+ * decimal point, followed by nd-nd0 digits. The number we're
+ * after is the integer represented by those digits times
+ * 10**e */
+
+ if (!nd0)
+ nd0 = nd;
+ k = nd < DBL_DIG + 1 ? nd : DBL_DIG + 1;
+ dval(&rv) = y;
+ if (k > 9) {
+#ifdef SET_INEXACT
+ if (k > DBL_DIG)
+ oldinexact = get_inexact();
+#endif
+ dval(&rv) = tens[k - 9] * dval(&rv) + z;
+ }
+ if (nd <= DBL_DIG && Flt_Rounds == 1) {
+ if (!e)
+ goto ret;
+ if (e > 0) {
+ if (e <= Ten_pmax) {
+ /* rv = */ rounded_product(dval(&rv), tens[e]);
+ goto ret;
+ }
+ i = DBL_DIG - nd;
+ if (e <= Ten_pmax + i) {
+ /* A fancier test would sometimes let us do
+ * this for larger i values.
+ */
+ e -= i;
+ dval(&rv) *= tens[i];
+ /* rv = */ rounded_product(dval(&rv), tens[e]);
+ goto ret;
+ }
+ }
+#ifndef Inaccurate_Divide
+ else if (e >= -Ten_pmax) {
+ /* rv = */ rounded_quotient(dval(&rv), tens[-e]);
+ goto ret;
+ }
+#endif
+ }
+ e1 += nd - k;
+
+#ifdef SET_INEXACT
+ inexact = 1;
+ if (k <= DBL_DIG)
+ oldinexact = get_inexact();
+#endif
+#ifdef Avoid_Underflow
+ scale = 0;
+#endif
+
+ /* Get starting approximation = rv * 10**e1 */
+
+ if (e1 > 0) {
+ if ((i = e1 & 15))
+ dval(&rv) *= tens[i];
+ if (e1 &= ~15) {
+ if (e1 > DBL_MAX_10_EXP) {
+ovfl:
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
+ /* Can't trust HUGE_VAL */
+ word0(&rv) = Exp_mask;
+ word1(&rv) = 0;
+#ifdef SET_INEXACT
+ /* set overflow bit */
+ dval(&rv0) = 1e300;
+ dval(&rv0) *= dval(&rv0);
+#endif
+ goto ret;
+ }
+ e1 >>= 4;
+ for (j = 0; e1 > 1; j++, e1 >>= 1)
+ if (e1 & 1)
+ dval(&rv) *= bigtens[j];
+ /* The last multiplication could overflow. */
+ word0(&rv) -= P * Exp_msk1;
+ dval(&rv) *= bigtens[j];
+ if ((z = word0(&rv) & Exp_mask) > Exp_msk1 * (DBL_MAX_EXP + Bias - P))
+ goto ovfl;
+ if (z > Exp_msk1 * (DBL_MAX_EXP + Bias - 1 - P)) {
+ /* set to largest number */
+ /* (Can't trust DBL_MAX) */
+ word0(&rv) = Big0;
+ word1(&rv) = Big1;
+ } else
+ word0(&rv) += P * Exp_msk1;
+ }
+ } else if (e1 < 0) {
+ e1 = -e1;
+ if ((i = e1 & 15))
+ dval(&rv) /= tens[i];
+ if (e1 >>= 4) {
+ if (e1 >= 1 << n_bigtens)
+ goto undfl;
+#ifdef Avoid_Underflow
+ if (e1 & Scale_Bit)
+ scale = 2 * P;
+ for (j = 0; e1 > 0; j++, e1 >>= 1)
+ if (e1 & 1)
+ dval(&rv) *= tinytens[j];
+ if (scale && (j = (2 * P) + 1 - ((word0(&rv) & Exp_mask) >> Exp_shift)) > 0) {
+ /* scaled rv is denormal; zap j low bits */
+ if (j >= 32) {
+ word1(&rv) = 0;
+ if (j >= 53)
+ word0(&rv) = (P + 2) * Exp_msk1;
+ else
+ word0(&rv) &= 0xffffffff << (j - 32);
+ } else
+ word1(&rv) &= 0xffffffff << j;
+ }
+#else
+ for (j = 0; e1 > 1; j++, e1 >>= 1)
+ if (e1 & 1)
+ dval(&rv) *= tinytens[j];
+ /* The last multiplication could underflow. */
+ dval(&rv0) = dval(&rv);
+ dval(&rv) *= tinytens[j];
+ if (!dval(&rv)) {
+ dval(&rv) = 2. * dval(&rv0);
+ dval(&rv) *= tinytens[j];
+#endif
+ if (!dval(&rv)) {
+undfl:
+ dval(&rv) = 0.;
+#ifndef NO_ERRNO
+ errno = ERANGE;
+#endif
+ goto ret;
+ }
+#ifndef Avoid_Underflow
+ word0(&rv) = Tiny0;
+ word1(&rv) = Tiny1;
+ /* The refinement below will clean
+ * this approximation up.
+ */
+ }
+#endif
+ }
+ }
+
+ /* Now the hard part -- adjusting rv to the correct value.*/
+
+ /* Put digits into bd: true value = bd * 10^e */
+
+ s2b(bd0, s0, nd0, nd, y);
+
+ for (;;) {
+ bd = bd0;
+ d2b(bb, &rv, &bbe, &bbbits); /* rv = bb * 2^bbe */
+ i2b(bs, 1);
+
+ if (e >= 0) {
+ bb2 = bb5 = 0;
+ bd2 = bd5 = e;
+ } else {
+ bb2 = bb5 = -e;
+ bd2 = bd5 = 0;
+ }
+ if (bbe >= 0)
+ bb2 += bbe;
+ else
+ bd2 -= bbe;
+ bs2 = bb2;
+#ifdef Avoid_Underflow
+ j = bbe - scale;
+ i = j + bbbits - 1; /* logb(rv) */
+ if (i < Emin) /* denormal */
+ j += P - Emin;
+ else
+ j = P + 1 - bbbits;
+#else /*Avoid_Underflow*/
+#ifdef Sudden_Underflow
+ j = P + 1 - bbbits;
+#else /*Sudden_Underflow*/
+ j = bbe;
+ i = j + bbbits - 1; /* logb(rv) */
+ if (i < Emin) /* denormal */
+ j += P - Emin;
+ else
+ j = P + 1 - bbbits;
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+ bb2 += j;
+ bd2 += j;
+#ifdef Avoid_Underflow
+ bd2 += scale;
+#endif
+ i = bb2 < bd2 ? bb2 : bd2;
+ if (i > bs2)
+ i = bs2;
+ if (i > 0) {
+ bb2 -= i;
+ bd2 -= i;
+ bs2 -= i;
+ }
+ if (bb5 > 0) {
+ pow5mult(bs, bb5);
+ mult(bb, bs);
+ }
+ if (bb2 > 0)
+ lshift(bb, bb2);
+ if (bd5 > 0)
+ pow5mult(bd, bd5);
+ if (bd2 > 0)
+ lshift(bd, bd2);
+ if (bs2 > 0)
+ lshift(bs, bs2);
+ diff(delta, bb, bd);
+ dsign = delta.sign;
+ delta.sign = 0;
+ i = cmp(delta, bs);
+
+ if (i < 0) {
+ /* Error is less than half an ulp -- check for
+ * special case of mantissa a power of two.
+ */
+ if (dsign || word1(&rv) || word0(&rv) & Bndry_mask
+#ifdef Avoid_Underflow
+ || (word0(&rv) & Exp_mask) <= (2 * P + 1) * Exp_msk1
+#else
+ || (word0(&rv) & Exp_mask) <= Exp_msk1
+#endif
+ ) {
+#ifdef SET_INEXACT
+ if (!delta.words()[0] && delta.size() <= 1)
+ inexact = 0;
+#endif
+ break;
+ }
+ if (!delta.words()[0] && delta.size() <= 1) {
+ /* exact result */
+#ifdef SET_INEXACT
+ inexact = 0;
+#endif
+ break;
+ }
+ lshift(delta, Log2P);
+ if (cmp(delta, bs) > 0)
+ goto drop_down;
+ break;
+ }
+ if (i == 0) {
+ /* exactly half-way between */
+ if (dsign) {
+ if ((word0(&rv) & Bndry_mask1) == Bndry_mask1
+ && word1(&rv) == (
+#ifdef Avoid_Underflow
+ (scale && (y = word0(&rv) & Exp_mask) <= 2 * P * Exp_msk1)
+ ? (0xffffffff & (0xffffffff << (2 * P + 1 - (y >> Exp_shift)))) :
+#endif
+ 0xffffffff)) {
+ /*boundary case -- increment exponent*/
+ word0(&rv) = (word0(&rv) & Exp_mask) + Exp_msk1;
+ word1(&rv) = 0;
+#ifdef Avoid_Underflow
+ dsign = 0;
+#endif
+ break;
+ }
+ } else if (!(word0(&rv) & Bndry_mask) && !word1(&rv)) {
+drop_down:
+ /* boundary case -- decrement exponent */
+#ifdef Sudden_Underflow /*{{*/
+ L = word0(&rv) & Exp_mask;
+#ifdef Avoid_Underflow
+ if (L <= (scale ? (2 * P + 1) * Exp_msk1 : Exp_msk1))
+#else
+ if (L <= Exp_msk1)
+#endif /*Avoid_Underflow*/
+ goto undfl;
+ L -= Exp_msk1;
+#else /*Sudden_Underflow}{*/
+#ifdef Avoid_Underflow
+ if (scale) {
+ L = word0(&rv) & Exp_mask;
+ if (L <= (2 * P + 1) * Exp_msk1) {
+ if (L > (P + 2) * Exp_msk1)
+ /* round even ==> */
+ /* accept rv */
+ break;
+ /* rv = smallest denormal */
+ goto undfl;
+ }
+ }
+#endif /*Avoid_Underflow*/
+ L = (word0(&rv) & Exp_mask) - Exp_msk1;
+#endif /*Sudden_Underflow}}*/
+ word0(&rv) = L | Bndry_mask1;
+ word1(&rv) = 0xffffffff;
+ break;
+ }
+ if (!(word1(&rv) & LSB))
+ break;
+ if (dsign)
+ dval(&rv) += ulp(&rv);
+ else {
+ dval(&rv) -= ulp(&rv);
+#ifndef Sudden_Underflow
+ if (!dval(&rv))
+ goto undfl;
+#endif
+ }
+#ifdef Avoid_Underflow
+ dsign = 1 - dsign;
+#endif
+ break;
+ }
+ if ((aadj = ratio(delta, bs)) <= 2.) {
+ if (dsign)
+ aadj = aadj1 = 1.;
+ else if (word1(&rv) || word0(&rv) & Bndry_mask) {
+#ifndef Sudden_Underflow
+ if (word1(&rv) == Tiny1 && !word0(&rv))
+ goto undfl;
+#endif
+ aadj = 1.;
+ aadj1 = -1.;
+ } else {
+ /* special case -- power of FLT_RADIX to be */
+ /* rounded down... */
+
+ if (aadj < 2. / FLT_RADIX)
+ aadj = 1. / FLT_RADIX;
+ else
+ aadj *= 0.5;
+ aadj1 = -aadj;
+ }
+ } else {
+ aadj *= 0.5;
+ aadj1 = dsign ? aadj : -aadj;
+#ifdef Check_FLT_ROUNDS
+ switch (Rounding) {
+ case 2: /* towards +infinity */
+ aadj1 -= 0.5;
+ break;
+ case 0: /* towards 0 */
+ case 3: /* towards -infinity */
+ aadj1 += 0.5;
+ }
+#else
+ if (Flt_Rounds == 0)
+ aadj1 += 0.5;
+#endif /*Check_FLT_ROUNDS*/
+ }
+ y = word0(&rv) & Exp_mask;
+
+ /* Check for overflow */
+
+ if (y == Exp_msk1 * (DBL_MAX_EXP + Bias - 1)) {
+ dval(&rv0) = dval(&rv);
+ word0(&rv) -= P * Exp_msk1;
+ adj.d = aadj1 * ulp(&rv);
+ dval(&rv) += adj.d;
+ if ((word0(&rv) & Exp_mask) >= Exp_msk1 * (DBL_MAX_EXP + Bias - P)) {
+ if (word0(&rv0) == Big0 && word1(&rv0) == Big1)
+ goto ovfl;
+ word0(&rv) = Big0;
+ word1(&rv) = Big1;
+ goto cont;
+ } else
+ word0(&rv) += P * Exp_msk1;
+ } else {
+#ifdef Avoid_Underflow
+ if (scale && y <= 2 * P * Exp_msk1) {
+ if (aadj <= 0x7fffffff) {
+ if ((z = (uint32_t)aadj) <= 0)
+ z = 1;
+ aadj = z;
+ aadj1 = dsign ? aadj : -aadj;
+ }
+ dval(&aadj2) = aadj1;
+ word0(&aadj2) += (2 * P + 1) * Exp_msk1 - y;
+ aadj1 = dval(&aadj2);
+ }
+ adj.d = aadj1 * ulp(&rv);
+ dval(&rv) += adj.d;
+#else
+#ifdef Sudden_Underflow
+ if ((word0(&rv) & Exp_mask) <= P * Exp_msk1) {
+ dval(&rv0) = dval(&rv);
+ word0(&rv) += P * Exp_msk1;
+ adj.d = aadj1 * ulp(&rv);
+ dval(&rv) += adj.d;
+ if ((word0(&rv) & Exp_mask) <= P * Exp_msk1) {
+ if (word0(&rv0) == Tiny0 && word1(&rv0) == Tiny1)
+ goto undfl;
+ word0(&rv) = Tiny0;
+ word1(&rv) = Tiny1;
+ goto cont;
+ } else
+ word0(&rv) -= P * Exp_msk1;
+ } else {
+ adj.d = aadj1 * ulp(&rv);
+ dval(&rv) += adj.d;
+ }
+#else /*Sudden_Underflow*/
+ /* Compute adj so that the IEEE rounding rules will
+ * correctly round rv + adj in some half-way cases.
+ * If rv * ulp(rv) is denormalized (i.e.,
+ * y <= (P - 1) * Exp_msk1), we must adjust aadj to avoid
+ * trouble from bits lost to denormalization;
+ * example: 1.2e-307 .
+ */
+ if (y <= (P - 1) * Exp_msk1 && aadj > 1.) {
+ aadj1 = (double)(int)(aadj + 0.5);
+ if (!dsign)
+ aadj1 = -aadj1;
+ }
+ adj.d = aadj1 * ulp(&rv);
+ dval(&rv) += adj.d;
+#endif /*Sudden_Underflow*/
+#endif /*Avoid_Underflow*/
+ }
+ z = word0(&rv) & Exp_mask;
+#ifndef SET_INEXACT
+#ifdef Avoid_Underflow
+ if (!scale)
+#endif
+ if (y == z) {
+ /* Can we stop now? */
+ L = (int32_t)aadj;
+ aadj -= L;
+ /* The tolerances below are conservative. */
+ if (dsign || word1(&rv) || word0(&rv) & Bndry_mask) {
+ if (aadj < .4999999 || aadj > .5000001)
+ break;
+ } else if (aadj < .4999999 / FLT_RADIX)
+ break;
+ }
+#endif
+cont:
+ ;
+ }
+#ifdef SET_INEXACT
+ if (inexact) {
+ if (!oldinexact) {
+ word0(&rv0) = Exp_1 + (70 << Exp_shift);
+ word1(&rv0) = 0;
+ dval(&rv0) += 1.;
+ }
+ } else if (!oldinexact)
+ clear_inexact();
+#endif
+#ifdef Avoid_Underflow
+ if (scale) {
+ word0(&rv0) = Exp_1 - 2 * P * Exp_msk1;
+ word1(&rv0) = 0;
+ dval(&rv) *= dval(&rv0);
+#ifndef NO_ERRNO
+ /* try to avoid the bug of testing an 8087 register value */
+ if (word0(&rv) == 0 && word1(&rv) == 0)
+ errno = ERANGE;
+#endif
+ }
+#endif /* Avoid_Underflow */
+#ifdef SET_INEXACT
+ if (inexact && !(word0(&rv) & Exp_mask)) {
+ /* set underflow bit */
+ dval(&rv0) = 1e-300;
+ dval(&rv0) *= dval(&rv0);
+ }
+#endif
+ret:
+ if (se)
+ *se = const_cast<char*>(s);
+ return sign ? -dval(&rv) : dval(&rv);
+}
+
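+/* quorem: divide b by S, returning the quotient digit (at most 9, by the
+ * callers' scaling) and leaving the remainder in b. */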
+static ALWAYS_INLINE int quorem(BigInt& b, BigInt& S)
+{
+ size_t n;
+ uint32_t *bx, *bxe, q, *sx, *sxe;
+#ifdef USE_LONG_LONG
+ unsigned long long borrow, carry, y, ys;
+#else
+ uint32_t borrow, carry, y, ys;
+#ifdef Pack_32
+ uint32_t si, z, zs;
+#endif
+#endif
+ ASSERT(b.size() <= 1 || b.words()[b.size() - 1]);
+ ASSERT(S.size() <= 1 || S.words()[S.size() - 1]);
+
+ n = S.size();
+ ASSERT_WITH_MESSAGE(b.size() <= n, "oversize b in quorem");
+ if (b.size() < n)
+ return 0;
+ sx = S.words();
+ sxe = sx + --n;
+ bx = b.words();
+ bxe = bx + n;
+ q = *bxe / (*sxe + 1); /* ensure q <= true quotient */
+ ASSERT_WITH_MESSAGE(q <= 9, "oversized quotient in quorem");
+ if (q) {
+ borrow = 0;
+ carry = 0;
+ do {
+#ifdef USE_LONG_LONG
+ ys = *sx++ * (unsigned long long)q + carry;
+ carry = ys >> 32;
+ y = *bx - (ys & 0xffffffffUL) - borrow;
+ borrow = y >> 32 & (uint32_t)1;
+ *bx++ = (uint32_t)y & 0xffffffffUL;
+#else
+#ifdef Pack_32
+ si = *sx++;
+ ys = (si & 0xffff) * q + carry;
+ zs = (si >> 16) * q + (ys >> 16);
+ carry = zs >> 16;
+ y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*bx >> 16) - (zs & 0xffff) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(bx, z, y);
+#else
+ ys = *sx++ * q + carry;
+ carry = ys >> 16;
+ y = *bx - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ *bx++ = y & 0xffff;
+#endif
+#endif
+ } while (sx <= sxe);
+ if (!*bxe) {
+ bx = b.words();
+ while (--bxe > bx && !*bxe)
+ --n;
+ b.resize(n);
+ }
+ }
+ if (cmp(b, S) >= 0) {
+ q++;
+ borrow = 0;
+ carry = 0;
+ bx = b.words();
+ sx = S.words();
+ do {
+#ifdef USE_LONG_LONG
+ ys = *sx++ + carry;
+ carry = ys >> 32;
+ y = *bx - (ys & 0xffffffffUL) - borrow;
+ borrow = y >> 32 & (uint32_t)1;
+ *bx++ = (uint32_t)y & 0xffffffffUL;
+#else
+#ifdef Pack_32
+ si = *sx++;
+ ys = (si & 0xffff) + carry;
+ zs = (si >> 16) + (ys >> 16);
+ carry = zs >> 16;
+ y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*bx >> 16) - (zs & 0xffff) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(bx, z, y);
+#else
+ ys = *sx++ + carry;
+ carry = ys >> 16;
+ y = *bx - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ *bx++ = y & 0xffff;
+#endif
+#endif
+ } while (sx <= sxe);
+ bx = b.words();
+ bxe = bx + n;
+ if (!*bxe) {
+ while (--bxe > bx && !*bxe)
+ --n;
+ b.resize(n);
+ }
+ }
+ return q;
+}
+
+/* dtoa for IEEE arithmetic (dmg): convert double to ASCII string.
+ *
+ * Inspired by "How to Print Floating-Point Numbers Accurately" by
+ * Guy L. Steele, Jr. and Jon L. White [Proc. ACM SIGPLAN '90, pp. 92-101].
+ *
+ * Modifications:
+ * 1. Rather than iterating, we use a simple numeric overestimate
+ * to determine k = floor(log10(d)). We scale relevant
+ * quantities using O(log2(k)) rather than O(k) multiplications.
+ * 2. For some modes > 2 (corresponding to ecvt and fcvt), we don't
+ * try to generate digits strictly left to right. Instead, we
+ * compute with fewer bits and propagate the carry if necessary
+ * when rounding the final digit up. This is often faster.
+ * 3. Under the assumption that input will be rounded nearest,
+ * mode 0 renders 1e23 as 1e23 rather than 9.999999999999999e22.
+ * That is, we allow equality in stopping tests when the
+ * round-nearest rule will give the same floating-point value
+ * as would satisfaction of the stopping test with strict
+ * inequality.
+ * 4. We remove common factors of powers of 2 from relevant
+ * quantities.
+ * 5. When converting floating-point integers less than 1e16,
+ * we use floating-point arithmetic rather than resorting
+ * to multiple-precision integers.
+ * 6. When asked to produce fewer than 15 digits, we first try
+ * to get by with floating-point arithmetic; we resort to
+ * multiple-precision integer arithmetic only if we cannot
+ * guarantee that the floating-point calculation has given
+ * the correctly rounded result. For k requested digits and
+ * "uniformly" distributed input, the probability is
+ * something like 10^(k-15) that we must resort to the int32_t
+ * calculation.
+ */
+
+void dtoa(char* result, double dd, int ndigits, int* decpt, int* sign, char** rve)
+{
+ /*
+ Arguments ndigits, decpt, sign are similar to those
+ of ecvt and fcvt; trailing zeros are suppressed from
+ the returned string. If not null, *rve is set to point
+ to the end of the return value. If dd is +-Infinity or NaN,
+ then *decpt is set to 9999.
+ */
+
+ int bbits, b2, b5, be, dig, i, ieps, ilim = 0, ilim0, ilim1 = 0,
+ j, j1, k, k0, k_check, leftright, m2, m5, s2, s5,
+ spec_case, try_quick;
+ int32_t L;
+#ifndef Sudden_Underflow
+ int denorm;
+ uint32_t x;
+#endif
+ BigInt b, b1, delta, mlo, mhi, S;
+ U d2, eps, u;
+ double ds;
+ char *s, *s0;
+#ifdef SET_INEXACT
+ int inexact, oldinexact;
+#endif
+
+ u.d = dd;
+ if (word0(&u) & Sign_bit) {
+ /* set sign for everything, including 0's and NaNs */
+ *sign = 1;
+ word0(&u) &= ~Sign_bit; /* clear sign bit */
+ } else
+ *sign = 0;
+
+ if ((word0(&u) & Exp_mask) == Exp_mask) {
+ /* Infinity or NaN */
+ *decpt = 9999;
+ if (!word1(&u) && !(word0(&u) & 0xfffff))
+ strcpy(result, "Infinity");
+ else
+ strcpy(result, "NaN");
+ return;
+ }
+ if (!dval(&u)) {
+ *decpt = 1;
+ result[0] = '0';
+ result[1] = '\0';
+ return;
+ }
+
+#ifdef SET_INEXACT
+ try_quick = oldinexact = get_inexact();
+ inexact = 1;
+#endif
+
+ d2b(b, &u, &be, &bbits);
+#ifdef Sudden_Underflow
+ i = (int)(word0(&u) >> Exp_shift1 & (Exp_mask >> Exp_shift1));
+#else
+ if ((i = (int)(word0(&u) >> Exp_shift1 & (Exp_mask >> Exp_shift1)))) {
+#endif
+ dval(&d2) = dval(&u);
+ word0(&d2) &= Frac_mask1;
+ word0(&d2) |= Exp_11;
+
+ /* log(x) ~=~ log(1.5) + (x-1.5)/1.5
+ * log10(x) = log(x) / log(10)
+ * ~=~ log(1.5)/log(10) + (x-1.5)/(1.5*log(10))
+ * log10(d) = (i-Bias)*log(2)/log(10) + log10(d2)
+ *
+ * This suggests computing an approximation k to log10(d) by
+ *
+ * k = (i - Bias)*0.301029995663981
+ * + ( (d2-1.5)*0.289529654602168 + 0.176091259055681 );
+ *
+ * We want k to be too large rather than too small.
+ * The error in the first-order Taylor series approximation
+ * is in our favor, so we just round up the constant enough
+ * to compensate for any error in the multiplication of
+ * (i - Bias) by 0.301029995663981; since |i - Bias| <= 1077,
+ * and 1077 * 0.30103 * 2^-52 ~=~ 7.2e-14,
+ * adding 1e-13 to the constant term more than suffices.
+ * Hence we adjust the constant term to 0.1760912590558.
+ * (We could get a more accurate k by invoking log10,
+ * but this is probably not worthwhile.)
+ */
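+ /* Illustrative sanity check: for d = 1.0, d2 = 1.0 and i - Bias = 0, so
+ * ds ~= (1.0 - 1.5) * 0.2895 + 0.1761 ~= 0.031 and k = floor(ds) = 0,
+ * matching log10(1.0) = 0. */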
+
+ i -= Bias;
+#ifndef Sudden_Underflow
+ denorm = 0;
+ } else {
+ /* d is denormalized */
+
+ i = bbits + be + (Bias + (P - 1) - 1);
+ x = (i > 32) ? (word0(&u) << (64 - i)) | (word1(&u) >> (i - 32))
+ : word1(&u) << (32 - i);
+ dval(&d2) = x;
+ word0(&d2) -= 31 * Exp_msk1; /* adjust exponent */
+ i -= (Bias + (P - 1) - 1) + 1;
+ denorm = 1;
+ }
+#endif
+ ds = (dval(&d2) - 1.5) * 0.289529654602168 + 0.1760912590558 + (i * 0.301029995663981);
+ k = (int)ds;
+ if (ds < 0. && ds != k)
+ k--; /* want k = floor(ds) */
+ k_check = 1;
+ if (k >= 0 && k <= Ten_pmax) {
+ if (dval(&u) < tens[k])
+ k--;
+ k_check = 0;
+ }
+ j = bbits - i - 1;
+ if (j >= 0) {
+ b2 = 0;
+ s2 = j;
+ } else {
+ b2 = -j;
+ s2 = 0;
+ }
+ if (k >= 0) {
+ b5 = 0;
+ s5 = k;
+ s2 += k;
+ } else {
+ b2 -= k;
+ b5 = -k;
+ s5 = 0;
+ }
+
+#ifndef SET_INEXACT
+#ifdef Check_FLT_ROUNDS
+ try_quick = Rounding == 1;
+#else
+ try_quick = 1;
+#endif
+#endif /*SET_INEXACT*/
+
+ leftright = 1;
+ ilim = ilim1 = -1;
+ i = 18;
+ ndigits = 0;
+ s = s0 = result;
+
+ if (ilim >= 0 && ilim <= Quick_max && try_quick) {
+
+ /* Try to get by with floating-point arithmetic. */
+
+ i = 0;
+ dval(&d2) = dval(&u);
+ k0 = k;
+ ilim0 = ilim;
+ ieps = 2; /* conservative */
+ if (k > 0) {
+ ds = tens[k & 0xf];
+ j = k >> 4;
+ if (j & Bletch) {
+ /* prevent overflows */
+ j &= Bletch - 1;
+ dval(&u) /= bigtens[n_bigtens - 1];
+ ieps++;
+ }
+ for (; j; j >>= 1, i++) {
+ if (j & 1) {
+ ieps++;
+ ds *= bigtens[i];
+ }
+ }
+ dval(&u) /= ds;
+ } else if ((j1 = -k)) {
+ dval(&u) *= tens[j1 & 0xf];
+ for (j = j1 >> 4; j; j >>= 1, i++) {
+ if (j & 1) {
+ ieps++;
+ dval(&u) *= bigtens[i];
+ }
+ }
+ }
+ if (k_check && dval(&u) < 1. && ilim > 0) {
+ if (ilim1 <= 0)
+ goto fast_failed;
+ ilim = ilim1;
+ k--;
+ dval(&u) *= 10.;
+ ieps++;
+ }
+ dval(&eps) = (ieps * dval(&u)) + 7.;
+ word0(&eps) -= (P - 1) * Exp_msk1;
+ if (ilim == 0) {
+ S.clear();
+ mhi.clear();
+ dval(&u) -= 5.;
+ if (dval(&u) > dval(&eps))
+ goto one_digit;
+ if (dval(&u) < -dval(&eps))
+ goto no_digits;
+ goto fast_failed;
+ }
+#ifndef No_leftright
+ if (leftright) {
+ /* Use Steele & White method of only
+ * generating digits needed.
+ */
+ dval(&eps) = (0.5 / tens[ilim - 1]) - dval(&eps);
+ for (i = 0;;) {
+ L = (long int)dval(&u);
+ dval(&u) -= L;
+ *s++ = '0' + (int)L;
+ if (dval(&u) < dval(&eps))
+ goto ret;
+ if (1. - dval(&u) < dval(&eps))
+ goto bump_up;
+ if (++i >= ilim)
+ break;
+ dval(&eps) *= 10.;
+ dval(&u) *= 10.;
+ }
+ } else {
+#endif
+ /* Generate ilim digits, then fix them up. */
+ dval(&eps) *= tens[ilim - 1];
+ for (i = 1;; i++, dval(&u) *= 10.) {
+ L = (int32_t)(dval(&u));
+ if (!(dval(&u) -= L))
+ ilim = i;
+ *s++ = '0' + (int)L;
+ if (i == ilim) {
+ if (dval(&u) > 0.5 + dval(&eps))
+ goto bump_up;
+ else if (dval(&u) < 0.5 - dval(&eps)) {
+ while (*--s == '0') { }
+ s++;
+ goto ret;
+ }
+ break;
+ }
+ }
+#ifndef No_leftright
+ }
+#endif
+fast_failed:
+ s = s0;
+ dval(&u) = dval(&d2);
+ k = k0;
+ ilim = ilim0;
+ }
+
+ /* Do we have a "small" integer? */
+
+ if (be >= 0 && k <= Int_max) {
+ /* Yes. */
+ ds = tens[k];
+ if (ndigits < 0 && ilim <= 0) {
+ S.clear();
+ mhi.clear();
+ if (ilim < 0 || dval(&u) <= 5 * ds)
+ goto no_digits;
+ goto one_digit;
+ }
+ for (i = 1;; i++, dval(&u) *= 10.) {
+ L = (int32_t)(dval(&u) / ds);
+ dval(&u) -= L * ds;
+#ifdef Check_FLT_ROUNDS
+ /* If FLT_ROUNDS == 2, L will usually be high by 1 */
+ if (dval(&u) < 0) {
+ L--;
+ dval(&u) += ds;
+ }
+#endif
+ *s++ = '0' + (int)L;
+ if (!dval(&u)) {
+#ifdef SET_INEXACT
+ inexact = 0;
+#endif
+ break;
+ }
+ if (i == ilim) {
+ dval(&u) += dval(&u);
+ if (dval(&u) > ds || (dval(&u) == ds && (L & 1))) {
+bump_up:
+ while (*--s == '9')
+ if (s == s0) {
+ k++;
+ *s = '0';
+ break;
+ }
+ ++*s++;
+ }
+ break;
+ }
+ }
+ goto ret;
+ }
+
+ m2 = b2;
+ m5 = b5;
+ mhi.clear();
+ mlo.clear();
+ if (leftright) {
+ i =
+#ifndef Sudden_Underflow
+ denorm ? be + (Bias + (P - 1) - 1 + 1) :
+#endif
+ 1 + P - bbits;
+ b2 += i;
+ s2 += i;
+ i2b(mhi, 1);
+ }
+ if (m2 > 0 && s2 > 0) {
+ i = m2 < s2 ? m2 : s2;
+ b2 -= i;
+ m2 -= i;
+ s2 -= i;
+ }
+ if (b5 > 0) {
+ if (leftright) {
+ if (m5 > 0) {
+ pow5mult(mhi, m5);
+ mult(b, mhi);
+ }
+ if ((j = b5 - m5))
+ pow5mult(b, j);
+ } else
+ pow5mult(b, b5);
+ }
+ i2b(S, 1);
+ if (s5 > 0)
+ pow5mult(S, s5);
+
+ /* Check for special case that d is a normalized power of 2. */
+
+ spec_case = 0;
+ if (!word1(&u) && !(word0(&u) & Bndry_mask)
+#ifndef Sudden_Underflow
+ && word0(&u) & (Exp_mask & ~Exp_msk1)
+#endif
+ ) {
+ /* The special case */
+ b2 += Log2P;
+ s2 += Log2P;
+ spec_case = 1;
+ }
+
+ /* Arrange for convenient computation of quotients:
+ * shift left if necessary so divisor has 4 leading 0 bits.
+ *
+ * Perhaps we should just compute leading 28 bits of S once
+ * and for all and pass them and a shift to quorem, so it
+ * can do shifts and ors to compute the numerator for q.
+ */
+#ifdef Pack_32
+ if ((i = ((s5 ? 32 - hi0bits(S.words()[S.size() - 1]) : 1) + s2) & 0x1f))
+ i = 32 - i;
+#else
+ if ((i = ((s5 ? 32 - hi0bits(S.words()[S.size() - 1]) : 1) + s2) & 0xf))
+ i = 16 - i;
+#endif
+ if (i > 4) {
+ i -= 4;
+ b2 += i;
+ m2 += i;
+ s2 += i;
+ } else if (i < 4) {
+ i += 28;
+ b2 += i;
+ m2 += i;
+ s2 += i;
+ }
+ if (b2 > 0)
+ lshift(b, b2);
+ if (s2 > 0)
+ lshift(S, s2);
+ if (k_check) {
+ if (cmp(b,S) < 0) {
+ k--;
+ multadd(b, 10, 0); /* we botched the k estimate */
+ if (leftright)
+ multadd(mhi, 10, 0);
+ ilim = ilim1;
+ }
+ }
+
+ if (leftright) {
+ if (m2 > 0)
+ lshift(mhi, m2);
+
+ /* Compute mlo -- check for special case
+ * that d is a normalized power of 2.
+ */
+
+ mlo = mhi;
+ if (spec_case) {
+ mhi = mlo;
+ lshift(mhi, Log2P);
+ }
+
+ for (i = 1;;i++) {
+ dig = quorem(b,S) + '0';
+ /* Do we yet have the shortest decimal string
+ * that will round to d?
+ */
+ j = cmp(b, mlo);
+ diff(delta, S, mhi);
+ j1 = delta.sign ? 1 : cmp(b, delta);
+ if (j1 == 0 && !(word1(&u) & 1)) {
+ if (dig == '9')
+ goto round_9_up;
+ if (j > 0)
+ dig++;
+#ifdef SET_INEXACT
+                        else if (!b.words()[0] && b.size() <= 1)
+                            inexact = 0;
+#endif
+ *s++ = dig;
+ goto ret;
+ }
+ if (j < 0 || (j == 0 && !(word1(&u) & 1))) {
+ if (!b.words()[0] && b.size() <= 1) {
+#ifdef SET_INEXACT
+ inexact = 0;
+#endif
+ goto accept_dig;
+ }
+ if (j1 > 0) {
+ lshift(b, 1);
+ j1 = cmp(b, S);
+ if ((j1 > 0 || (j1 == 0 && (dig & 1))) && dig++ == '9')
+ goto round_9_up;
+ }
+accept_dig:
+ *s++ = dig;
+ goto ret;
+ }
+ if (j1 > 0) {
+ if (dig == '9') { /* possible if i == 1 */
+round_9_up:
+ *s++ = '9';
+ goto roundoff;
+ }
+ *s++ = dig + 1;
+ goto ret;
+ }
+ *s++ = dig;
+ if (i == ilim)
+ break;
+ multadd(b, 10, 0);
+ multadd(mlo, 10, 0);
+ multadd(mhi, 10, 0);
+ }
+ } else
+ for (i = 1;; i++) {
+ *s++ = dig = quorem(b,S) + '0';
+ if (!b.words()[0] && b.size() <= 1) {
+#ifdef SET_INEXACT
+ inexact = 0;
+#endif
+ goto ret;
+ }
+ if (i >= ilim)
+ break;
+ multadd(b, 10, 0);
+ }
+
+ /* Round off last digit */
+
+ lshift(b, 1);
+ j = cmp(b, S);
+ if (j > 0 || (j == 0 && (dig & 1))) {
+roundoff:
+ while (*--s == '9')
+ if (s == s0) {
+ k++;
+ *s++ = '1';
+ goto ret;
+ }
+ ++*s++;
+ } else {
+ while (*--s == '0') { }
+ s++;
+ }
+ goto ret;
+no_digits:
+ k = -1 - ndigits;
+ goto ret;
+one_digit:
+ *s++ = '1';
+ k++;
+ goto ret;
+ret:
+#ifdef SET_INEXACT
+ if (inexact) {
+ if (!oldinexact) {
+ word0(&u) = Exp_1 + (70 << Exp_shift);
+ word1(&u) = 0;
+ dval(&u) += 1.;
+ }
+ } else if (!oldinexact)
+ clear_inexact();
+#endif
+ *s = 0;
+ *decpt = k + 1;
+ if (rve)
+ *rve = s;
+}
+
+} // namespace WTF
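The long comment in the hunk above derives dtoa()'s first guess k at floor(log10(d)) from the binary exponent and a linearized logarithm around 1.5. A minimal standalone sketch of that estimate, using frexp() in place of the raw word0()/word1() bit access (estimateK and the sample values are illustrative, not part of the import):

    #include <cmath>
    #include <cstdio>

    // frexp() gives d = f * 2^e with f in [0.5, 1); the hunk's d2 lies in
    // [1, 2), so rescale: d = (2f) * 2^(e-1), making e - 1 play the role
    // of the hunk's i - Bias.
    static int estimateK(double d)
    {
        int e;
        double f = std::frexp(d, &e);
        double d2 = f * 2.0;
        int i = e - 1;
        double ds = (d2 - 1.5) * 0.289529654602168
                  + 0.1760912590558
                  + i * 0.301029995663981;
        int k = (int)ds;
        if (ds < 0. && ds != k)
            --k; // floor, exactly as the hunk does
        return k;
    }

    int main()
    {
        const double samples[] = { 3.14159, 1e-3, 6.02e23, 0.5 };
        for (int n = 0; n < 4; ++n) {
            double d = samples[n];
            // The estimate is exact or one too high; dtoa() repairs the
            // high case by comparing d against tens[k] (the k_check path).
            std::printf("d=%g estimate=%d floor(log10)=%d\n", d,
                        estimateK(d), (int)std::floor(std::log10(d)));
        }
        return 0;
    }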
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/dtoa.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/dtoa.h
new file mode 100644
index 0000000..cbec7c7
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/dtoa.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2003, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_dtoa_h
+#define WTF_dtoa_h
+
+namespace WTF {
+ class Mutex;
+}
+
+namespace WTF {
+
+ extern WTF::Mutex* s_dtoaP5Mutex;
+
+ double strtod(const char* s00, char** se);
+ void dtoa(char* result, double d, int ndigits, int* decpt, int* sign, char** rve);
+
+} // namespace WTF
+
+#endif // WTF_dtoa_h
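A hedged usage sketch for the dtoa() declaration above. In this port the ndigits argument appears to be unused (the body overwrites it and always emits the shortest round-tripping digit string); digits come back with no sign or decimal point, *decpt gives the decimal-point position, *sign the sign, and *rve one past the last digit. The buffer size is illustrative:

    #include <cstdio>

    namespace WTF {
    void dtoa(char* result, double d, int ndigits, int* decpt, int* sign, char** rve);
    }

    int main()
    {
        char digits[80]; // callers in this tree pass buffers of at least this size
        int decpt, sign;
        char* end;
        WTF::dtoa(digits, -3.125, 0, &decpt, &sign, &end);
        // Expect digits="3125", decpt=1, sign=1, i.e. -0.3125 * 10^1.
        std::printf("%s%s (point after digit %d)\n",
                    sign ? "-" : "", digits, decpt);
        return 0;
    }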
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/qt/MainThreadQt.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/qt/MainThreadQt.cpp
new file mode 100644
index 0000000..0ac2717
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/qt/MainThreadQt.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2007 Staikos Computing Services Inc.
+ * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies)
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MainThread.h"
+
+#include <QtCore/QObject>
+#include <QtCore/QCoreApplication>
+
+
+namespace WTF {
+
+QT_USE_NAMESPACE
+
+class MainThreadInvoker : public QObject {
+ Q_OBJECT
+public:
+ MainThreadInvoker();
+
+private Q_SLOTS:
+ void dispatch();
+};
+
+MainThreadInvoker::MainThreadInvoker()
+{
+ moveToThread(QCoreApplication::instance()->thread());
+}
+
+void MainThreadInvoker::dispatch()
+{
+ dispatchFunctionsFromMainThread();
+}
+
+Q_GLOBAL_STATIC(MainThreadInvoker, webkit_main_thread_invoker)
+
+void initializeMainThreadPlatform()
+{
+}
+
+void scheduleDispatchFunctionsOnMainThread()
+{
+ QMetaObject::invokeMethod(webkit_main_thread_invoker(), "dispatch", Qt::QueuedConnection);
+}
+
+} // namespace WTF
+
+#include "MainThreadQt.moc"
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/qt/ThreadingQt.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/qt/ThreadingQt.cpp
new file mode 100644
index 0000000..5a84764
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/qt/ThreadingQt.cpp
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "config.h"
+#include "Threading.h"
+
+#include "CurrentTime.h"
+#include "HashMap.h"
+#include "MainThread.h"
+#include "RandomNumberSeed.h"
+
+#include <QCoreApplication>
+#include <QMutex>
+#include <QThread>
+#include <QWaitCondition>
+
+namespace WTF {
+
+QT_USE_NAMESPACE
+
+class ThreadPrivate : public QThread {
+public:
+ ThreadPrivate(ThreadFunction entryPoint, void* data);
+ void run();
+ void* getReturnValue() { return m_returnValue; }
+private:
+ void* m_data;
+ ThreadFunction m_entryPoint;
+ void* m_returnValue;
+};
+
+ThreadPrivate::ThreadPrivate(ThreadFunction entryPoint, void* data)
+ : m_data(data)
+ , m_entryPoint(entryPoint)
+ , m_returnValue(0)
+{
+}
+
+void ThreadPrivate::run()
+{
+ m_returnValue = m_entryPoint(m_data);
+}
+
+
+static Mutex* atomicallyInitializedStaticMutex;
+
+static ThreadIdentifier mainThreadIdentifier;
+
+static Mutex& threadMapMutex()
+{
+ static Mutex mutex;
+ return mutex;
+}
+
+static HashMap<ThreadIdentifier, QThread*>& threadMap()
+{
+ static HashMap<ThreadIdentifier, QThread*> map;
+ return map;
+}
+
+static ThreadIdentifier identifierByQthreadHandle(QThread*& thread)
+{
+ MutexLocker locker(threadMapMutex());
+
+ HashMap<ThreadIdentifier, QThread*>::iterator i = threadMap().begin();
+ for (; i != threadMap().end(); ++i) {
+ if (i->second == thread)
+ return i->first;
+ }
+
+ return 0;
+}
+
+static ThreadIdentifier establishIdentifierForThread(QThread*& thread)
+{
+ ASSERT(!identifierByQthreadHandle(thread));
+
+ MutexLocker locker(threadMapMutex());
+
+ static ThreadIdentifier identifierCount = 1;
+
+ threadMap().add(identifierCount, thread);
+
+ return identifierCount++;
+}
+
+static void clearThreadForIdentifier(ThreadIdentifier id)
+{
+ MutexLocker locker(threadMapMutex());
+
+ ASSERT(threadMap().contains(id));
+
+ threadMap().remove(id);
+}
+
+static QThread* threadForIdentifier(ThreadIdentifier id)
+{
+ MutexLocker locker(threadMapMutex());
+
+ return threadMap().get(id);
+}
+
+void initializeThreading()
+{
+ if (!atomicallyInitializedStaticMutex) {
+ atomicallyInitializedStaticMutex = new Mutex;
+ threadMapMutex();
+ initializeRandomNumberGenerator();
+ QThread* mainThread = QCoreApplication::instance()->thread();
+ mainThreadIdentifier = identifierByQthreadHandle(mainThread);
+ if (!mainThreadIdentifier)
+ mainThreadIdentifier = establishIdentifierForThread(mainThread);
+ initializeMainThread();
+ }
+}
+
+void lockAtomicallyInitializedStaticMutex()
+{
+ ASSERT(atomicallyInitializedStaticMutex);
+ atomicallyInitializedStaticMutex->lock();
+}
+
+void unlockAtomicallyInitializedStaticMutex()
+{
+ atomicallyInitializedStaticMutex->unlock();
+}
+
+ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char*)
+{
+ ThreadPrivate* thread = new ThreadPrivate(entryPoint, data);
+ if (!thread) {
+ LOG_ERROR("Failed to create thread at entry point %p with data %p", entryPoint, data);
+ return 0;
+ }
+ thread->start();
+
+ QThread* threadRef = static_cast<QThread*>(thread);
+
+ return establishIdentifierForThread(threadRef);
+}
+
+void setThreadNameInternal(const char*)
+{
+}
+
+int waitForThreadCompletion(ThreadIdentifier threadID, void** result)
+{
+ ASSERT(threadID);
+
+ QThread* thread = threadForIdentifier(threadID);
+
+ bool res = thread->wait();
+
+ clearThreadForIdentifier(threadID);
+ if (result)
+ *result = static_cast<ThreadPrivate*>(thread)->getReturnValue();
+
+ return !res;
+}
+
+void detachThread(ThreadIdentifier)
+{
+}
+
+ThreadIdentifier currentThread()
+{
+ QThread* currentThread = QThread::currentThread();
+ if (ThreadIdentifier id = identifierByQthreadHandle(currentThread))
+ return id;
+ return establishIdentifierForThread(currentThread);
+}
+
+bool isMainThread()
+{
+ return QThread::currentThread() == QCoreApplication::instance()->thread();
+}
+
+Mutex::Mutex()
+ : m_mutex(new QMutex())
+{
+}
+
+Mutex::~Mutex()
+{
+ delete m_mutex;
+}
+
+void Mutex::lock()
+{
+ m_mutex->lock();
+}
+
+bool Mutex::tryLock()
+{
+ return m_mutex->tryLock();
+}
+
+void Mutex::unlock()
+{
+ m_mutex->unlock();
+}
+
+ThreadCondition::ThreadCondition()
+ : m_condition(new QWaitCondition())
+{
+}
+
+ThreadCondition::~ThreadCondition()
+{
+ delete m_condition;
+}
+
+void ThreadCondition::wait(Mutex& mutex)
+{
+ m_condition->wait(mutex.impl());
+}
+
+bool ThreadCondition::timedWait(Mutex& mutex, double absoluteTime)
+{
+ double currentTime = WTF::currentTime();
+
+ // Time is in the past - return immediately.
+ if (absoluteTime < currentTime)
+ return false;
+
+ // Time is too far in the future (and would overflow unsigned long) - wait forever.
+ if (absoluteTime - currentTime > static_cast<double>(INT_MAX) / 1000.0) {
+ wait(mutex);
+ return true;
+ }
+
+ double intervalMilliseconds = (absoluteTime - currentTime) * 1000.0;
+ return m_condition->wait(mutex.impl(), static_cast<unsigned long>(intervalMilliseconds));
+}
+
+void ThreadCondition::signal()
+{
+ m_condition->wakeOne();
+}
+
+void ThreadCondition::broadcast()
+{
+ m_condition->wakeAll();
+}
+
+} // namespace WTF
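ThreadCondition::timedWait() above takes an absolute deadline in the currentTime() epoch (seconds), not a relative interval; a past deadline returns immediately, and a far-future one degrades to a plain wait(). A hedged usage sketch (waitUpToOneSecond is an illustrative name):

    #include "CurrentTime.h"
    #include "Threading.h"

    bool waitUpToOneSecond(WTF::ThreadCondition& condition, WTF::Mutex& mutex)
    {
        WTF::MutexLocker locker(mutex); // timedWait() expects the mutex held
        double deadline = WTF::currentTime() + 1.0;
        return condition.timedWait(mutex, deadline); // false on timeout
    }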
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/Collator.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/Collator.h
new file mode 100644
index 0000000..51e8a06
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/Collator.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Collator_h
+#define WTF_Collator_h
+
+#include <memory>
+#include <wtf/Noncopyable.h>
+#include <wtf/unicode/Unicode.h>
+
+#if USE(ICU_UNICODE) && !UCONFIG_NO_COLLATION
+struct UCollator;
+#endif
+
+namespace WTF {
+
+ class Collator : public Noncopyable {
+ public:
+ enum Result { Equal = 0, Greater = 1, Less = -1 };
+
+ Collator(const char* locale); // Parsing is lenient; e.g. language identifiers (such as "en-US") are accepted, too.
+ ~Collator();
+ void setOrderLowerFirst(bool);
+
+ static std::auto_ptr<Collator> userDefault();
+
+ Result collate(const ::UChar*, size_t, const ::UChar*, size_t) const;
+
+ private:
+#if USE(ICU_UNICODE) && !UCONFIG_NO_COLLATION
+ void createCollator() const;
+ void releaseCollator();
+ mutable UCollator* m_collator;
+#endif
+ char* m_locale;
+ bool m_lowerFirst;
+ };
+}
+
+using WTF::Collator;
+
+#endif
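A hedged usage sketch for the interface above: userDefault() yields a collator for the user's locale (the ICU-backed variant appears later in this patch), and collate() returns one of the three Result values. userOrderedBefore is an illustrative name:

    #include <wtf/unicode/Collator.h>

    bool userOrderedBefore(const UChar* a, size_t aLength,
                           const UChar* b, size_t bLength)
    {
        std::auto_ptr<Collator> collator = Collator::userDefault();
        return collator->collate(a, aLength, b, bLength) == Collator::Less;
    }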
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/CollatorDefault.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/CollatorDefault.cpp
new file mode 100644
index 0000000..eddbe53
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/CollatorDefault.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Collator.h"
+
+#if !USE(ICU_UNICODE) || UCONFIG_NO_COLLATION
+
+namespace WTF {
+
+Collator::Collator(const char*)
+{
+}
+
+Collator::~Collator()
+{
+}
+
+void Collator::setOrderLowerFirst(bool)
+{
+}
+
+std::auto_ptr<Collator> Collator::userDefault()
+{
+ return std::auto_ptr<Collator>(new Collator(0));
+}
+
+// A default implementation for platforms that lack Unicode-aware collation.
+Collator::Result Collator::collate(const UChar* lhs, size_t lhsLength, const UChar* rhs, size_t rhsLength) const
+{
+ int lmin = lhsLength < rhsLength ? lhsLength : rhsLength;
+ int l = 0;
+ while (l < lmin && *lhs == *rhs) {
+ lhs++;
+ rhs++;
+ l++;
+ }
+
+ if (l < lmin)
+ return (*lhs > *rhs) ? Greater : Less;
+
+ if (lhsLength == rhsLength)
+ return Equal;
+
+ return (lhsLength > rhsLength) ? Greater : Less;
+}
+
+}
+
+#endif
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/UTF8.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/UTF8.cpp
new file mode 100644
index 0000000..9e713fe
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/UTF8.cpp
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "UTF8.h"
+
+namespace WTF {
+namespace Unicode {
+
+inline int inlineUTF8SequenceLengthNonASCII(char b0)
+{
+ if ((b0 & 0xC0) != 0xC0)
+ return 0;
+ if ((b0 & 0xE0) == 0xC0)
+ return 2;
+ if ((b0 & 0xF0) == 0xE0)
+ return 3;
+ if ((b0 & 0xF8) == 0xF0)
+ return 4;
+ return 0;
+}
+
+inline int inlineUTF8SequenceLength(char b0)
+{
+ return (b0 & 0x80) == 0 ? 1 : inlineUTF8SequenceLengthNonASCII(b0);
+}
+
+int UTF8SequenceLength(char b0)
+{
+ return (b0 & 0x80) == 0 ? 1 : inlineUTF8SequenceLengthNonASCII(b0);
+}
+
+int decodeUTF8Sequence(const char* sequence)
+{
+ // Handle 0-byte sequences (never valid).
+ const unsigned char b0 = sequence[0];
+ const int length = inlineUTF8SequenceLength(b0);
+ if (length == 0)
+ return -1;
+
+ // Handle 1-byte sequences (plain ASCII).
+ const unsigned char b1 = sequence[1];
+ if (length == 1) {
+ if (b1)
+ return -1;
+ return b0;
+ }
+
+ // Handle 2-byte sequences.
+ if ((b1 & 0xC0) != 0x80)
+ return -1;
+ const unsigned char b2 = sequence[2];
+ if (length == 2) {
+ if (b2)
+ return -1;
+ const int c = ((b0 & 0x1F) << 6) | (b1 & 0x3F);
+ if (c < 0x80)
+ return -1;
+ return c;
+ }
+
+ // Handle 3-byte sequences.
+ if ((b2 & 0xC0) != 0x80)
+ return -1;
+ const unsigned char b3 = sequence[3];
+ if (length == 3) {
+ if (b3)
+ return -1;
+ const int c = ((b0 & 0xF) << 12) | ((b1 & 0x3F) << 6) | (b2 & 0x3F);
+ if (c < 0x800)
+ return -1;
+ // UTF-16 surrogates should never appear in UTF-8 data.
+ if (c >= 0xD800 && c <= 0xDFFF)
+ return -1;
+ return c;
+ }
+
+ // Handle 4-byte sequences.
+ if ((b3 & 0xC0) != 0x80)
+ return -1;
+ const unsigned char b4 = sequence[4];
+ if (length == 4) {
+ if (b4)
+ return -1;
+ const int c = ((b0 & 0x7) << 18) | ((b1 & 0x3F) << 12) | ((b2 & 0x3F) << 6) | (b3 & 0x3F);
+ if (c < 0x10000 || c > 0x10FFFF)
+ return -1;
+ return c;
+ }
+
+ return -1;
+}
+
+// Once the bits are split out into bytes of UTF-8, this is a mask OR-ed
+// into the first byte, depending on how many bytes follow. There are
+// as many entries in this table as there are UTF-8 sequence types.
+// (I.e., one byte sequence, two byte... etc.). Remember that sequences
+// for *legal* UTF-8 will be 4 or fewer bytes total.
+static const unsigned char firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC };
+
+ConversionResult convertUTF16ToUTF8(
+ const UChar** sourceStart, const UChar* sourceEnd,
+ char** targetStart, char* targetEnd, bool strict)
+{
+ ConversionResult result = conversionOK;
+ const UChar* source = *sourceStart;
+ char* target = *targetStart;
+ while (source < sourceEnd) {
+ UChar32 ch;
+ unsigned short bytesToWrite = 0;
+ const UChar32 byteMask = 0xBF;
+ const UChar32 byteMark = 0x80;
+ const UChar* oldSource = source; // In case we have to back up because of target overflow.
+ ch = static_cast<unsigned short>(*source++);
+ // If we have a surrogate pair, convert to UChar32 first.
+ if (ch >= 0xD800 && ch <= 0xDBFF) {
+ // If the 16 bits following the high surrogate are in the source buffer...
+ if (source < sourceEnd) {
+ UChar32 ch2 = static_cast<unsigned short>(*source);
+ // If it's a low surrogate, convert to UChar32.
+ if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) {
+ ch = ((ch - 0xD800) << 10) + (ch2 - 0xDC00) + 0x0010000;
+ ++source;
+ } else if (strict) { // it's an unpaired high surrogate
+ --source; // return to the illegal value itself
+ result = sourceIllegal;
+ break;
+ }
+ } else { // We don't have the 16 bits following the high surrogate.
+ --source; // return to the high surrogate
+ result = sourceExhausted;
+ break;
+ }
+ } else if (strict) {
+ // UTF-16 surrogate values are illegal in UTF-32
+ if (ch >= 0xDC00 && ch <= 0xDFFF) {
+ --source; // return to the illegal value itself
+ result = sourceIllegal;
+ break;
+ }
+ }
+ // Figure out how many bytes the result will require
+ if (ch < (UChar32)0x80) {
+ bytesToWrite = 1;
+ } else if (ch < (UChar32)0x800) {
+ bytesToWrite = 2;
+ } else if (ch < (UChar32)0x10000) {
+ bytesToWrite = 3;
+ } else if (ch < (UChar32)0x110000) {
+ bytesToWrite = 4;
+ } else {
+ bytesToWrite = 3;
+ ch = 0xFFFD;
+ }
+
+ target += bytesToWrite;
+ if (target > targetEnd) {
+ source = oldSource; // Back up source pointer!
+ target -= bytesToWrite;
+ result = targetExhausted;
+ break;
+ }
+ switch (bytesToWrite) { // note: everything falls through.
+ case 4: *--target = (char)((ch | byteMark) & byteMask); ch >>= 6;
+ case 3: *--target = (char)((ch | byteMark) & byteMask); ch >>= 6;
+ case 2: *--target = (char)((ch | byteMark) & byteMask); ch >>= 6;
+ case 1: *--target = (char)(ch | firstByteMark[bytesToWrite]);
+ }
+ target += bytesToWrite;
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+// This must be called with the length pre-determined by the first byte.
+// If presented with a length > 4, this returns false. The Unicode
+// definition of UTF-8 goes up to 4-byte sequences.
+static bool isLegalUTF8(const unsigned char* source, int length)
+{
+ unsigned char a;
+ const unsigned char* srcptr = source + length;
+ switch (length) {
+ default: return false;
+ // Everything else falls through when "true"...
+ case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ case 2: if ((a = (*--srcptr)) > 0xBF) return false;
+
+ switch (*source) {
+ // no fall-through in this inner switch
+ case 0xE0: if (a < 0xA0) return false; break;
+ case 0xED: if (a > 0x9F) return false; break;
+ case 0xF0: if (a < 0x90) return false; break;
+ case 0xF4: if (a > 0x8F) return false; break;
+ default: if (a < 0x80) return false;
+ }
+
+ case 1: if (*source >= 0x80 && *source < 0xC2) return false;
+ }
+ if (*source > 0xF4)
+ return false;
+ return true;
+}
+
+// Magic values subtracted from a buffer value during UTF8 conversion.
+// This table contains as many values as there might be trailing bytes
+// in a UTF-8 sequence.
+static const UChar32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL,
+ 0x03C82080UL, 0xFA082080UL, 0x82082080UL };
+
+ConversionResult convertUTF8ToUTF16(
+ const char** sourceStart, const char* sourceEnd,
+ UChar** targetStart, UChar* targetEnd, bool strict)
+{
+ ConversionResult result = conversionOK;
+ const char* source = *sourceStart;
+ UChar* target = *targetStart;
+ while (source < sourceEnd) {
+ UChar32 ch = 0;
+ int extraBytesToRead = UTF8SequenceLength(*source) - 1;
+ if (source + extraBytesToRead >= sourceEnd) {
+ result = sourceExhausted;
+ break;
+ }
+ // Do this check whether lenient or strict
+ if (!isLegalUTF8(reinterpret_cast<const unsigned char*>(source), extraBytesToRead + 1)) {
+ result = sourceIllegal;
+ break;
+ }
+ // The cases all fall through.
+ switch (extraBytesToRead) {
+ case 5: ch += static_cast<unsigned char>(*source++); ch <<= 6; // remember, illegal UTF-8
+ case 4: ch += static_cast<unsigned char>(*source++); ch <<= 6; // remember, illegal UTF-8
+ case 3: ch += static_cast<unsigned char>(*source++); ch <<= 6;
+ case 2: ch += static_cast<unsigned char>(*source++); ch <<= 6;
+ case 1: ch += static_cast<unsigned char>(*source++); ch <<= 6;
+ case 0: ch += static_cast<unsigned char>(*source++);
+ }
+ ch -= offsetsFromUTF8[extraBytesToRead];
+
+ if (target >= targetEnd) {
+ source -= (extraBytesToRead + 1); // Back up source pointer!
+ result = targetExhausted; break;
+ }
+ if (ch <= 0xFFFF) {
+ // UTF-16 surrogate values are illegal in UTF-32
+ if (ch >= 0xD800 && ch <= 0xDFFF) {
+ if (strict) {
+ source -= (extraBytesToRead + 1); // return to the illegal value itself
+ result = sourceIllegal;
+ break;
+ } else
+ *target++ = 0xFFFD;
+ } else
+ *target++ = (UChar)ch; // normal case
+ } else if (ch > 0x10FFFF) {
+ if (strict) {
+ result = sourceIllegal;
+ source -= (extraBytesToRead + 1); // return to the start
+ break; // Bail out; shouldn't continue
+ } else
+ *target++ = 0xFFFD;
+ } else {
+            // ch is now a character in the range 0x10000 - 0x10FFFF
+ if (target + 1 >= targetEnd) {
+ source -= (extraBytesToRead + 1); // Back up source pointer!
+ result = targetExhausted;
+ break;
+ }
+ ch -= 0x0010000UL;
+ *target++ = (UChar)((ch >> 10) + 0xD800);
+ *target++ = (UChar)((ch & 0x03FF) + 0xDC00);
+ }
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+}
+}
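The firstByteMark table and the back-to-front switch in convertUTF16ToUTF8() above implement the standard UTF-8 byte layout. A standalone sketch (illustrative, not part of the import) tracing that math for one three-byte code point, U+20AC (EURO SIGN):

    #include <cstdio>

    int main()
    {
        unsigned ch = 0x20AC;
        const unsigned byteMask = 0xBF, byteMark = 0x80;
        static const unsigned char firstByteMark[7] =
            { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC };

        unsigned char out[3];
        // Written back to front, exactly like the fall-through switch above.
        out[2] = (unsigned char)((ch | byteMark) & byteMask); ch >>= 6; // 0xAC
        out[1] = (unsigned char)((ch | byteMark) & byteMask); ch >>= 6; // 0x82
        out[0] = (unsigned char)(ch | firstByteMark[3]);                // 0xE2
        std::printf("%02X %02X %02X\n", out[0], out[1], out[2]); // E2 82 AC
        return 0;
    }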
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/UTF8.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/UTF8.h
new file mode 100644
index 0000000..a5ed93e
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/UTF8.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_UTF8_h
+#define WTF_UTF8_h
+
+#include "Unicode.h"
+
+namespace WTF {
+ namespace Unicode {
+
+ // Given a first byte, gives the length of the UTF-8 sequence it begins.
+ // Returns 0 for bytes that are not legal starts of UTF-8 sequences.
+ // Only allows sequences of up to 4 bytes, since that works for all Unicode characters (U-00000000 to U-0010FFFF).
+ int UTF8SequenceLength(char);
+
+ // Takes a null-terminated C-style string with a UTF-8 sequence in it and converts it to a character.
+ // Only allows Unicode characters (U-00000000 to U-0010FFFF).
+ // Returns -1 if the sequence is not valid (including presence of extra bytes).
+ int decodeUTF8Sequence(const char*);
+
+ typedef enum {
+ conversionOK, // conversion successful
+ sourceExhausted, // partial character in source, but hit end
+        targetExhausted, // insufficient room in target for conversion
+ sourceIllegal // source sequence is illegal/malformed
+ } ConversionResult;
+
+ // These conversion functions take a "strict" argument. When this
+ // flag is set to strict, both irregular sequences and isolated surrogates
+ // will cause an error. When the flag is set to lenient, both irregular
+ // sequences and isolated surrogates are converted.
+ //
+ // Whether the flag is strict or lenient, all illegal sequences will cause
+ // an error return. This includes sequences such as: <F4 90 80 80>, <C0 80>,
+ // or <A0> in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code
+ // must check for illegal sequences.
+ //
+ // When the flag is set to lenient, characters over 0x10FFFF are converted
+ // to the replacement character; otherwise (when the flag is set to strict)
+ // they constitute an error.
+
+ ConversionResult convertUTF8ToUTF16(
+ const char** sourceStart, const char* sourceEnd,
+ UChar** targetStart, UChar* targetEnd, bool strict = true);
+
+ ConversionResult convertUTF16ToUTF8(
+ const UChar** sourceStart, const UChar* sourceEnd,
+ char** targetStart, char* targetEnd, bool strict = true);
+ }
+}
+
+#endif // WTF_UTF8_h
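A hedged usage sketch for the strict conversion entry point declared above; decodeStrict and the buffer handling are illustrative, and a UTF-16 result never needs more code units than the UTF-8 input has bytes:

    #include <wtf/unicode/UTF8.h>

    bool decodeStrict(const char* utf8, const char* utf8End,
                      UChar* out, UChar* outEnd)
    {
        using namespace WTF::Unicode;
        const char* source = utf8;
        UChar* target = out;
        ConversionResult result =
            convertUTF8ToUTF16(&source, utf8End, &target, outEnd, true);
        // On conversionOK, [out, target) holds the UTF-16 code units; on
        // failure, source points at (or near) the offending input byte.
        return result == conversionOK;
    }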
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/Unicode.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/Unicode.h
new file mode 100644
index 0000000..f86a9b7
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/Unicode.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2006 George Staikos <staikos@kde.org>
+ * Copyright (C) 2006, 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_UNICODE_H
+#define WTF_UNICODE_H
+
+#include <wtf/Assertions.h>
+
+#if USE(QT4_UNICODE)
+#include "qt4/UnicodeQt4.h"
+#elif USE(ICU_UNICODE)
+#include <wtf/unicode/icu/UnicodeIcu.h>
+#elif USE(GLIB_UNICODE)
+#include <wtf/unicode/glib/UnicodeGLib.h>
+#else
+#error "Unknown Unicode implementation"
+#endif
+
+COMPILE_ASSERT(sizeof(UChar) == 2, UCharIsTwoBytes);
+
+#endif // WTF_UNICODE_H
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeGLib.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeGLib.cpp
new file mode 100644
index 0000000..a779b36
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeGLib.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2008 Jürg Billeter <j@bitron.ch>
+ * Copyright (C) 2008 Dominik Röttsches <dominik.roettsches@access-company.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include "UnicodeGLib.h"
+
+namespace WTF {
+namespace Unicode {
+
+UChar32 foldCase(UChar32 ch)
+{
+ GOwnPtr<GError> gerror;
+
+ GOwnPtr<char> utf8char;
+ utf8char.set(g_ucs4_to_utf8(reinterpret_cast<gunichar*>(&ch), 1, 0, 0, &gerror.outPtr()));
+ if (gerror)
+ return ch;
+
+ GOwnPtr<char> utf8caseFolded;
+ utf8caseFolded.set(g_utf8_casefold(utf8char.get(), -1));
+
+ GOwnPtr<gunichar> ucs4Result;
+ ucs4Result.set(g_utf8_to_ucs4_fast(utf8caseFolded.get(), -1, 0));
+
+ return *ucs4Result;
+}
+
+int foldCase(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ *error = false;
+ GOwnPtr<GError> gerror;
+
+ GOwnPtr<char> utf8src;
+ utf8src.set(g_utf16_to_utf8(src, srcLength, 0, 0, &gerror.outPtr()));
+ if (gerror) {
+ *error = true;
+ return -1;
+ }
+
+ GOwnPtr<char> utf8result;
+ utf8result.set(g_utf8_casefold(utf8src.get(), -1));
+
+ long utf16resultLength = -1;
+ GOwnPtr<UChar> utf16result;
+ utf16result.set(g_utf8_to_utf16(utf8result.get(), -1, 0, &utf16resultLength, &gerror.outPtr()));
+ if (gerror) {
+ *error = true;
+ return -1;
+ }
+
+ if (utf16resultLength > resultLength) {
+ *error = true;
+ return utf16resultLength;
+ }
+ memcpy(result, utf16result.get(), utf16resultLength * sizeof(UChar));
+
+ return utf16resultLength;
+}
+
+int toLower(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ *error = false;
+ GOwnPtr<GError> gerror;
+
+ GOwnPtr<char> utf8src;
+ utf8src.set(g_utf16_to_utf8(src, srcLength, 0, 0, &gerror.outPtr()));
+ if (gerror) {
+ *error = true;
+ return -1;
+ }
+
+ GOwnPtr<char> utf8result;
+ utf8result.set(g_utf8_strdown(utf8src.get(), -1));
+
+ long utf16resultLength = -1;
+ GOwnPtr<UChar> utf16result;
+ utf16result.set(g_utf8_to_utf16(utf8result.get(), -1, 0, &utf16resultLength, &gerror.outPtr()));
+ if (gerror) {
+ *error = true;
+ return -1;
+ }
+
+ if (utf16resultLength > resultLength) {
+ *error = true;
+ return utf16resultLength;
+ }
+ memcpy(result, utf16result.get(), utf16resultLength * sizeof(UChar));
+
+ return utf16resultLength;
+}
+
+int toUpper(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ *error = false;
+ GOwnPtr<GError> gerror;
+
+ GOwnPtr<char> utf8src;
+ utf8src.set(g_utf16_to_utf8(src, srcLength, 0, 0, &gerror.outPtr()));
+ if (gerror) {
+ *error = true;
+ return -1;
+ }
+
+ GOwnPtr<char> utf8result;
+ utf8result.set(g_utf8_strup(utf8src.get(), -1));
+
+ long utf16resultLength = -1;
+ GOwnPtr<UChar> utf16result;
+ utf16result.set(g_utf8_to_utf16(utf8result.get(), -1, 0, &utf16resultLength, &gerror.outPtr()));
+ if (gerror) {
+ *error = true;
+ return -1;
+ }
+
+ if (utf16resultLength > resultLength) {
+ *error = true;
+ return utf16resultLength;
+ }
+ memcpy(result, utf16result.get(), utf16resultLength * sizeof(UChar));
+
+ return utf16resultLength;
+}
+
+Direction direction(UChar32 c)
+{
+ PangoBidiType type = pango_bidi_type_for_unichar(c);
+ switch (type) {
+ case PANGO_BIDI_TYPE_L:
+ return LeftToRight;
+ case PANGO_BIDI_TYPE_R:
+ return RightToLeft;
+ case PANGO_BIDI_TYPE_AL:
+ return RightToLeftArabic;
+ case PANGO_BIDI_TYPE_LRE:
+ return LeftToRightEmbedding;
+ case PANGO_BIDI_TYPE_RLE:
+ return RightToLeftEmbedding;
+ case PANGO_BIDI_TYPE_LRO:
+ return LeftToRightOverride;
+ case PANGO_BIDI_TYPE_RLO:
+ return RightToLeftOverride;
+ case PANGO_BIDI_TYPE_PDF:
+ return PopDirectionalFormat;
+ case PANGO_BIDI_TYPE_EN:
+ return EuropeanNumber;
+ case PANGO_BIDI_TYPE_AN:
+ return ArabicNumber;
+ case PANGO_BIDI_TYPE_ES:
+ return EuropeanNumberSeparator;
+ case PANGO_BIDI_TYPE_ET:
+ return EuropeanNumberTerminator;
+ case PANGO_BIDI_TYPE_CS:
+ return CommonNumberSeparator;
+ case PANGO_BIDI_TYPE_NSM:
+ return NonSpacingMark;
+ case PANGO_BIDI_TYPE_BN:
+ return BoundaryNeutral;
+ case PANGO_BIDI_TYPE_B:
+ return BlockSeparator;
+ case PANGO_BIDI_TYPE_S:
+ return SegmentSeparator;
+ case PANGO_BIDI_TYPE_WS:
+ return WhiteSpaceNeutral;
+ default:
+ return OtherNeutral;
+ }
+}
+
+int umemcasecmp(const UChar* a, const UChar* b, int len)
+{
+ GOwnPtr<char> utf8a;
+ GOwnPtr<char> utf8b;
+
+ utf8a.set(g_utf16_to_utf8(a, len, 0, 0, 0));
+ utf8b.set(g_utf16_to_utf8(b, len, 0, 0, 0));
+
+ GOwnPtr<char> foldedA;
+ GOwnPtr<char> foldedB;
+
+ foldedA.set(g_utf8_casefold(utf8a.get(), -1));
+ foldedB.set(g_utf8_casefold(utf8b.get(), -1));
+
+ // FIXME: umemcasecmp needs to mimic u_memcasecmp of icu
+ // from the ICU docs:
+ // "Compare two strings case-insensitively using full case folding.
+    // This is equivalent to u_strcmp(u_strFoldCase(s1, n, options), u_strFoldCase(s2, n, options))."
+ //
+ // So it looks like we don't need the full g_utf8_collate here,
+ // but really a bitwise comparison of casefolded unicode chars (not utf-8 bytes).
+ // As there is no direct equivalent to this icu function in GLib, for now
+ // we'll use g_utf8_collate():
+
+ return g_utf8_collate(foldedA.get(), foldedB.get());
+}
+
+}
+}
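The FIXME in umemcasecmp() above notes that ICU's u_memcasecmp is a bitwise comparison of casefolded text, not a locale-sensitive collation. A hedged sketch of that alternative using the same GLib calls already used in this file (memcasecmpViaCasefold is an illustrative name, error handling is elided, and strcmp suffices because UTF-8 byte order coincides with code-point order):

    #include <glib.h>
    #include <string.h>

    static int memcasecmpViaCasefold(const gunichar2* a, const gunichar2* b, int len)
    {
        char* utf8a = g_utf16_to_utf8(a, len, 0, 0, 0);
        char* utf8b = g_utf16_to_utf8(b, len, 0, 0, 0);
        char* foldedA = g_utf8_casefold(utf8a, -1);
        char* foldedB = g_utf8_casefold(utf8b, -1);
        int result = strcmp(foldedA, foldedB); // code-point order, no locale
        g_free(utf8a); g_free(utf8b);
        g_free(foldedA); g_free(foldedB);
        return result;
    }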
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeGLib.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeGLib.h
new file mode 100644
index 0000000..c03d3ec
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeGLib.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2006 George Staikos <staikos@kde.org>
+ * Copyright (C) 2006 Alexey Proskuryakov <ap@nypop.com>
+ * Copyright (C) 2007 Apple Computer, Inc. All rights reserved.
+ * Copyright (C) 2008 Jürg Billeter <j@bitron.ch>
+ * Copyright (C) 2008 Dominik Röttsches <dominik.roettsches@access-company.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef UnicodeGLib_h
+#define UnicodeGLib_h
+
+#include "UnicodeMacrosFromICU.h"
+#include <wtf/GOwnPtr.h>
+
+#include <glib.h>
+#include <pango/pango.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+typedef uint16_t UChar;
+typedef int32_t UChar32;
+
+namespace WTF {
+namespace Unicode {
+
+enum Direction {
+ LeftToRight,
+ RightToLeft,
+ EuropeanNumber,
+ EuropeanNumberSeparator,
+ EuropeanNumberTerminator,
+ ArabicNumber,
+ CommonNumberSeparator,
+ BlockSeparator,
+ SegmentSeparator,
+ WhiteSpaceNeutral,
+ OtherNeutral,
+ LeftToRightEmbedding,
+ LeftToRightOverride,
+ RightToLeftArabic,
+ RightToLeftEmbedding,
+ RightToLeftOverride,
+ PopDirectionalFormat,
+ NonSpacingMark,
+ BoundaryNeutral
+};
+
+enum DecompositionType {
+ DecompositionNone,
+ DecompositionCanonical,
+ DecompositionCompat,
+ DecompositionCircle,
+ DecompositionFinal,
+ DecompositionFont,
+ DecompositionFraction,
+ DecompositionInitial,
+ DecompositionIsolated,
+ DecompositionMedial,
+ DecompositionNarrow,
+ DecompositionNoBreak,
+ DecompositionSmall,
+ DecompositionSquare,
+ DecompositionSub,
+ DecompositionSuper,
+ DecompositionVertical,
+ DecompositionWide,
+};
+
+enum CharCategory {
+ NoCategory = 0,
+ Other_NotAssigned = U_MASK(G_UNICODE_UNASSIGNED),
+ Letter_Uppercase = U_MASK(G_UNICODE_UPPERCASE_LETTER),
+ Letter_Lowercase = U_MASK(G_UNICODE_LOWERCASE_LETTER),
+ Letter_Titlecase = U_MASK(G_UNICODE_TITLECASE_LETTER),
+ Letter_Modifier = U_MASK(G_UNICODE_MODIFIER_LETTER),
+ Letter_Other = U_MASK(G_UNICODE_OTHER_LETTER),
+
+ Mark_NonSpacing = U_MASK(G_UNICODE_NON_SPACING_MARK),
+ Mark_Enclosing = U_MASK(G_UNICODE_ENCLOSING_MARK),
+ Mark_SpacingCombining = U_MASK(G_UNICODE_COMBINING_MARK),
+
+ Number_DecimalDigit = U_MASK(G_UNICODE_DECIMAL_NUMBER),
+ Number_Letter = U_MASK(G_UNICODE_LETTER_NUMBER),
+ Number_Other = U_MASK(G_UNICODE_OTHER_NUMBER),
+
+ Separator_Space = U_MASK(G_UNICODE_SPACE_SEPARATOR),
+ Separator_Line = U_MASK(G_UNICODE_LINE_SEPARATOR),
+ Separator_Paragraph = U_MASK(G_UNICODE_PARAGRAPH_SEPARATOR),
+
+ Other_Control = U_MASK(G_UNICODE_CONTROL),
+ Other_Format = U_MASK(G_UNICODE_FORMAT),
+ Other_PrivateUse = U_MASK(G_UNICODE_PRIVATE_USE),
+ Other_Surrogate = U_MASK(G_UNICODE_SURROGATE),
+
+ Punctuation_Dash = U_MASK(G_UNICODE_DASH_PUNCTUATION),
+ Punctuation_Open = U_MASK(G_UNICODE_OPEN_PUNCTUATION),
+ Punctuation_Close = U_MASK(G_UNICODE_CLOSE_PUNCTUATION),
+ Punctuation_Connector = U_MASK(G_UNICODE_CONNECT_PUNCTUATION),
+ Punctuation_Other = U_MASK(G_UNICODE_OTHER_PUNCTUATION),
+
+ Symbol_Math = U_MASK(G_UNICODE_MATH_SYMBOL),
+ Symbol_Currency = U_MASK(G_UNICODE_CURRENCY_SYMBOL),
+ Symbol_Modifier = U_MASK(G_UNICODE_MODIFIER_SYMBOL),
+ Symbol_Other = U_MASK(G_UNICODE_OTHER_SYMBOL),
+
+ Punctuation_InitialQuote = U_MASK(G_UNICODE_INITIAL_PUNCTUATION),
+ Punctuation_FinalQuote = U_MASK(G_UNICODE_FINAL_PUNCTUATION)
+};
+
+UChar32 foldCase(UChar32);
+
+int foldCase(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error);
+
+int toLower(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error);
+
+inline UChar32 toLower(UChar32 c)
+{
+ return g_unichar_tolower(c);
+}
+
+inline UChar32 toUpper(UChar32 c)
+{
+ return g_unichar_toupper(c);
+}
+
+int toUpper(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error);
+
+inline UChar32 toTitleCase(UChar32 c)
+{
+ return g_unichar_totitle(c);
+}
+
+inline bool isArabicChar(UChar32 c)
+{
+ return c >= 0x0600 && c <= 0x06FF;
+}
+
+inline bool isFormatChar(UChar32 c)
+{
+ return g_unichar_type(c) == G_UNICODE_FORMAT;
+}
+
+inline bool isSeparatorSpace(UChar32 c)
+{
+ return g_unichar_type(c) == G_UNICODE_SPACE_SEPARATOR;
+}
+
+inline bool isPrintableChar(UChar32 c)
+{
+ return g_unichar_isprint(c);
+}
+
+inline bool isDigit(UChar32 c)
+{
+ return g_unichar_isdigit(c);
+}
+
+inline bool isPunct(UChar32 c)
+{
+ return g_unichar_ispunct(c);
+}
+
+inline bool hasLineBreakingPropertyComplexContext(UChar32 c)
+{
+ // FIXME
+ return false;
+}
+
+inline bool hasLineBreakingPropertyComplexContextOrIdeographic(UChar32 c)
+{
+ // FIXME
+ return false;
+}
+
+inline UChar32 mirroredChar(UChar32 c)
+{
+ gunichar mirror = 0;
+ g_unichar_get_mirror_char(c, &mirror);
+ return mirror;
+}
+
+inline CharCategory category(UChar32 c)
+{
+ if (c > 0xffff)
+ return NoCategory;
+
+ return (CharCategory) U_MASK(g_unichar_type(c));
+}
+
+Direction direction(UChar32);
+
+inline bool isLower(UChar32 c)
+{
+ return g_unichar_islower(c);
+}
+
+inline int digitValue(UChar32 c)
+{
+ return g_unichar_digit_value(c);
+}
+
+inline uint8_t combiningClass(UChar32 c)
+{
+ // FIXME
+ // return g_unichar_combining_class(c);
+ return 0;
+}
+
+inline DecompositionType decompositionType(UChar32 c)
+{
+ // FIXME
+ return DecompositionNone;
+}
+
+int umemcasecmp(const UChar*, const UChar*, int len);
+
+}
+}
+
+#endif
+
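Because each CharCategory value in the header above is a single U_MASK bit, membership tests compose with bitwise OR, matching how ICU-based callers use the same names. A hedged sketch (isLetter is illustrative and assumes this header and its GLib/Pango dependencies are on the include path):

    #include "UnicodeGLib.h"

    inline bool isLetter(UChar32 c)
    {
        using namespace WTF::Unicode;
        return (category(c) & (Letter_Uppercase | Letter_Lowercase
            | Letter_Titlecase | Letter_Modifier | Letter_Other)) != 0;
    }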
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeMacrosFromICU.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeMacrosFromICU.h
new file mode 100644
index 0000000..5d3eca6
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/glib/UnicodeMacrosFromICU.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2006 George Staikos <staikos@kde.org>
+ * Copyright (C) 2006 Alexey Proskuryakov <ap@nypop.com>
+ * Copyright (C) 2007 Apple Computer, Inc. All rights reserved.
+ * Copyright (C) 2008 Jürg Billeter <j@bitron.ch>
+ * Copyright (C) 2008 Dominik Röttsches <dominik.roettsches@access-company.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef UnicodeMacrosFromICU_h
+#define UnicodeMacrosFromICU_h
+
+// some defines from ICU
+
+#define U16_IS_LEAD(c) (((c)&0xfffffc00)==0xd800)
+#define U16_IS_TRAIL(c) (((c)&0xfffffc00)==0xdc00)
+#define U16_SURROGATE_OFFSET ((0xd800<<10UL)+0xdc00-0x10000)
+#define U16_GET_SUPPLEMENTARY(lead, trail) \
+ (((UChar32)(lead)<<10UL)+(UChar32)(trail)-U16_SURROGATE_OFFSET)
+
+#define U16_LEAD(supplementary) (UChar)(((supplementary)>>10)+0xd7c0)
+#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3ff)|0xdc00)
+
+#define U_IS_SURROGATE(c) (((c)&0xfffff800)==0xd800)
+#define U16_IS_SINGLE(c) !U_IS_SURROGATE(c)
+#define U16_IS_SURROGATE(c) U_IS_SURROGATE(c)
+#define U16_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
+
+#define U16_PREV(s, start, i, c) { \
+ (c)=(s)[--(i)]; \
+ if(U16_IS_TRAIL(c)) { \
+ uint16_t __c2; \
+ if((i)>(start) && U16_IS_LEAD(__c2=(s)[(i)-1])) { \
+ --(i); \
+ (c)=U16_GET_SUPPLEMENTARY(__c2, (c)); \
+ } \
+ } \
+}
+
+#define U16_NEXT(s, i, length, c) { \
+ (c)=(s)[(i)++]; \
+ if(U16_IS_LEAD(c)) { \
+ uint16_t __c2; \
+ if((i)<(length) && U16_IS_TRAIL(__c2=(s)[(i)])) { \
+ ++(i); \
+ (c)=U16_GET_SUPPLEMENTARY((c), __c2); \
+ } \
+ } \
+}
+
+#define U_MASK(x) ((uint32_t)1<<(x))
+
+#endif
+
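A standalone sketch (illustrative, not part of the import) of the U16_NEXT macro above walking a UTF-16 buffer; the surrogate pair 0xD83D 0xDE00 reassembles to U+1F600 via U16_GET_SUPPLEMENTARY:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t UChar;  // the macros expect these two typedefs
    typedef int32_t UChar32;
    #include "UnicodeMacrosFromICU.h"

    int main()
    {
        const UChar s[] = { 0x0041, 0xD83D, 0xDE00 }; // 'A', then U+1F600
        int i = 0;
        const int length = 3;
        while (i < length) {
            UChar32 c;
            U16_NEXT(s, i, length, c); // consumes one code point, pairing surrogates
            printf("U+%04X\n", (unsigned)c); // prints U+0041, then U+1F600
        }
        return 0;
    }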
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp
new file mode 100644
index 0000000..6376bb3
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Collator.h"
+
+#if USE(ICU_UNICODE) && !UCONFIG_NO_COLLATION
+
+#include "Assertions.h"
+#include "Threading.h"
+#include <unicode/ucol.h>
+#include <string.h>
+
+#if PLATFORM(DARWIN)
+#include "RetainPtr.h"
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+namespace WTF {
+
+static UCollator* cachedCollator;
+static Mutex& cachedCollatorMutex()
+{
+ AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
+ return mutex;
+}
+
+Collator::Collator(const char* locale)
+ : m_collator(0)
+ , m_locale(locale ? strdup(locale) : 0)
+ , m_lowerFirst(false)
+{
+}
+
+std::auto_ptr<Collator> Collator::userDefault()
+{
+#if PLATFORM(DARWIN) && PLATFORM(CF)
+ // Mac OS X doesn't set UNIX locale to match user-selected one, so ICU default doesn't work.
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !PLATFORM(IPHONE)
+ RetainPtr<CFLocaleRef> currentLocale(AdoptCF, CFLocaleCopyCurrent());
+ CFStringRef collationOrder = (CFStringRef)CFLocaleGetValue(currentLocale.get(), kCFLocaleCollatorIdentifier);
+#else
+ RetainPtr<CFStringRef> collationOrderRetainer(AdoptCF, (CFStringRef)CFPreferencesCopyValue(CFSTR("AppleCollationOrder"), kCFPreferencesAnyApplication, kCFPreferencesCurrentUser, kCFPreferencesAnyHost));
+ CFStringRef collationOrder = collationOrderRetainer.get();
+#endif
+ char buf[256];
+ if (collationOrder) {
+ CFStringGetCString(collationOrder, buf, sizeof(buf), kCFStringEncodingASCII);
+ return std::auto_ptr<Collator>(new Collator(buf));
+ } else
+ return std::auto_ptr<Collator>(new Collator(""));
+#else
+ return std::auto_ptr<Collator>(new Collator(0));
+#endif
+}
+
+Collator::~Collator()
+{
+ releaseCollator();
+ free(m_locale);
+}
+
+void Collator::setOrderLowerFirst(bool lowerFirst)
+{
+ m_lowerFirst = lowerFirst;
+}
+
+Collator::Result Collator::collate(const UChar* lhs, size_t lhsLength, const UChar* rhs, size_t rhsLength) const
+{
+ if (!m_collator)
+ createCollator();
+
+ return static_cast<Result>(ucol_strcoll(m_collator, lhs, lhsLength, rhs, rhsLength));
+}
+
+void Collator::createCollator() const
+{
+ ASSERT(!m_collator);
+ UErrorCode status = U_ZERO_ERROR;
+
+ {
+ Locker<Mutex> lock(cachedCollatorMutex());
+ if (cachedCollator) {
+ const char* cachedCollatorLocale = ucol_getLocaleByType(cachedCollator, ULOC_REQUESTED_LOCALE, &status);
+ ASSERT(U_SUCCESS(status));
+ ASSERT(cachedCollatorLocale);
+
+ UColAttributeValue cachedCollatorLowerFirst = ucol_getAttribute(cachedCollator, UCOL_CASE_FIRST, &status);
+ ASSERT(U_SUCCESS(status));
+
+ // FIXME: the default locale is never matched, because ucol_getLocaleByType returns the actual locale used, not 0.
+ if (m_locale && 0 == strcmp(cachedCollatorLocale, m_locale)
+ && ((UCOL_LOWER_FIRST == cachedCollatorLowerFirst && m_lowerFirst) || (UCOL_UPPER_FIRST == cachedCollatorLowerFirst && !m_lowerFirst))) {
+ m_collator = cachedCollator;
+ cachedCollator = 0;
+ return;
+ }
+ }
+ }
+
+ m_collator = ucol_open(m_locale, &status);
+ if (U_FAILURE(status)) {
+ status = U_ZERO_ERROR;
+ m_collator = ucol_open("", &status); // Fallback to Unicode Collation Algorithm.
+ }
+ ASSERT(U_SUCCESS(status));
+
+ ucol_setAttribute(m_collator, UCOL_CASE_FIRST, m_lowerFirst ? UCOL_LOWER_FIRST : UCOL_UPPER_FIRST, &status);
+ ASSERT(U_SUCCESS(status));
+}
+
+void Collator::releaseCollator()
+{
+ {
+ Locker<Mutex> lock(cachedCollatorMutex());
+ if (cachedCollator)
+ ucol_close(cachedCollator);
+ cachedCollator = m_collator;
+ m_collator = 0;
+ }
+}
+
+}
+
+#endif
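A minimal usage sketch of the single-slot collator cache above, assuming Collator.h declares Result as in WebKit (Less = -1, Equal = 0, Greater = 1) and pulls in the UChar typedef; lessThan is an illustrative helper, not part of the patch:

    #include "Collator.h"
    #include <memory>

    static bool lessThan(const UChar* a, size_t aLength, const UChar* b, size_t bLength)
    {
        // userDefault() honors the Mac OS X collation preference where
        // available and falls back to ICU's default locale elsewhere.
        std::auto_ptr<WTF::Collator> collator = WTF::Collator::userDefault();
        // The first collate() call lazily creates a UCollator (or reuses the
        // one parked in the one-slot cache); ~Collator() parks it back.
        return collator->collate(a, aLength, b, bLength) == WTF::Collator::Less;
    }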
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h
new file mode 100644
index 0000000..35c6fbf
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2006 George Staikos <staikos@kde.org>
+ * Copyright (C) 2006 Alexey Proskuryakov <ap@nypop.com>
+ * Copyright (C) 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_UNICODE_ICU_H
+#define WTF_UNICODE_ICU_H
+
+#include <stdlib.h>
+#include <unicode/uchar.h>
+#include <unicode/ustring.h>
+#include <unicode/utf16.h>
+
+namespace WTF {
+namespace Unicode {
+
+enum Direction {
+ LeftToRight = U_LEFT_TO_RIGHT,
+ RightToLeft = U_RIGHT_TO_LEFT,
+ EuropeanNumber = U_EUROPEAN_NUMBER,
+ EuropeanNumberSeparator = U_EUROPEAN_NUMBER_SEPARATOR,
+ EuropeanNumberTerminator = U_EUROPEAN_NUMBER_TERMINATOR,
+ ArabicNumber = U_ARABIC_NUMBER,
+ CommonNumberSeparator = U_COMMON_NUMBER_SEPARATOR,
+ BlockSeparator = U_BLOCK_SEPARATOR,
+ SegmentSeparator = U_SEGMENT_SEPARATOR,
+ WhiteSpaceNeutral = U_WHITE_SPACE_NEUTRAL,
+ OtherNeutral = U_OTHER_NEUTRAL,
+ LeftToRightEmbedding = U_LEFT_TO_RIGHT_EMBEDDING,
+ LeftToRightOverride = U_LEFT_TO_RIGHT_OVERRIDE,
+ RightToLeftArabic = U_RIGHT_TO_LEFT_ARABIC,
+ RightToLeftEmbedding = U_RIGHT_TO_LEFT_EMBEDDING,
+ RightToLeftOverride = U_RIGHT_TO_LEFT_OVERRIDE,
+ PopDirectionalFormat = U_POP_DIRECTIONAL_FORMAT,
+ NonSpacingMark = U_DIR_NON_SPACING_MARK,
+ BoundaryNeutral = U_BOUNDARY_NEUTRAL
+};
+
+enum DecompositionType {
+ DecompositionNone = U_DT_NONE,
+ DecompositionCanonical = U_DT_CANONICAL,
+ DecompositionCompat = U_DT_COMPAT,
+ DecompositionCircle = U_DT_CIRCLE,
+ DecompositionFinal = U_DT_FINAL,
+ DecompositionFont = U_DT_FONT,
+ DecompositionFraction = U_DT_FRACTION,
+ DecompositionInitial = U_DT_INITIAL,
+ DecompositionIsolated = U_DT_ISOLATED,
+ DecompositionMedial = U_DT_MEDIAL,
+ DecompositionNarrow = U_DT_NARROW,
+ DecompositionNoBreak = U_DT_NOBREAK,
+ DecompositionSmall = U_DT_SMALL,
+ DecompositionSquare = U_DT_SQUARE,
+ DecompositionSub = U_DT_SUB,
+ DecompositionSuper = U_DT_SUPER,
+ DecompositionVertical = U_DT_VERTICAL,
+ DecompositionWide = U_DT_WIDE,
+};
+
+enum CharCategory {
+ NoCategory = 0,
+ Other_NotAssigned = U_MASK(U_GENERAL_OTHER_TYPES),
+ Letter_Uppercase = U_MASK(U_UPPERCASE_LETTER),
+ Letter_Lowercase = U_MASK(U_LOWERCASE_LETTER),
+ Letter_Titlecase = U_MASK(U_TITLECASE_LETTER),
+ Letter_Modifier = U_MASK(U_MODIFIER_LETTER),
+ Letter_Other = U_MASK(U_OTHER_LETTER),
+
+ Mark_NonSpacing = U_MASK(U_NON_SPACING_MARK),
+ Mark_Enclosing = U_MASK(U_ENCLOSING_MARK),
+ Mark_SpacingCombining = U_MASK(U_COMBINING_SPACING_MARK),
+
+ Number_DecimalDigit = U_MASK(U_DECIMAL_DIGIT_NUMBER),
+ Number_Letter = U_MASK(U_LETTER_NUMBER),
+ Number_Other = U_MASK(U_OTHER_NUMBER),
+
+ Separator_Space = U_MASK(U_SPACE_SEPARATOR),
+ Separator_Line = U_MASK(U_LINE_SEPARATOR),
+ Separator_Paragraph = U_MASK(U_PARAGRAPH_SEPARATOR),
+
+ Other_Control = U_MASK(U_CONTROL_CHAR),
+ Other_Format = U_MASK(U_FORMAT_CHAR),
+ Other_PrivateUse = U_MASK(U_PRIVATE_USE_CHAR),
+ Other_Surrogate = U_MASK(U_SURROGATE),
+
+ Punctuation_Dash = U_MASK(U_DASH_PUNCTUATION),
+ Punctuation_Open = U_MASK(U_START_PUNCTUATION),
+ Punctuation_Close = U_MASK(U_END_PUNCTUATION),
+ Punctuation_Connector = U_MASK(U_CONNECTOR_PUNCTUATION),
+ Punctuation_Other = U_MASK(U_OTHER_PUNCTUATION),
+
+ Symbol_Math = U_MASK(U_MATH_SYMBOL),
+ Symbol_Currency = U_MASK(U_CURRENCY_SYMBOL),
+ Symbol_Modifier = U_MASK(U_MODIFIER_SYMBOL),
+ Symbol_Other = U_MASK(U_OTHER_SYMBOL),
+
+ Punctuation_InitialQuote = U_MASK(U_INITIAL_PUNCTUATION),
+ Punctuation_FinalQuote = U_MASK(U_FINAL_PUNCTUATION)
+};
+
+inline UChar32 foldCase(UChar32 c)
+{
+ return u_foldCase(c, U_FOLD_CASE_DEFAULT);
+}
+
+inline int foldCase(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ UErrorCode status = U_ZERO_ERROR;
+ int realLength = u_strFoldCase(result, resultLength, src, srcLength, U_FOLD_CASE_DEFAULT, &status);
+ *error = !U_SUCCESS(status);
+ return realLength;
+}
+
+inline int toLower(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ UErrorCode status = U_ZERO_ERROR;
+ int realLength = u_strToLower(result, resultLength, src, srcLength, "", &status);
+ *error = !!U_FAILURE(status);
+ return realLength;
+}
+
+inline UChar32 toLower(UChar32 c)
+{
+ return u_tolower(c);
+}
+
+inline UChar32 toUpper(UChar32 c)
+{
+ return u_toupper(c);
+}
+
+inline int toUpper(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ UErrorCode status = U_ZERO_ERROR;
+ int realLength = u_strToUpper(result, resultLength, src, srcLength, "", &status);
+ *error = !!U_FAILURE(status);
+ return realLength;
+}
+
+inline UChar32 toTitleCase(UChar32 c)
+{
+ return u_totitle(c);
+}
+
+inline bool isArabicChar(UChar32 c)
+{
+ return ublock_getCode(c) == UBLOCK_ARABIC;
+}
+
+inline bool isSeparatorSpace(UChar32 c)
+{
+ return u_charType(c) == U_SPACE_SEPARATOR;
+}
+
+inline bool isPrintableChar(UChar32 c)
+{
+ return !!u_isprint(c);
+}
+
+inline bool isPunct(UChar32 c)
+{
+ return !!u_ispunct(c);
+}
+
+inline bool hasLineBreakingPropertyComplexContext(UChar32 c)
+{
+ return u_getIntPropertyValue(c, UCHAR_LINE_BREAK) == U_LB_COMPLEX_CONTEXT;
+}
+
+inline bool hasLineBreakingPropertyComplexContextOrIdeographic(UChar32 c)
+{
+ int32_t prop = u_getIntPropertyValue(c, UCHAR_LINE_BREAK);
+ return prop == U_LB_COMPLEX_CONTEXT || prop == U_LB_IDEOGRAPHIC;
+}
+
+inline UChar32 mirroredChar(UChar32 c)
+{
+ return u_charMirror(c);
+}
+
+inline CharCategory category(UChar32 c)
+{
+ return static_cast<CharCategory>(U_GET_GC_MASK(c));
+}
+
+inline Direction direction(UChar32 c)
+{
+ return static_cast<Direction>(u_charDirection(c));
+}
+
+inline bool isLower(UChar32 c)
+{
+ return !!u_islower(c);
+}
+
+inline uint8_t combiningClass(UChar32 c)
+{
+ return u_getCombiningClass(c);
+}
+
+inline DecompositionType decompositionType(UChar32 c)
+{
+ return static_cast<DecompositionType>(u_getIntPropertyValue(c, UCHAR_DECOMPOSITION_TYPE));
+}
+
+inline int umemcasecmp(const UChar* a, const UChar* b, int len)
+{
+ return u_memcasecmp(a, b, len, U_FOLD_CASE_DEFAULT);
+}
+
+} }
+
+#endif // WTF_UNICODE_ICU_H
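Since every CharCategory value above is a single U_MASK bit over ICU's general-category codes, membership in any set of categories costs one bitwise AND. A small sketch; isWordChar is an illustrative name, not WTF API:

    #include "UnicodeIcu.h"

    static bool isWordChar(UChar32 c)
    {
        using namespace WTF::Unicode;
        // Letters and decimal digits, tested with one AND against the
        // combined category mask.
        const unsigned mask = Letter_Uppercase | Letter_Lowercase
            | Letter_Titlecase | Letter_Modifier | Letter_Other
            | Number_DecimalDigit;
        return category(c) & mask;
    }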
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h
new file mode 100644
index 0000000..65b3e79
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h
@@ -0,0 +1,545 @@
+/*
+ * Copyright (C) 2006 George Staikos <staikos@kde.org>
+ * Copyright (C) 2006 Alexey Proskuryakov <ap@nypop.com>
+ * Copyright (C) 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_UNICODE_QT4_H
+#define WTF_UNICODE_QT4_H
+
+#include <QChar>
+#include <QString>
+
+#include <config.h>
+
+#include <stdint.h>
+
+#if QT_VERSION >= 0x040300
+QT_BEGIN_NAMESPACE
+namespace QUnicodeTables {
+ struct Properties {
+ ushort category : 8;
+ ushort line_break_class : 8;
+ ushort direction : 8;
+ ushort combiningClass : 8;
+ ushort joining : 2;
+ signed short digitValue : 6; /* 5 needed */
+ ushort unicodeVersion : 4;
+ ushort lowerCaseSpecial : 1;
+ ushort upperCaseSpecial : 1;
+ ushort titleCaseSpecial : 1;
+ ushort caseFoldSpecial : 1; /* currently unused */
+ signed short mirrorDiff : 16;
+ signed short lowerCaseDiff : 16;
+ signed short upperCaseDiff : 16;
+ signed short titleCaseDiff : 16;
+ signed short caseFoldDiff : 16;
+ };
+ Q_CORE_EXPORT const Properties * QT_FASTCALL properties(uint ucs4);
+ Q_CORE_EXPORT const Properties * QT_FASTCALL properties(ushort ucs2);
+}
+QT_END_NAMESPACE
+#endif
+
+// ugly hack to make UChar compatible with JSChar in API/JSStringRef.h
+#if defined(Q_OS_WIN)
+typedef wchar_t UChar;
+#else
+typedef uint16_t UChar;
+#endif
+typedef uint32_t UChar32;
+
+// some defines from ICU
+
+#define U16_IS_LEAD(c) (((c)&0xfffffc00)==0xd800)
+#define U16_IS_TRAIL(c) (((c)&0xfffffc00)==0xdc00)
+#define U16_SURROGATE_OFFSET ((0xd800<<10UL)+0xdc00-0x10000)
+#define U16_GET_SUPPLEMENTARY(lead, trail) \
+ (((UChar32)(lead)<<10UL)+(UChar32)(trail)-U16_SURROGATE_OFFSET)
+
+#define U16_LEAD(supplementary) (UChar)(((supplementary)>>10)+0xd7c0)
+#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3ff)|0xdc00)
+
+#define U_IS_SURROGATE(c) (((c)&0xfffff800)==0xd800)
+#define U16_IS_SINGLE(c) !U_IS_SURROGATE(c)
+#define U16_IS_SURROGATE(c) U_IS_SURROGATE(c)
+#define U16_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
+
+#define U16_NEXT(s, i, length, c) { \
+ (c)=(s)[(i)++]; \
+ if(U16_IS_LEAD(c)) { \
+ uint16_t __c2; \
+ if((i)<(length) && U16_IS_TRAIL(__c2=(s)[(i)])) { \
+ ++(i); \
+ (c)=U16_GET_SUPPLEMENTARY((c), __c2); \
+ } \
+ } \
+}
+
+#define U16_PREV(s, start, i, c) { \
+ (c)=(s)[--(i)]; \
+ if(U16_IS_TRAIL(c)) { \
+ uint16_t __c2; \
+ if((i)>(start) && U16_IS_LEAD(__c2=(s)[(i)-1])) { \
+ --(i); \
+ (c)=U16_GET_SUPPLEMENTARY(__c2, (c)); \
+ } \
+ } \
+}
+
+#define U_MASK(x) ((uint32_t)1<<(x))
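The U16_NEXT/U16_PREV macros above mirror ICU's: they step an index through UTF-16 code units, pairing a lead and trail surrogate into a single UChar32. A short sketch of forward iteration; countCodePoints is illustrative only:

    // Count code points (not code units) in a UTF-16 buffer.
    static int countCodePoints(const UChar* s, int length)
    {
        int count = 0;
        int i = 0;
        while (i < length) {
            UChar32 c;
            U16_NEXT(s, i, length, c); // advances i by 1 or 2 units, pairing surrogates
            (void)c; // only the count matters here
            ++count;
        }
        return count;
    }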
+
+namespace WTF {
+namespace Unicode {
+
+QT_USE_NAMESPACE
+
+enum Direction {
+ LeftToRight = QChar::DirL,
+ RightToLeft = QChar::DirR,
+ EuropeanNumber = QChar::DirEN,
+ EuropeanNumberSeparator = QChar::DirES,
+ EuropeanNumberTerminator = QChar::DirET,
+ ArabicNumber = QChar::DirAN,
+ CommonNumberSeparator = QChar::DirCS,
+ BlockSeparator = QChar::DirB,
+ SegmentSeparator = QChar::DirS,
+ WhiteSpaceNeutral = QChar::DirWS,
+ OtherNeutral = QChar::DirON,
+ LeftToRightEmbedding = QChar::DirLRE,
+ LeftToRightOverride = QChar::DirLRO,
+ RightToLeftArabic = QChar::DirAL,
+ RightToLeftEmbedding = QChar::DirRLE,
+ RightToLeftOverride = QChar::DirRLO,
+ PopDirectionalFormat = QChar::DirPDF,
+ NonSpacingMark = QChar::DirNSM,
+ BoundaryNeutral = QChar::DirBN
+};
+
+enum DecompositionType {
+ DecompositionNone = QChar::NoDecomposition,
+ DecompositionCanonical = QChar::Canonical,
+ DecompositionCompat = QChar::Compat,
+ DecompositionCircle = QChar::Circle,
+ DecompositionFinal = QChar::Final,
+ DecompositionFont = QChar::Font,
+ DecompositionFraction = QChar::Fraction,
+ DecompositionInitial = QChar::Initial,
+ DecompositionIsolated = QChar::Isolated,
+ DecompositionMedial = QChar::Medial,
+ DecompositionNarrow = QChar::Narrow,
+ DecompositionNoBreak = QChar::NoBreak,
+ DecompositionSmall = QChar::Small,
+ DecompositionSquare = QChar::Square,
+ DecompositionSub = QChar::Sub,
+ DecompositionSuper = QChar::Super,
+ DecompositionVertical = QChar::Vertical,
+ DecompositionWide = QChar::Wide
+};
+
+enum CharCategory {
+ NoCategory = 0,
+ Mark_NonSpacing = U_MASK(QChar::Mark_NonSpacing),
+ Mark_SpacingCombining = U_MASK(QChar::Mark_SpacingCombining),
+ Mark_Enclosing = U_MASK(QChar::Mark_Enclosing),
+ Number_DecimalDigit = U_MASK(QChar::Number_DecimalDigit),
+ Number_Letter = U_MASK(QChar::Number_Letter),
+ Number_Other = U_MASK(QChar::Number_Other),
+ Separator_Space = U_MASK(QChar::Separator_Space),
+ Separator_Line = U_MASK(QChar::Separator_Line),
+ Separator_Paragraph = U_MASK(QChar::Separator_Paragraph),
+ Other_Control = U_MASK(QChar::Other_Control),
+ Other_Format = U_MASK(QChar::Other_Format),
+ Other_Surrogate = U_MASK(QChar::Other_Surrogate),
+ Other_PrivateUse = U_MASK(QChar::Other_PrivateUse),
+ Other_NotAssigned = U_MASK(QChar::Other_NotAssigned),
+ Letter_Uppercase = U_MASK(QChar::Letter_Uppercase),
+ Letter_Lowercase = U_MASK(QChar::Letter_Lowercase),
+ Letter_Titlecase = U_MASK(QChar::Letter_Titlecase),
+ Letter_Modifier = U_MASK(QChar::Letter_Modifier),
+ Letter_Other = U_MASK(QChar::Letter_Other),
+ Punctuation_Connector = U_MASK(QChar::Punctuation_Connector),
+ Punctuation_Dash = U_MASK(QChar::Punctuation_Dash),
+ Punctuation_Open = U_MASK(QChar::Punctuation_Open),
+ Punctuation_Close = U_MASK(QChar::Punctuation_Close),
+ Punctuation_InitialQuote = U_MASK(QChar::Punctuation_InitialQuote),
+ Punctuation_FinalQuote = U_MASK(QChar::Punctuation_FinalQuote),
+ Punctuation_Other = U_MASK(QChar::Punctuation_Other),
+ Symbol_Math = U_MASK(QChar::Symbol_Math),
+ Symbol_Currency = U_MASK(QChar::Symbol_Currency),
+ Symbol_Modifier = U_MASK(QChar::Symbol_Modifier),
+ Symbol_Other = U_MASK(QChar::Symbol_Other)
+};
+
+
+#if QT_VERSION >= 0x040300
+
+// FIXME: handle surrogates correctly in all methods
+
+inline UChar32 toLower(UChar32 ch)
+{
+ return QChar::toLower(ch);
+}
+
+inline int toLower(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ const UChar *e = src + srcLength;
+ const UChar *s = src;
+ UChar *r = result;
+ uint rindex = 0;
+
+ // this avoids one out-of-bounds check in the loop
+ if (s < e && QChar(*s).isLowSurrogate()) {
+ if (r)
+ r[rindex] = *s++;
+ ++rindex;
+ }
+
+ int needed = 0;
+ while (s < e && (rindex < uint(resultLength) || !r)) {
+ uint c = *s;
+ if (QChar(c).isLowSurrogate() && QChar(*(s - 1)).isHighSurrogate())
+ c = QChar::surrogateToUcs4(*(s - 1), c);
+ const QUnicodeTables::Properties *prop = QUnicodeTables::properties(c);
+ if (prop->lowerCaseSpecial) {
+ QString qstring;
+ if (c < 0x10000) {
+ qstring += QChar(c);
+ } else {
+ qstring += QChar(*(s-1));
+ qstring += QChar(*s);
+ }
+ qstring = qstring.toLower();
+ for (int i = 0; i < qstring.length(); ++i) {
+ if (rindex >= uint(resultLength)) {
+ needed += qstring.length() - i;
+ break;
+ }
+ if (r)
+ r[rindex] = qstring.at(i).unicode();
+ ++rindex;
+ }
+ } else {
+ if (r)
+ r[rindex] = *s + prop->lowerCaseDiff;
+ ++rindex;
+ }
+ ++s;
+ }
+ if (s < e)
+ needed += e - s;
+ *error = (needed != 0);
+ if (rindex < uint(resultLength))
+ r[rindex] = 0;
+ return rindex + needed;
+}
+
+inline UChar32 toUpper(UChar32 ch)
+{
+ return QChar::toUpper(ch);
+}
+
+inline int toUpper(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ const UChar *e = src + srcLength;
+ const UChar *s = src;
+ UChar *r = result;
+ int rindex = 0;
+
+ // this avoids one out-of-bounds check in the loop
+ if (s < e && QChar(*s).isLowSurrogate()) {
+ if (r)
+ r[rindex] = *s++;
+ ++rindex;
+ }
+
+ int needed = 0;
+ while (s < e && (rindex < resultLength || !r)) {
+ uint c = *s;
+ if (QChar(c).isLowSurrogate() && QChar(*(s - 1)).isHighSurrogate())
+ c = QChar::surrogateToUcs4(*(s - 1), c);
+ const QUnicodeTables::Properties *prop = QUnicodeTables::properties(c);
+ if (prop->upperCaseSpecial) {
+ QString qstring;
+ if (c < 0x10000) {
+ qstring += QChar(c);
+ } else {
+ qstring += QChar(*(s-1));
+ qstring += QChar(*s);
+ }
+ qstring = qstring.toUpper();
+ for (int i = 0; i < qstring.length(); ++i) {
+ if (rindex >= resultLength) {
+ needed += qstring.length() - i;
+ break;
+ }
+ if (r)
+ r[rindex] = qstring.at(i).unicode();
+ ++rindex;
+ }
+ } else {
+ if (r)
+ r[rindex] = *s + prop->upperCaseDiff;
+ ++rindex;
+ }
+ ++s;
+ }
+ if (s < e)
+ needed += e - s;
+ *error = (needed != 0);
+ if (rindex < resultLength)
+ r[rindex] = 0;
+ return rindex + needed;
+}
+
+inline int toTitleCase(UChar32 c)
+{
+ return QChar::toTitleCase(c);
+}
+
+inline UChar32 foldCase(UChar32 c)
+{
+ return QChar::toCaseFolded(c);
+}
+
+inline int foldCase(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ // FIXME: handle special casing. Easiest with some low level API in Qt
+ *error = false;
+ if (resultLength < srcLength) {
+ *error = true;
+ return srcLength;
+ }
+ for (int i = 0; i < srcLength; ++i)
+ result[i] = QChar::toCaseFolded(ushort(src[i]));
+ return srcLength;
+}
+
+inline bool isArabicChar(UChar32 c)
+{
+ return c >= 0x0600 && c <= 0x06FF;
+}
+
+inline bool isPrintableChar(UChar32 c)
+{
+ const uint test = U_MASK(QChar::Other_Control) |
+ U_MASK(QChar::Other_NotAssigned);
+ return !(U_MASK(QChar::category(c)) & test);
+}
+
+inline bool isSeparatorSpace(UChar32 c)
+{
+ return QChar::category(c) == QChar::Separator_Space;
+}
+
+inline bool isPunct(UChar32 c)
+{
+ const uint test = U_MASK(QChar::Punctuation_Connector) |
+ U_MASK(QChar::Punctuation_Dash) |
+ U_MASK(QChar::Punctuation_Open) |
+ U_MASK(QChar::Punctuation_Close) |
+ U_MASK(QChar::Punctuation_InitialQuote) |
+ U_MASK(QChar::Punctuation_FinalQuote) |
+ U_MASK(QChar::Punctuation_Other);
+ return U_MASK(QChar::category(c)) & test;
+}
+
+inline bool isLower(UChar32 c)
+{
+ return QChar::category(c) == QChar::Letter_Lowercase;
+}
+
+inline bool hasLineBreakingPropertyComplexContext(UChar32)
+{
+ // FIXME: Implement this to return whether the character has line breaking property SA (Complex Context).
+ return false;
+}
+
+inline UChar32 mirroredChar(UChar32 c)
+{
+ return QChar::mirroredChar(c);
+}
+
+inline uint8_t combiningClass(UChar32 c)
+{
+ return QChar::combiningClass(c);
+}
+
+inline DecompositionType decompositionType(UChar32 c)
+{
+ return (DecompositionType)QChar::decompositionTag(c);
+}
+
+inline int umemcasecmp(const UChar* a, const UChar* b, int len)
+{
+ // FIXME: handle surrogates correctly; this folds case one UTF-16 code unit at a time.
+ for (int i = 0; i < len; ++i) {
+ uint c1 = QChar::toCaseFolded(ushort(a[i]));
+ uint c2 = QChar::toCaseFolded(ushort(b[i]));
+ if (c1 != c2)
+ return c1 - c2;
+ }
+ return 0;
+}
+
+inline Direction direction(UChar32 c)
+{
+ return (Direction)QChar::direction(c);
+}
+
+inline CharCategory category(UChar32 c)
+{
+ return (CharCategory) U_MASK(QChar::category(c));
+}
+
+#else
+
+inline UChar32 toLower(UChar32 ch)
+{
+ if (ch > 0xffff)
+ return ch;
+ return QChar((unsigned short)ch).toLower().unicode();
+}
+
+inline int toLower(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ *error = false;
+ if (resultLength < srcLength) {
+ *error = true;
+ return srcLength;
+ }
+ for (int i = 0; i < srcLength; ++i)
+ result[i] = QChar(src[i]).toLower().unicode();
+ return srcLength;
+}
+
+inline UChar32 toUpper(UChar32 ch)
+{
+ if (ch > 0xffff)
+ return ch;
+ return QChar((unsigned short)ch).toUpper().unicode();
+}
+
+inline int toUpper(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ *error = false;
+ if (resultLength < srcLength) {
+ *error = true;
+ return srcLength;
+ }
+ for (int i = 0; i < srcLength; ++i)
+ result[i] = QChar(src[i]).toUpper().unicode();
+ return srcLength;
+}
+
+inline int toTitleCase(UChar32 c)
+{
+ if (c > 0xffff)
+ return c;
+ return QChar((unsigned short)c).toUpper().unicode();
+}
+
+inline UChar32 foldCase(UChar32 c)
+{
+ if (c > 0xffff)
+ return c;
+ return QChar((unsigned short)c).toLower().unicode();
+}
+
+inline int foldCase(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error)
+{
+ return toLower(result, resultLength, src, srcLength, error);
+}
+
+inline bool isPrintableChar(UChar32 c)
+{
+ return (c & 0xffff0000) == 0 && QChar((unsigned short)c).isPrint();
+}
+
+inline bool isArabicChar(UChar32 c)
+{
+ return c >= 0x0600 && c <= 0x06FF;
+}
+
+inline bool isSeparatorSpace(UChar32 c)
+{
+ return (c & 0xffff0000) == 0 && QChar((unsigned short)c).category() == QChar::Separator_Space;
+}
+
+inline bool isPunct(UChar32 c)
+{
+ return (c & 0xffff0000) == 0 && QChar((unsigned short)c).isPunct();
+}
+
+inline bool isLower(UChar32 c)
+{
+ return (c & 0xffff0000) == 0 && QChar((unsigned short)c).category() == QChar::Letter_Lowercase;
+}
+
+inline UChar32 mirroredChar(UChar32 c)
+{
+ if (c > 0xffff)
+ return c;
+ return QChar(c).mirroredChar().unicode();
+}
+
+inline uint8_t combiningClass(UChar32 c)
+{
+ if (c > 0xffff)
+ return 0;
+ return QChar((unsigned short)c).combiningClass();
+}
+
+inline DecompositionType decompositionType(UChar32 c)
+{
+ if (c > 0xffff)
+ return DecompositionNone;
+ return (DecompositionType)QChar(c).decompositionTag();
+}
+
+inline int umemcasecmp(const UChar* a, const UChar* b, int len)
+{
+ for (int i = 0; i < len; ++i) {
+ QChar c1 = QChar(a[i]).toLower();
+ QChar c2 = QChar(b[i]).toLower();
+ if (c1 != c2)
+ return c1.unicode() - c2.unicode();
+ }
+ return 0;
+}
+
+inline Direction direction(UChar32 c)
+{
+ if (c > 0xffff)
+ return LeftToRight;
+ return (Direction)QChar(c).direction();
+}
+
+inline CharCategory category(UChar32 c)
+{
+ if (c > 0xffff)
+ return NoCategory;
+ return (CharCategory) U_MASK(QChar(c).category());
+}
+
+#endif
+
+} }
+
+#endif // WTF_UNICODE_QT4_H
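The buffer-taking toUpper/toLower above follow a preflight convention: if the destination is too small (special casing can expand the text), *error is set and the return value reports the length actually needed, so callers retry with a larger buffer. A sketch under that assumption; uppercased is an illustrative helper, not part of the header:

    #include <QString>
    #include <QVector>

    static QString uppercased(const UChar* src, int srcLength)
    {
        using namespace WTF::Unicode;
        bool error = false;
        QVector<UChar> buffer(srcLength);
        int realLength = toUpper(buffer.data(), buffer.size(), src, srcLength, &error);
        if (error) {
            // Buffer too small: realLength reports the needed size; retry once.
            buffer.resize(realLength);
            realLength = toUpper(buffer.data(), buffer.size(), src, srcLength, &error);
        }
        return QString(reinterpret_cast<const QChar*>(buffer.constData()), realLength);
    }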
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/FastMallocWince.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/FastMallocWince.h
new file mode 100644
index 0000000..93d9f75
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/FastMallocWince.h
@@ -0,0 +1,177 @@
+/*
+ * This file is part of the KDE libraries
+ * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2007-2009 Torch Mobile, Inc. All rights reserved
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef FastMallocWince_h
+#define FastMallocWince_h
+
+#include <new.h>
+
+#ifdef __cplusplus
+#include <new>
+#include "MemoryManager.h"
+extern "C" {
+#endif
+
+void* fastMalloc(size_t n);
+void* fastCalloc(size_t n_elements, size_t element_size);
+void fastFree(void* p);
+void* fastRealloc(void* p, size_t n);
+void* fastZeroedMalloc(size_t n);
+// These functions return 0 if an allocation fails.
+void* tryFastMalloc(size_t n);
+void* tryFastZeroedMalloc(size_t n);
+void* tryFastCalloc(size_t n_elements, size_t element_size);
+void* tryFastRealloc(void* p, size_t n);
+char* fastStrDup(const char*);
+
+#ifndef NDEBUG
+void fastMallocForbid();
+void fastMallocAllow();
+#endif
+
+#if !defined(USE_SYSTEM_MALLOC) || !USE_SYSTEM_MALLOC
+
+#define malloc(n) fastMalloc(n)
+#define calloc(n_elements, element_size) fastCalloc(n_elements, element_size)
+#define realloc(p, n) fastRealloc(p, n)
+#define free(p) fastFree(p)
+#define strdup(p) fastStrDup(p)
+
+#else
+
+#define strdup(p) _strdup(p)
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef __cplusplus
+#if !defined(USE_SYSTEM_MALLOC) || !USE_SYSTEM_MALLOC
+static inline void* __cdecl operator new(size_t s) { return fastMalloc(s); }
+static inline void __cdecl operator delete(void* p) { fastFree(p); }
+static inline void* __cdecl operator new[](size_t s) { return fastMalloc(s); }
+static inline void __cdecl operator delete[](void* p) { fastFree(p); }
+static inline void* operator new(size_t s, const std::nothrow_t&) throw() { return fastMalloc(s); }
+static inline void operator delete(void* p, const std::nothrow_t&) throw() { fastFree(p); }
+static inline void* operator new[](size_t s, const std::nothrow_t&) throw() { return fastMalloc(s); }
+static inline void operator delete[](void* p, const std::nothrow_t&) throw() { fastFree(p); }
+#endif
+
+namespace WTF {
+ // This defines a type which holds an unsigned integer and is the same
+ // size as the minimally aligned memory allocation.
+ typedef unsigned long long AllocAlignmentInteger;
+
+ namespace Internal {
+ enum AllocType { // Start with an unusual number instead of zero, because zero is common.
+ AllocTypeMalloc = 0x375d6750, // Encompasses fastMalloc, fastZeroedMalloc, fastCalloc, fastRealloc.
+ AllocTypeClassNew, // Encompasses class operator new from FastAllocBase.
+ AllocTypeClassNewArray, // Encompasses class operator new[] from FastAllocBase.
+ AllocTypeFastNew, // Encompasses fastNew.
+ AllocTypeFastNewArray, // Encompasses fastNewArray.
+ AllocTypeNew, // Encompasses global operator new.
+ AllocTypeNewArray // Encompasses global operator new[].
+ };
+ }
+
+
+#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+
+ // Malloc validation is a scheme whereby a tag is attached to an
+ // allocation which identifies how it was originally allocated.
+ // This allows us to verify that the freeing operation matches the
+ // allocation operation. If memory is allocated with operator new[]
+ // but freed with free or delete, this system would detect that.
+ // In the implementation here, the tag is an integer prepended to
+ // the allocation memory which is assigned one of the AllocType
+ // enumeration values. An alternative implementation of this
+ // scheme could store the tag somewhere else or ignore it.
+ // Users of FastMalloc don't need to know or care how this tagging
+ // is implemented.
+
+ namespace Internal {
+
+ // Return the AllocType tag associated with the allocated block p.
+ inline AllocType fastMallocMatchValidationType(const void* p)
+ {
+ const AllocAlignmentInteger* type = static_cast<const AllocAlignmentInteger*>(p) - 1;
+ return static_cast<AllocType>(*type);
+ }
+
+ // Return the address of the AllocType tag associated with the allocated block p.
+ inline AllocAlignmentInteger* fastMallocMatchValidationValue(void* p)
+ {
+ return reinterpret_cast<AllocAlignmentInteger*>(static_cast<char*>(p) - sizeof(AllocAlignmentInteger));
+ }
+
+ // Set the AllocType tag to be associated with the allocated block p.
+ inline void setFastMallocMatchValidationType(void* p, AllocType allocType)
+ {
+ AllocAlignmentInteger* type = static_cast<AllocAlignmentInteger*>(p) - 1;
+ *type = static_cast<AllocAlignmentInteger>(allocType);
+ }
+
+ // Handle a detected alloc/free mismatch. By default this calls CRASH().
+ void fastMallocMatchFailed(void* p);
+
+ } // namespace Internal
+
+ // This is a higher-level function used by FastMalloc-using code.
+ inline void fastMallocMatchValidateMalloc(void* p, Internal::AllocType allocType)
+ {
+ if (!p)
+ return;
+
+ Internal::setFastMallocMatchValidationType(p, allocType);
+ }
+
+ // This is a higher-level function used by FastMalloc-using code.
+ inline void fastMallocMatchValidateFree(void* p, Internal::AllocType allocType)
+ {
+ if (!p)
+ return;
+
+ if (Internal::fastMallocMatchValidationType(p) != allocType)
+ Internal::fastMallocMatchFailed(p);
+ Internal::setFastMallocMatchValidationType(p, Internal::AllocTypeMalloc); // Set it to this so that fastFree thinks it's OK.
+ }
+
+#else
+
+ inline void fastMallocMatchValidateMalloc(void*, Internal::AllocType)
+ {
+ }
+
+ inline void fastMallocMatchValidateFree(void*, Internal::AllocType)
+ {
+ }
+
+#endif
+
+} // namespace WTF
+
+#endif
+
+#endif // FastMallocWince_h
+
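Under ENABLE(FAST_MALLOC_MATCH_VALIDATION), the helpers above expect the tag word to sit immediately before the user block. A sketch of how an allocator wrapper would wire them up; allocateTagged/freeTagged are hypothetical names, the real FastMalloc does this inside fastMalloc/fastFree:

    static void* allocateTagged(size_t n)
    {
        using namespace WTF;
        // Reserve one AllocAlignmentInteger ahead of the block for the tag.
        AllocAlignmentInteger* tag = static_cast<AllocAlignmentInteger*>(
            fastMalloc(n + sizeof(AllocAlignmentInteger)));
        void* p = tag + 1;
        fastMallocMatchValidateMalloc(p, Internal::AllocTypeMalloc);
        return p;
    }

    static void freeTagged(void* p)
    {
        using namespace WTF;
        // fastMallocMatchFailed() fires if p was allocated as another type.
        fastMallocMatchValidateFree(p, Internal::AllocTypeMalloc);
        fastFree(static_cast<AllocAlignmentInteger*>(p) - 1);
    }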
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/MemoryManager.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/MemoryManager.cpp
new file mode 100644
index 0000000..b65b368
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/MemoryManager.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2008-2009 Torch Mobile Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#include "config.h"
+#include "MemoryManager.h"
+
+#undef malloc
+#undef calloc
+#undef realloc
+#undef free
+#undef strdup
+#undef _strdup
+#undef VirtualAlloc
+#undef VirtualFree
+
+#include <malloc.h>
+#include <windows.h>
+
+namespace WTF {
+
+MemoryManager* memoryManager()
+{
+ static MemoryManager mm;
+ return &mm;
+}
+
+MemoryManager::MemoryManager()
+ : m_allocationCanFail(false)
+{
+}
+
+MemoryManager::~MemoryManager()
+{
+}
+
+HBITMAP MemoryManager::createCompatibleBitmap(HDC hdc, int width, int height)
+{
+ return ::CreateCompatibleBitmap(hdc, width, height);
+}
+
+HBITMAP MemoryManager::createDIBSection(const BITMAPINFO* pbmi, void** ppvBits)
+{
+ return ::CreateDIBSection(0, pbmi, DIB_RGB_COLORS, ppvBits, 0, 0);
+}
+
+void* MemoryManager::m_malloc(size_t size)
+{
+ return malloc(size);
+}
+
+void* MemoryManager::m_calloc(size_t num, size_t size)
+{
+ return calloc(num, size);
+}
+
+void* MemoryManager::m_realloc(void* p, size_t size)
+{
+ return realloc(p, size);
+}
+
+void MemoryManager::m_free(void* p)
+{
+ return free(p);
+}
+
+bool MemoryManager::resizeMemory(void*, size_t)
+{
+ return false;
+}
+
+void* MemoryManager::allocate64kBlock()
+{
+ return VirtualAlloc(0, 65536, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+}
+
+void MemoryManager::free64kBlock(void* p)
+{
+ VirtualFree(p, 65536, MEM_RELEASE);
+}
+
+bool MemoryManager::onIdle(DWORD& timeLimitMs)
+{
+ return false;
+}
+
+LPVOID MemoryManager::virtualAlloc(LPVOID lpAddress, DWORD dwSize, DWORD flAllocationType, DWORD flProtect)
+{
+ return ::VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
+}
+
+BOOL MemoryManager::virtualFree(LPVOID lpAddress, DWORD dwSize, DWORD dwFreeType)
+{
+ return ::VirtualFree(lpAddress, dwSize, dwFreeType);
+}
+
+
+#if defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
+
+void *fastMalloc(size_t n) { return malloc(n); }
+void *fastCalloc(size_t n_elements, size_t element_size) { return calloc(n_elements, element_size); }
+void fastFree(void* p) { return free(p); }
+void *fastRealloc(void* p, size_t n) { return realloc(p, n); }
+
+#else
+
+void *fastMalloc(size_t n) { return MemoryManager::m_malloc(n); }
+void *fastCalloc(size_t n_elements, size_t element_size) { return MemoryManager::m_calloc(n_elements, element_size); }
+void fastFree(void* p) { return MemoryManager::m_free(p); }
+void *fastRealloc(void* p, size_t n) { return MemoryManager::m_realloc(p, n); }
+
+#endif
+
+#ifndef NDEBUG
+void fastMallocForbid() {}
+void fastMallocAllow() {}
+#endif
+
+void* fastZeroedMalloc(size_t n)
+{
+ void* p = fastMalloc(n);
+ if (p)
+ memset(p, 0, n);
+ return p;
+}
+
+void* tryFastMalloc(size_t n)
+{
+ MemoryAllocationCanFail canFail;
+ return fastMalloc(n);
+}
+
+void* tryFastZeroedMalloc(size_t n)
+{
+ MemoryAllocationCanFail canFail;
+ return fastZeroedMalloc(n);
+}
+
+void* tryFastCalloc(size_t n_elements, size_t element_size)
+{
+ MemoryAllocationCanFail canFail;
+ return fastCalloc(n_elements, element_size);
+}
+
+void* tryFastRealloc(void* p, size_t n)
+{
+ MemoryAllocationCanFail canFail;
+ return fastRealloc(p, n);
+}
+
+char* fastStrDup(const char* str)
+{
+ return _strdup(str);
+}
+
+}
\ No newline at end of file
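The tryFast* wrappers above work by installing a MemoryAllocationCanFail guard (declared in MemoryManager.h) around the underlying allocation; note that in the code shown the flag is advisory, consulted elsewhere in the port rather than by m_malloc itself. A sketch of the save/restore behavior; guardDemo is illustrative only:

    #include "MemoryManager.h"

    // Demonstrates how the RAII guards save and restore the shared flag.
    static void guardDemo()
    {
        bool before = memoryManager()->allocationCanFail(); // e.g. false
        {
            MemoryAllocationCanFail canFail;
            // allocationCanFail() == true here; tryFastMalloc installs
            // exactly this guard around its fastMalloc call.
            {
                MemoryAllocationCannotFail cannotFail;
                // Temporarily forced back to false for a critical allocation.
            }
            // true again once cannotFail destructs.
        }
        // Restored to the original value once canFail destructs.
        (void)before;
    }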
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/MemoryManager.h b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/MemoryManager.h
new file mode 100644
index 0000000..f405612
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/MemoryManager.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2008-2009 Torch Mobile Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#pragma once
+
+#include <winbase.h>
+
+typedef struct HBITMAP__* HBITMAP;
+typedef struct HDC__* HDC;
+typedef void *HANDLE;
+typedef struct tagBITMAPINFO BITMAPINFO;
+
+namespace WTF {
+
+ class MemoryManager {
+ public:
+ MemoryManager();
+ ~MemoryManager();
+
+ bool allocationCanFail() const { return m_allocationCanFail; }
+ void setAllocationCanFail(bool c) { m_allocationCanFail = c; }
+
+ static HBITMAP createCompatibleBitmap(HDC hdc, int width, int height);
+ static HBITMAP createDIBSection(const BITMAPINFO* pbmi, void** ppvBits);
+ static void* m_malloc(size_t size);
+ static void* m_calloc(size_t num, size_t size);
+ static void* m_realloc(void* p, size_t size);
+ static void m_free(void*);
+ static bool resizeMemory(void* p, size_t newSize);
+ static void* allocate64kBlock();
+ static void free64kBlock(void*);
+ static bool onIdle(DWORD& timeLimitMs);
+ static LPVOID virtualAlloc(LPVOID lpAddress, DWORD dwSize, DWORD flAllocationType, DWORD flProtect);
+ static BOOL virtualFree(LPVOID lpAddress, DWORD dwSize, DWORD dwFreeType);
+
+ private:
+ friend MemoryManager* memoryManager();
+
+ bool m_allocationCanFail;
+ };
+
+ MemoryManager* memoryManager();
+
+ class MemoryAllocationCanFail {
+ public:
+ MemoryAllocationCanFail() : m_old(memoryManager()->allocationCanFail()) { memoryManager()->setAllocationCanFail(true); }
+ ~MemoryAllocationCanFail() { memoryManager()->setAllocationCanFail(m_old); }
+ private:
+ bool m_old;
+ };
+
+ class MemoryAllocationCannotFail {
+ public:
+ MemoryAllocationCannotFail() : m_old(memoryManager()->allocationCanFail()) { memoryManager()->setAllocationCanFail(false); }
+ ~MemoryAllocationCannotFail() { memoryManager()->setAllocationCanFail(m_old); }
+ private:
+ bool m_old;
+ };
+}
+
+using WTF::MemoryManager;
+using WTF::memoryManager;
+using WTF::MemoryAllocationCanFail;
+using WTF::MemoryAllocationCannotFail;
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/mt19937ar.c b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/mt19937ar.c
new file mode 100644
index 0000000..4715958
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/wtf/wince/mt19937ar.c
@@ -0,0 +1,170 @@
+/*
+ A C-program for MT19937, with initialization improved 2002/1/26.
+ Coded by Takuji Nishimura and Makoto Matsumoto.
+
+ Before using, initialize the state by using init_genrand(seed)
+ or init_by_array(init_key, key_length).
+
+ Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. The names of its contributors may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ Any feedback is very welcome.
+ http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
+ email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
+*/
+
+#include <stdio.h>
+
+/* Period parameters */
+#define N 624
+#define M 397
+#define MATRIX_A 0x9908b0dfUL /* constant vector a */
+#define UPPER_MASK 0x80000000UL /* most significant w-r bits */
+#define LOWER_MASK 0x7fffffffUL /* least significant r bits */
+
+static unsigned long mt[N]; /* the array for the state vector */
+static int mti=N+1; /* mti==N+1 means mt[N] is not initialized */
+
+/* initializes mt[N] with a seed */
+void init_genrand(unsigned long s)
+{
+ mt[0]= s & 0xffffffffUL;
+ for (mti=1; mti<N; mti++) {
+ mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti);
+ /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
+ /* In the previous versions, MSBs of the seed affect */
+ /* only MSBs of the array mt[]. */
+ /* 2002/01/09 modified by Makoto Matsumoto */
+ mt[mti] &= 0xffffffffUL;
+ /* for >32 bit machines */
+ }
+}
+
+/* initialize by an array with array-length */
+/* init_key is the array for initializing keys */
+/* key_length is its length */
+/* slight change for C++, 2004/2/26 */
+void init_by_array(unsigned long init_key[],int key_length)
+{
+ int i, j, k;
+ init_genrand(19650218UL);
+ i=1; j=0;
+ k = (N>key_length ? N : key_length);
+ for (; k; k--) {
+ mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1664525UL))
+ + init_key[j] + j; /* non linear */
+ mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
+ i++; j++;
+ if (i>=N) { mt[0] = mt[N-1]; i=1; }
+ if (j>=key_length) j=0;
+ }
+ for (k=N-1; k; k--) {
+ mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL))
+ - i; /* non linear */
+ mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
+ i++;
+ if (i>=N) { mt[0] = mt[N-1]; i=1; }
+ }
+
+ mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */
+}
+
+/* generates a random number on [0,0xffffffff]-interval */
+unsigned long genrand_int32(void)
+{
+ unsigned long y;
+ static unsigned long mag01[2]={0x0UL, MATRIX_A};
+ /* mag01[x] = x * MATRIX_A for x=0,1 */
+
+ if (mti >= N) { /* generate N words at one time */
+ int kk;
+
+ if (mti == N+1) /* if init_genrand() has not been called, */
+ init_genrand(5489UL); /* a default initial seed is used */
+
+ for (kk=0;kk<N-M;kk++) {
+ y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
+ mt[kk] = mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1UL];
+ }
+ for (;kk<N-1;kk++) {
+ y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
+ mt[kk] = mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1UL];
+ }
+ y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK);
+ mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1UL];
+
+ mti = 0;
+ }
+
+ y = mt[mti++];
+
+ /* Tempering */
+ y ^= (y >> 11);
+ y ^= (y << 7) & 0x9d2c5680UL;
+ y ^= (y << 15) & 0xefc60000UL;
+ y ^= (y >> 18);
+
+ return y;
+}
+
+/* generates a random number on [0,0x7fffffff]-interval */
+long genrand_int31(void)
+{
+ return (long)(genrand_int32()>>1);
+}
+
+/* generates a random number on [0,1]-real-interval */
+double genrand_real1(void)
+{
+ return genrand_int32()*(1.0/4294967295.0);
+ /* divided by 2^32-1 */
+}
+
+/* generates a random number on [0,1)-real-interval */
+double genrand_real2(void)
+{
+ return genrand_int32()*(1.0/4294967296.0);
+ /* divided by 2^32 */
+}
+
+/* generates a random number on (0,1)-real-interval */
+double genrand_real3(void)
+{
+ return (((double)genrand_int32()) + 0.5)*(1.0/4294967296.0);
+ /* divided by 2^32 */
+}
+
+/* generates a random number on [0,1) with 53-bit resolution*/
+double genrand_res53(void)
+{
+ unsigned long a=genrand_int32()>>5, b=genrand_int32()>>6;
+ return(a*67108864.0+b)*(1.0/9007199254740992.0);
+}