Diffstat (limited to 'src/3rdparty/webkit/JavaScriptCore/wtf')
75 files changed, 21000 insertions, 0 deletions
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/ASCIICType.h b/src/3rdparty/webkit/JavaScriptCore/wtf/ASCIICType.h new file mode 100644 index 0000000..0c2ca70 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/ASCIICType.h @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2007, 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WTF_ASCIICType_h +#define WTF_ASCIICType_h + +#include <wtf/Assertions.h> +#include <wtf/Platform.h> + +// The behavior of many of the functions in the <ctype.h> header is dependent +// on the current locale. But in the WebKit project, all uses of those functions +// are in code processing something that's not locale-specific. These equivalents +// for some of the <ctype.h> functions are named more explicitly, not dependent +// on the C library locale, and we should also optimize them as needed. + +// All functions return false or leave the character unchanged if passed a character +// that is outside the range 0-7F. So they can be used on Unicode strings or +// characters if the intent is to do processing only if the character is ASCII. 
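//
// A minimal usage sketch (illustrative only; the UChar typedef below is an
// assumption for the example and is not defined in this header):
//
//     typedef unsigned short UChar; // UTF-16 code unit
//
//     bool isIdentifierStart(UChar c)
//     {
//         // Locale-independent, and simply false for code units above 0x7F,
//         // unlike isalpha(), whose behavior is undefined for values outside
//         // the unsigned char range and varies with the current locale.
//         return WTF::isASCIIAlpha(c) || c == '_';
//     }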
+ +namespace WTF { + + inline bool isASCII(char c) { return !(c & ~0x7F); } + inline bool isASCII(unsigned short c) { return !(c & ~0x7F); } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline bool isASCII(wchar_t c) { return !(c & ~0x7F); } +#endif + inline bool isASCII(int c) { return !(c & ~0x7F); } + + inline bool isASCIIAlpha(char c) { return (c | 0x20) >= 'a' && (c | 0x20) <= 'z'; } + inline bool isASCIIAlpha(unsigned short c) { return (c | 0x20) >= 'a' && (c | 0x20) <= 'z'; } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline bool isASCIIAlpha(wchar_t c) { return (c | 0x20) >= 'a' && (c | 0x20) <= 'z'; } +#endif + inline bool isASCIIAlpha(int c) { return (c | 0x20) >= 'a' && (c | 0x20) <= 'z'; } + + inline bool isASCIIAlphanumeric(char c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); } + inline bool isASCIIAlphanumeric(unsigned short c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline bool isASCIIAlphanumeric(wchar_t c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); } +#endif + inline bool isASCIIAlphanumeric(int c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'z'); } + + inline bool isASCIIDigit(char c) { return (c >= '0') & (c <= '9'); } + inline bool isASCIIDigit(unsigned short c) { return (c >= '0') & (c <= '9'); } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline bool isASCIIDigit(wchar_t c) { return (c >= '0') & (c <= '9'); } +#endif + inline bool isASCIIDigit(int c) { return (c >= '0') & (c <= '9'); } + + inline bool isASCIIHexDigit(char c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); } + inline bool isASCIIHexDigit(unsigned short c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline bool isASCIIHexDigit(wchar_t c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); } +#endif + inline bool isASCIIHexDigit(int c) { return (c >= '0' && c <= '9') || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f'); } + + inline bool isASCIIOctalDigit(char c) { return (c >= '0') & (c <= '7'); } + inline bool isASCIIOctalDigit(unsigned short c) { return (c >= '0') & (c <= '7'); } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline bool isASCIIOctalDigit(wchar_t c) { return (c >= '0') & (c <= '7'); } +#endif + inline bool isASCIIOctalDigit(int c) { return (c >= '0') & (c <= '7'); } + + inline bool isASCIILower(char c) { return c >= 'a' && c <= 'z'; } + inline bool isASCIILower(unsigned short c) { return c >= 'a' && c <= 'z'; } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline bool isASCIILower(wchar_t c) { return c >= 'a' && c <= 'z'; } +#endif + inline bool isASCIILower(int c) { return c >= 'a' && c <= 'z'; } + + inline bool isASCIIUpper(char c) { return c >= 'A' && c <= 'Z'; } + inline bool isASCIIUpper(unsigned short c) { return c >= 'A' && c <= 'Z'; } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline bool isASCIIUpper(wchar_t c) { return c >= 'A' && c <= 'Z'; } +#endif + inline bool isASCIIUpper(int c) { return c >= 'A' && c <= 'Z'; } + + /* + Statistics from a run of Apple's page load test for callers of isASCIISpace: + + character count + --------- ----- + non-spaces 689383 + 20 space 294720 + 0A \n 89059 + 09 \t 28320 + 0D \r 0 + 0C \f 0 + 0B \v 0 + */ + inline bool 
isASCIISpace(char c) { return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9)); } + inline bool isASCIISpace(unsigned short c) { return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9)); } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline bool isASCIISpace(wchar_t c) { return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9)); } +#endif + inline bool isASCIISpace(int c) { return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9)); } + + inline char toASCIILower(char c) { return c | ((c >= 'A' && c <= 'Z') << 5); } + inline unsigned short toASCIILower(unsigned short c) { return c | ((c >= 'A' && c <= 'Z') << 5); } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline wchar_t toASCIILower(wchar_t c) { return c | ((c >= 'A' && c <= 'Z') << 5); } +#endif + inline int toASCIILower(int c) { return c | ((c >= 'A' && c <= 'Z') << 5); } + + inline char toASCIIUpper(char c) { return static_cast<char>(c & ~((c >= 'a' && c <= 'z') << 5)); } + inline unsigned short toASCIIUpper(unsigned short c) { return static_cast<unsigned short>(c & ~((c >= 'a' && c <= 'z') << 5)); } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline wchar_t toASCIIUpper(wchar_t c) { return static_cast<wchar_t>(c & ~((c >= 'a' && c <= 'z') << 5)); } +#endif + inline int toASCIIUpper(int c) { return static_cast<int>(c & ~((c >= 'a' && c <= 'z') << 5)); } + + inline int toASCIIHexValue(char c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; } + inline int toASCIIHexValue(unsigned short c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline int toASCIIHexValue(wchar_t c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; } +#endif + inline int toASCIIHexValue(int c) { ASSERT(isASCIIHexDigit(c)); return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF; } + + inline bool isASCIIPrintable(char c) { return c >= ' ' && c <= '~'; } + inline bool isASCIIPrintable(unsigned short c) { return c >= ' ' && c <= '~'; } +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + inline bool isASCIIPrintable(wchar_t c) { return c >= ' ' && c <= '~'; } +#endif + inline bool isASCIIPrintable(int c) { return c >= ' ' && c <= '~'; } +} + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/AVLTree.h b/src/3rdparty/webkit/JavaScriptCore/wtf/AVLTree.h new file mode 100644 index 0000000..18db8ff --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/AVLTree.h @@ -0,0 +1,959 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Based on Abstract AVL Tree Template v1.5 by Walt Karas + * <http://geocities.com/wkaras/gen_cpp/avl_tree.html>. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef AVL_TREE_H_ +#define AVL_TREE_H_ + +#include "Assertions.h" + +namespace WTF { + +// Here is the reference class for BSet. +// +// class BSet +// { +// public: +// +// class ANY_bitref +// { +// public: +// operator bool (); +// void operator = (bool b); +// }; +// +// // Does not have to initialize bits. +// BSet(); +// +// // Must return a valid value for index when 0 <= index < maxDepth +// ANY_bitref operator [] (unsigned index); +// +// // Set all bits to 1. +// void set(); +// +// // Set all bits to 0. +// void reset(); +// }; + +template<unsigned maxDepth> +class AVLTreeDefaultBSet { +public: + bool& operator[](unsigned i) { ASSERT(i < maxDepth); return m_data[i]; } + void set() { for (unsigned i = 0; i < maxDepth; ++i) m_data[i] = true; } + void reset() { for (unsigned i = 0; i < maxDepth; ++i) m_data[i] = false; } + +private: + bool m_data[maxDepth]; +}; + +// How to determine maxDepth: +// d Minimum number of nodes +// 2 2 +// 3 4 +// 4 7 +// 5 12 +// 6 20 +// 7 33 +// 8 54 +// 9 88 +// 10 143 +// 11 232 +// 12 376 +// 13 609 +// 14 986 +// 15 1,596 +// 16 2,583 +// 17 4,180 +// 18 6,764 +// 19 10,945 +// 20 17,710 +// 21 28,656 +// 22 46,367 +// 23 75,024 +// 24 121,392 +// 25 196,417 +// 26 317,810 +// 27 514,228 +// 28 832,039 +// 29 1,346,268 +// 30 2,178,308 +// 31 3,524,577 +// 32 5,702,886 +// 33 9,227,464 +// 34 14,930,351 +// 35 24,157,816 +// 36 39,088,168 +// 37 63,245,985 +// 38 102,334,154 +// 39 165,580,140 +// 40 267,914,295 +// 41 433,494,436 +// 42 701,408,732 +// 43 1,134,903,169 +// 44 1,836,311,902 +// 45 2,971,215,072 +// +// E.g., if, in a particular instantiation, the maximum number of nodes in a tree instance is 1,000,000, the maximum depth should be 28. +// You pick 28 because MN(28) is 832,039, which is less than or equal to 1,000,000, and MN(29) is 1,346,268, which is strictly greater than 1,000,000. 
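//
// The table above is the standard minimal-AVL-tree recurrence MN(2) = 2,
// MN(3) = 4, MN(d) = MN(d - 1) + MN(d - 2) + 1 (equivalently,
// MN(d) = Fibonacci(d + 2) - 1). A sketch of how an entry could be
// recomputed (illustrative only, not part of this header):
//
//     unsigned long long minNodes(unsigned d)
//     {
//         if (d == 2)
//             return 2;
//         if (d == 3)
//             return 4;
//         return minNodes(d - 1) + minNodes(d - 2) + 1;
//     }
//
// For instance, minNodes(28) is 832,039 and minNodes(29) is 1,346,268, which
// is the pair of bounds used in the 1,000,000-node example above.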
+ +template <class Abstractor, unsigned maxDepth = 32, class BSet = AVLTreeDefaultBSet<maxDepth> > +class AVLTree { +public: + + typedef typename Abstractor::key key; + typedef typename Abstractor::handle handle; + typedef typename Abstractor::size size; + + enum SearchType { + EQUAL = 1, + LESS = 2, + GREATER = 4, + LESS_EQUAL = EQUAL | LESS, + GREATER_EQUAL = EQUAL | GREATER + }; + + + Abstractor& abstractor() { return abs; } + + inline handle insert(handle h); + + inline handle search(key k, SearchType st = EQUAL); + inline handle search_least(); + inline handle search_greatest(); + + inline handle remove(key k); + + inline handle subst(handle new_node); + + void purge() { abs.root = null(); } + + bool is_empty() { return abs.root == null(); } + + AVLTree() { abs.root = null(); } + + class Iterator { + public: + + // Initialize depth to invalid value, to indicate iterator is + // invalid. (Depth is zero-base.) + Iterator() { depth = ~0U; } + + void start_iter(AVLTree &tree, key k, SearchType st = EQUAL) + { + // Mask of high bit in an int. + const int MASK_HIGH_BIT = (int) ~ ((~ (unsigned) 0) >> 1); + + // Save the tree that we're going to iterate through in a + // member variable. + tree_ = &tree; + + int cmp, target_cmp; + handle h = tree_->abs.root; + unsigned d = 0; + + depth = ~0U; + + if (h == null()) + // Tree is empty. + return; + + if (st & LESS) + // Key can be greater than key of starting node. + target_cmp = 1; + else if (st & GREATER) + // Key can be less than key of starting node. + target_cmp = -1; + else + // Key must be same as key of starting node. + target_cmp = 0; + + for (;;) { + cmp = cmp_k_n(k, h); + if (cmp == 0) { + if (st & EQUAL) { + // Equal node was sought and found as starting node. + depth = d; + break; + } + cmp = -target_cmp; + } else if (target_cmp != 0) { + if (!((cmp ^ target_cmp) & MASK_HIGH_BIT)) { + // cmp and target_cmp are both negative or both positive. + depth = d; + } + } + h = cmp < 0 ? get_lt(h) : get_gt(h); + if (h == null()) + break; + branch[d] = cmp > 0; + path_h[d++] = h; + } + } + + void start_iter_least(AVLTree &tree) + { + tree_ = &tree; + + handle h = tree_->abs.root; + + depth = ~0U; + + branch.reset(); + + while (h != null()) { + if (depth != ~0U) + path_h[depth] = h; + depth++; + h = get_lt(h); + } + } + + void start_iter_greatest(AVLTree &tree) + { + tree_ = &tree; + + handle h = tree_->abs.root; + + depth = ~0U; + + branch.set(); + + while (h != null()) { + if (depth != ~0U) + path_h[depth] = h; + depth++; + h = get_gt(h); + } + } + + handle operator*() + { + if (depth == ~0U) + return null(); + + return depth == 0 ? tree_->abs.root : path_h[depth - 1]; + } + + void operator++() + { + if (depth != ~0U) { + handle h = get_gt(**this); + if (h == null()) { + do { + if (depth == 0) { + depth = ~0U; + break; + } + depth--; + } while (branch[depth]); + } else { + branch[depth] = true; + path_h[depth++] = h; + for (;;) { + h = get_lt(h); + if (h == null()) + break; + branch[depth] = false; + path_h[depth++] = h; + } + } + } + } + + void operator--() + { + if (depth != ~0U) { + handle h = get_lt(**this); + if (h == null()) + do { + if (depth == 0) { + depth = ~0U; + break; + } + depth--; + } while (!branch[depth]); + else { + branch[depth] = false; + path_h[depth++] = h; + for (;;) { + h = get_gt(h); + if (h == null()) + break; + branch[depth] = true; + path_h[depth++] = h; + } + } + } + } + + void operator++(int) { ++(*this); } + void operator--(int) { --(*this); } + + protected: + + // Tree being iterated over. 
AVLTree *tree_; + + // Records a path into the tree. If branch[n] is true, indicates + // take greater branch from the nth node in the path, otherwise + // take the less branch. branch[0] gives branch from root, and + // so on. + BSet branch; + + // Zero-based depth of path into tree. + unsigned depth; + + // Handles of nodes in path from root to current node (returned by *). + handle path_h[maxDepth - 1]; + + int cmp_k_n(key k, handle h) { return tree_->abs.compare_key_node(k, h); } + int cmp_n_n(handle h1, handle h2) { return tree_->abs.compare_node_node(h1, h2); } + handle get_lt(handle h) { return tree_->abs.get_less(h); } + handle get_gt(handle h) { return tree_->abs.get_greater(h); } + handle null() { return tree_->abs.null(); } + }; + + template<typename fwd_iter> + bool build(fwd_iter p, size num_nodes) + { + if (num_nodes == 0) { + abs.root = null(); + return true; + } + + // Gives path to subtree being built. If branch[N] is false, branch + // less from the node at depth N, if true branch greater. + BSet branch; + + // If rem[N] is true, then for the current subtree at depth N, its + // greater subtree has one more node than its less subtree. + BSet rem; + + // Depth of root node of current subtree. + unsigned depth = 0; + + // Number of nodes in current subtree. + size num_sub = num_nodes; + + // The algorithm relies on a stack of nodes whose less subtree has + // been built, but whose greater subtree has not yet been built. The + // stack is implemented as a linked list. The nodes are linked + // together by having the "greater" handle of a node set to the + // next node in the list. "less_parent" is the handle of the first + // node in the list. + handle less_parent = null(); + + // h is root of current subtree, child is one of its children. + handle h, child; + + for (;;) { + while (num_sub > 2) { + // Subtract one for root of subtree. + num_sub--; + rem[depth] = !!(num_sub & 1); + branch[depth++] = false; + num_sub >>= 1; + } + + if (num_sub == 2) { + // Build a subtree with two nodes, slanting to greater. + // I arbitrarily chose to always have the extra node in the + // greater subtree when there is an odd number of nodes to + // split between the two subtrees. + + h = *p; + p++; + child = *p; + p++; + set_lt(child, null()); + set_gt(child, null()); + set_bf(child, 0); + set_gt(h, child); + set_lt(h, null()); + set_bf(h, 1); + } else { // num_sub == 1 + // Build a subtree with one node. + + h = *p; + p++; + set_lt(h, null()); + set_gt(h, null()); + set_bf(h, 0); + } + + while (depth) { + depth--; + if (!branch[depth]) + // We've completed a less subtree. + break; + + // We've completed a greater subtree, so attach it to + // its parent (that is less than it). We pop the parent + // off the stack of less parents. + child = h; + h = less_parent; + less_parent = get_gt(h); + set_gt(h, child); + // num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1 + num_sub <<= 1; + num_sub += 1 - rem[depth]; + if (num_sub & (num_sub - 1)) + // num_sub is not a power of 2 + set_bf(h, 0); + else + // num_sub is a power of 2 + set_bf(h, 1); + } + + if (num_sub == num_nodes) + // We've completed the full tree. + break; + + // The subtree we've completed is the less subtree of the + // next node in the sequence. + + child = h; + h = *p; + p++; + set_lt(h, child); + + // Put h into stack of less parents. + set_gt(h, less_parent); + less_parent = h; + + // Proceed to creating greater than subtree of h.
+ branch[depth] = true; + num_sub += rem[depth++]; + + } // end for (;;) + + abs.root = h; + + return true; + } + +protected: + + friend class Iterator; + + // Create a class whose sole purpose is to take advantage of + // the "empty member" optimization. + struct abs_plus_root : public Abstractor { + // The handle of the root element in the AVL tree. + handle root; + }; + + abs_plus_root abs; + + + handle get_lt(handle h) { return abs.get_less(h); } + void set_lt(handle h, handle lh) { abs.set_less(h, lh); } + + handle get_gt(handle h) { return abs.get_greater(h); } + void set_gt(handle h, handle gh) { abs.set_greater(h, gh); } + + int get_bf(handle h) { return abs.get_balance_factor(h); } + void set_bf(handle h, int bf) { abs.set_balance_factor(h, bf); } + + int cmp_k_n(key k, handle h) { return abs.compare_key_node(k, h); } + int cmp_n_n(handle h1, handle h2) { return abs.compare_node_node(h1, h2); } + + handle null() { return abs.null(); } + +private: + + // Balances subtree, returns handle of root node of subtree + // after balancing. + handle balance(handle bal_h) + { + handle deep_h; + + // Either the "greater than" or the "less than" subtree of + // this node has to be 2 levels deeper (or else it wouldn't + // need balancing). + + if (get_bf(bal_h) > 0) { + // "Greater than" subtree is deeper. + + deep_h = get_gt(bal_h); + + if (get_bf(deep_h) < 0) { + handle old_h = bal_h; + bal_h = get_lt(deep_h); + + set_gt(old_h, get_lt(bal_h)); + set_lt(deep_h, get_gt(bal_h)); + set_lt(bal_h, old_h); + set_gt(bal_h, deep_h); + + int bf = get_bf(bal_h); + if (bf != 0) { + if (bf > 0) { + set_bf(old_h, -1); + set_bf(deep_h, 0); + } else { + set_bf(deep_h, 1); + set_bf(old_h, 0); + } + set_bf(bal_h, 0); + } else { + set_bf(old_h, 0); + set_bf(deep_h, 0); + } + } else { + set_gt(bal_h, get_lt(deep_h)); + set_lt(deep_h, bal_h); + if (get_bf(deep_h) == 0) { + set_bf(deep_h, -1); + set_bf(bal_h, 1); + } else { + set_bf(deep_h, 0); + set_bf(bal_h, 0); + } + bal_h = deep_h; + } + } else { + // "Less than" subtree is deeper. + + deep_h = get_lt(bal_h); + + if (get_bf(deep_h) > 0) { + handle old_h = bal_h; + bal_h = get_gt(deep_h); + set_lt(old_h, get_gt(bal_h)); + set_gt(deep_h, get_lt(bal_h)); + set_gt(bal_h, old_h); + set_lt(bal_h, deep_h); + + int bf = get_bf(bal_h); + if (bf != 0) { + if (bf < 0) { + set_bf(old_h, 1); + set_bf(deep_h, 0); + } else { + set_bf(deep_h, -1); + set_bf(old_h, 0); + } + set_bf(bal_h, 0); + } else { + set_bf(old_h, 0); + set_bf(deep_h, 0); + } + } else { + set_lt(bal_h, get_gt(deep_h)); + set_gt(deep_h, bal_h); + if (get_bf(deep_h) == 0) { + set_bf(deep_h, 1); + set_bf(bal_h, -1); + } else { + set_bf(deep_h, 0); + set_bf(bal_h, 0); + } + bal_h = deep_h; + } + } + + return bal_h; + } + +}; + +template <class Abstractor, unsigned maxDepth, class BSet> +inline typename AVLTree<Abstractor, maxDepth, BSet>::handle +AVLTree<Abstractor, maxDepth, BSet>::insert(handle h) +{ + set_lt(h, null()); + set_gt(h, null()); + set_bf(h, 0); + + if (abs.root == null()) + abs.root = h; + else { + // Last unbalanced node encountered in search for insertion point. + handle unbal = null(); + // Parent of last unbalanced node. + handle parent_unbal = null(); + // Balance factor of last unbalanced node. + int unbal_bf; + + // Zero-based depth in tree. + unsigned depth = 0, unbal_depth = 0; + + // Records a path into the tree. If branch[n] is true, indicates + // take greater branch from the nth node in the path, otherwise + // take the less branch. branch[0] gives branch from root, and + // so on. 
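// (This recorded path is what lets the code further below retrace the
// search from the last unbalanced node and update balance factors: the
// abstractor exposes no parent links, so these bits are the only record
// of the route taken from the root.)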
+ BSet branch; + + handle hh = abs.root; + handle parent = null(); + int cmp; + + do { + if (get_bf(hh) != 0) { + unbal = hh; + parent_unbal = parent; + unbal_depth = depth; + } + cmp = cmp_n_n(h, hh); + if (cmp == 0) + // Duplicate key. + return hh; + parent = hh; + hh = cmp < 0 ? get_lt(hh) : get_gt(hh); + branch[depth++] = cmp > 0; + } while (hh != null()); + + // Add node to insert as leaf of tree. + if (cmp < 0) + set_lt(parent, h); + else + set_gt(parent, h); + + depth = unbal_depth; + + if (unbal == null()) + hh = abs.root; + else { + cmp = branch[depth++] ? 1 : -1; + unbal_bf = get_bf(unbal); + if (cmp < 0) + unbal_bf--; + else // cmp > 0 + unbal_bf++; + hh = cmp < 0 ? get_lt(unbal) : get_gt(unbal); + if ((unbal_bf != -2) && (unbal_bf != 2)) { + // No rebalancing of tree is necessary. + set_bf(unbal, unbal_bf); + unbal = null(); + } + } + + if (hh != null()) + while (h != hh) { + cmp = branch[depth++] ? 1 : -1; + if (cmp < 0) { + set_bf(hh, -1); + hh = get_lt(hh); + } else { // cmp > 0 + set_bf(hh, 1); + hh = get_gt(hh); + } + } + + if (unbal != null()) { + unbal = balance(unbal); + if (parent_unbal == null()) + abs.root = unbal; + else { + depth = unbal_depth - 1; + cmp = branch[depth] ? 1 : -1; + if (cmp < 0) + set_lt(parent_unbal, unbal); + else // cmp > 0 + set_gt(parent_unbal, unbal); + } + } + } + + return h; +} + +template <class Abstractor, unsigned maxDepth, class BSet> +inline typename AVLTree<Abstractor, maxDepth, BSet>::handle +AVLTree<Abstractor, maxDepth, BSet>::search(key k, typename AVLTree<Abstractor, maxDepth, BSet>::SearchType st) +{ + const int MASK_HIGH_BIT = (int) ~ ((~ (unsigned) 0) >> 1); + + int cmp, target_cmp; + handle match_h = null(); + handle h = abs.root; + + if (st & LESS) + target_cmp = 1; + else if (st & GREATER) + target_cmp = -1; + else + target_cmp = 0; + + while (h != null()) { + cmp = cmp_k_n(k, h); + if (cmp == 0) { + if (st & EQUAL) { + match_h = h; + break; + } + cmp = -target_cmp; + } else if (target_cmp != 0) + if (!((cmp ^ target_cmp) & MASK_HIGH_BIT)) + // cmp and target_cmp are both positive or both negative. + match_h = h; + h = cmp < 0 ? get_lt(h) : get_gt(h); + } + + return match_h; +} + +template <class Abstractor, unsigned maxDepth, class BSet> +inline typename AVLTree<Abstractor, maxDepth, BSet>::handle +AVLTree<Abstractor, maxDepth, BSet>::search_least() +{ + handle h = abs.root, parent = null(); + + while (h != null()) { + parent = h; + h = get_lt(h); + } + + return parent; +} + +template <class Abstractor, unsigned maxDepth, class BSet> +inline typename AVLTree<Abstractor, maxDepth, BSet>::handle +AVLTree<Abstractor, maxDepth, BSet>::search_greatest() +{ + handle h = abs.root, parent = null(); + + while (h != null()) { + parent = h; + h = get_gt(h); + } + + return parent; +} + +template <class Abstractor, unsigned maxDepth, class BSet> +inline typename AVLTree<Abstractor, maxDepth, BSet>::handle +AVLTree<Abstractor, maxDepth, BSet>::remove(key k) +{ + // Zero-based depth in tree. + unsigned depth = 0, rm_depth; + + // Records a path into the tree. If branch[n] is true, indicates + // take greater branch from the nth node in the path, otherwise + // take the less branch. branch[0] gives branch from root, and + // so on. + BSet branch; + + handle h = abs.root; + handle parent = null(), child; + int cmp, cmp_shortened_sub_with_path; + + for (;;) { + if (h == null()) + // No node in tree with given key. + return null(); + cmp = cmp_k_n(k, h); + if (cmp == 0) + // Found node to remove. + break; + parent = h; + h = cmp < 0 ? 
get_lt(h) : get_gt(h); + branch[depth++] = cmp > 0; + cmp_shortened_sub_with_path = cmp; + } + handle rm = h; + handle parent_rm = parent; + rm_depth = depth; + + // If the node to remove is not a leaf node, we need to get a + // leaf node, or a node with a single leaf as its child, to put + // in the place of the node to remove. We will get the greatest + // node in the less subtree (of the node to remove), or the least + // node in the greater subtree. We take the leaf node from the + // deeper subtree, if there is one. + + if (get_bf(h) < 0) { + child = get_lt(h); + branch[depth] = false; + cmp = -1; + } else { + child = get_gt(h); + branch[depth] = true; + cmp = 1; + } + depth++; + + if (child != null()) { + cmp = -cmp; + do { + parent = h; + h = child; + if (cmp < 0) { + child = get_lt(h); + branch[depth] = false; + } else { + child = get_gt(h); + branch[depth] = true; + } + depth++; + } while (child != null()); + + if (parent == rm) + // Only went through do loop once. Deleted node will be replaced + // in the tree structure by one of its immediate children. + cmp_shortened_sub_with_path = -cmp; + else + cmp_shortened_sub_with_path = cmp; + + // Get the handle of the opposite child, which may not be null. + child = cmp > 0 ? get_lt(h) : get_gt(h); + } + + if (parent == null()) + // There were only 1 or 2 nodes in this tree. + abs.root = child; + else if (cmp_shortened_sub_with_path < 0) + set_lt(parent, child); + else + set_gt(parent, child); + + // "path" is the parent of the subtree being eliminated or reduced + // from a depth of 2 to 1. If "path" is the node to be removed, we + // set path to the node we're about to poke into the position of the + // node to be removed. + handle path = parent == rm ? h : parent; + + if (h != rm) { + // Poke in the replacement for the node to be removed. + set_lt(h, get_lt(rm)); + set_gt(h, get_gt(rm)); + set_bf(h, get_bf(rm)); + if (parent_rm == null()) + abs.root = h; + else { + depth = rm_depth - 1; + if (branch[depth]) + set_gt(parent_rm, h); + else + set_lt(parent_rm, h); + } + } + + if (path != null()) { + // Create a temporary linked list from the parent of the path node + // to the root node. + h = abs.root; + parent = null(); + depth = 0; + while (h != path) { + if (branch[depth++]) { + child = get_gt(h); + set_gt(h, parent); + } else { + child = get_lt(h); + set_lt(h, parent); + } + parent = h; + h = child; + } + + // Climb from the path node to the root node using the linked + // list, restoring the tree structure and rebalancing as necessary. + bool reduced_depth = true; + int bf; + cmp = cmp_shortened_sub_with_path; + for (;;) { + if (reduced_depth) { + bf = get_bf(h); + if (cmp < 0) + bf++; + else // cmp > 0 + bf--; + if ((bf == -2) || (bf == 2)) { + h = balance(h); + bf = get_bf(h); + } else + set_bf(h, bf); + reduced_depth = (bf == 0); + } + if (parent == null()) + break; + child = h; + h = parent; + cmp = branch[--depth] ? 1 : -1; + if (cmp < 0) { + parent = get_lt(h); + set_lt(h, child); + } else { + parent = get_gt(h); + set_gt(h, child); + } + } + abs.root = h; + } + + return rm; +} + +template <class Abstractor, unsigned maxDepth, class BSet> +inline typename AVLTree<Abstractor, maxDepth, BSet>::handle +AVLTree<Abstractor, maxDepth, BSet>::subst(handle new_node) +{ + handle h = abs.root; + handle parent = null(); + int cmp, last_cmp; + + /* Search for node already in tree with same key. */ + for (;;) { + if (h == null()) + /* No node in tree with same key as new node. 
*/ + return null(); + cmp = cmp_n_n(new_node, h); + if (cmp == 0) + /* Found the node to substitute new one for. */ + break; + last_cmp = cmp; + parent = h; + h = cmp < 0 ? get_lt(h) : get_gt(h); + } + + /* Copy tree housekeeping fields from node in tree to new node. */ + set_lt(new_node, get_lt(h)); + set_gt(new_node, get_gt(h)); + set_bf(new_node, get_bf(h)); + + if (parent == null()) + /* New node is also new root. */ + abs.root = new_node; + else { + /* Make parent point to new node. */ + if (last_cmp < 0) + set_lt(parent, new_node); + else + set_gt(parent, new_node); + } + + return h; +} + +} + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/AlwaysInline.h b/src/3rdparty/webkit/JavaScriptCore/wtf/AlwaysInline.h new file mode 100644 index 0000000..d39b2b9 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/AlwaysInline.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2005, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#include "Platform.h" + +#ifndef ALWAYS_INLINE +#if COMPILER(GCC) && defined(NDEBUG) && !COMPILER(MINGW) +#define ALWAYS_INLINE inline __attribute__ ((__always_inline__)) +#elif COMPILER(MSVC) && defined(NDEBUG) +#define ALWAYS_INLINE __forceinline +#else +#define ALWAYS_INLINE inline +#endif +#endif + +#ifndef NEVER_INLINE +#if COMPILER(GCC) +#define NEVER_INLINE __attribute__ ((__noinline__)) +#else +#define NEVER_INLINE +#endif +#endif + +#ifndef UNLIKELY +#if COMPILER(GCC) +#define UNLIKELY(x) __builtin_expect((x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifndef LIKELY +#if COMPILER(GCC) +#define LIKELY(x) __builtin_expect((x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/Assertions.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/Assertions.cpp new file mode 100644 index 0000000..6e04fe1 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/Assertions.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2003, 2006, 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "Assertions.h" + +#include <stdio.h> +#include <stdarg.h> +#include <string.h> + +#if PLATFORM(MAC) +#include <CoreFoundation/CFString.h> +#endif + +#if COMPILER(MSVC) && !PLATFORM(WIN_CE) +#ifndef WINVER +#define WINVER 0x0500 +#endif +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0500 +#endif +#include <windows.h> +#include <crtdbg.h> +#endif + +extern "C" { + +WTF_ATTRIBUTE_PRINTF(1, 0) +static void vprintf_stderr_common(const char* format, va_list args) +{ +#if PLATFORM(MAC) + if (strstr(format, "%@")) { + CFStringRef cfFormat = CFStringCreateWithCString(NULL, format, kCFStringEncodingUTF8); + CFStringRef str = CFStringCreateWithFormatAndArguments(NULL, NULL, cfFormat, args); + + int length = CFStringGetMaximumSizeForEncoding(CFStringGetLength(str), kCFStringEncodingUTF8); + char* buffer = (char*)malloc(length + 1); + + CFStringGetCString(str, buffer, length, kCFStringEncodingUTF8); + + fputs(buffer, stderr); + + free(buffer); + CFRelease(str); + CFRelease(cfFormat); + } else +#elif COMPILER(MSVC) && !PLATFORM(WIN_CE) + if (IsDebuggerPresent()) { + size_t size = 1024; + + do { + char* buffer = (char*)malloc(size); + + if (buffer == NULL) + break; + + if (_vsnprintf(buffer, size, format, args) != -1) { + OutputDebugStringA(buffer); + free(buffer); + break; + } + + free(buffer); + size *= 2; + } while (size > 1024); + } +#endif + vfprintf(stderr, format, args); +} + +WTF_ATTRIBUTE_PRINTF(1, 2) +static void printf_stderr_common(const char* format, ...) +{ + va_list args; + va_start(args, format); + vprintf_stderr_common(format, args); + va_end(args); +} + +static void printCallSite(const char* file, int line, const char* function) +{ +#if PLATFORM(WIN) && defined _DEBUG + _CrtDbgReport(_CRT_WARN, file, line, NULL, "%s\n", function); +#else + printf_stderr_common("(%s:%d %s)\n", file, line, function); +#endif +} + +void WTFReportAssertionFailure(const char* file, int line, const char* function, const char* assertion) +{ + if (assertion) + printf_stderr_common("ASSERTION FAILED: %s\n", assertion); + else + printf_stderr_common("SHOULD NEVER BE REACHED\n"); + printCallSite(file, line, function); +} + +void WTFReportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* assertion, const char* format, ...) +{ + printf_stderr_common("ASSERTION FAILED: "); + va_list args; + va_start(args, format); + vprintf_stderr_common(format, args); + va_end(args); + printf_stderr_common("\n%s\n", assertion); + printCallSite(file, line, function); +} + +void WTFReportArgumentAssertionFailure(const char* file, int line, const char* function, const char* argName, const char* assertion) +{ + printf_stderr_common("ARGUMENT BAD: %s, %s\n", argName, assertion); + printCallSite(file, line, function); +} + +void WTFReportFatalError(const char* file, int line, const char* function, const char* format, ...) 
+{ + printf_stderr_common("FATAL ERROR: "); + va_list args; + va_start(args, format); + vprintf_stderr_common(format, args); + va_end(args); + printf_stderr_common("\n"); + printCallSite(file, line, function); +} + +void WTFReportError(const char* file, int line, const char* function, const char* format, ...) +{ + printf_stderr_common("ERROR: "); + va_list args; + va_start(args, format); + vprintf_stderr_common(format, args); + va_end(args); + printf_stderr_common("\n"); + printCallSite(file, line, function); +} + +void WTFLog(WTFLogChannel* channel, const char* format, ...) +{ + if (channel->state != WTFLogChannelOn) + return; + + va_list args; + va_start(args, format); + vprintf_stderr_common(format, args); + va_end(args); + if (format[strlen(format) - 1] != '\n') + printf_stderr_common("\n"); +} + +void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChannel* channel, const char* format, ...) +{ + if (channel->state != WTFLogChannelOn) + return; + + va_list args; + va_start(args, format); + vprintf_stderr_common(format, args); + va_end(args); + if (format[strlen(format) - 1] != '\n') + printf_stderr_common("\n"); + printCallSite(file, line, function); +} + +} // extern "C" diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/Assertions.h b/src/3rdparty/webkit/JavaScriptCore/wtf/Assertions.h new file mode 100644 index 0000000..c17e501 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/Assertions.h @@ -0,0 +1,245 @@ +/* + * Copyright (C) 2003, 2006, 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WTF_Assertions_h +#define WTF_Assertions_h + +/* + no namespaces because this file has to be includable from C and Objective-C + + Note, this file uses many GCC extensions, but it should be compatible with + C, Objective C, C++, and Objective C++. + + For non-debug builds, everything is disabled by default. + Defining any of the symbols explicitly prevents this from having any effect. + + MSVC7 note: variadic macro support was added in MSVC8, so for now we disable + those macros in MSVC7. 
For more info, see the MSDN document on variadic + macros here: + + http://msdn2.microsoft.com/en-us/library/ms177415(VS.80).aspx +*/ + +#include "Platform.h" + +#if COMPILER(MSVC) +#include <stddef.h> +#else +#include <inttypes.h> +#endif + +#ifdef NDEBUG +#define ASSERTIONS_DISABLED_DEFAULT 1 +#else +#define ASSERTIONS_DISABLED_DEFAULT 0 +#endif + +#ifndef ASSERT_DISABLED +#define ASSERT_DISABLED ASSERTIONS_DISABLED_DEFAULT +#endif + +#ifndef ASSERT_ARG_DISABLED +#define ASSERT_ARG_DISABLED ASSERTIONS_DISABLED_DEFAULT +#endif + +#ifndef FATAL_DISABLED +#define FATAL_DISABLED ASSERTIONS_DISABLED_DEFAULT +#endif + +#ifndef ERROR_DISABLED +#define ERROR_DISABLED ASSERTIONS_DISABLED_DEFAULT +#endif + +#ifndef LOG_DISABLED +#define LOG_DISABLED ASSERTIONS_DISABLED_DEFAULT +#endif + +#if COMPILER(GCC) +#define WTF_PRETTY_FUNCTION __PRETTY_FUNCTION__ +#else +#define WTF_PRETTY_FUNCTION __FUNCTION__ +#endif + +/* WTF logging functions can process %@ in the format string to log a NSObject* but the printf format attribute + emits a warning when %@ is used in the format string. Until <rdar://problem/5195437> is resolved we can't include + the attribute when being used from Objective-C code in case it decides to use %@. */ +#if COMPILER(GCC) && !defined(__OBJC__) +#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments) __attribute__((__format__(printf, formatStringArgument, extraArguments))) +#else +#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments) +#endif + +/* These helper functions are always declared, but not necessarily always defined if the corresponding function is disabled. */ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { WTFLogChannelOff, WTFLogChannelOn } WTFLogChannelState; + +typedef struct { + unsigned mask; + const char *defaultName; + WTFLogChannelState state; +} WTFLogChannel; + +void WTFReportAssertionFailure(const char* file, int line, const char* function, const char* assertion); +void WTFReportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* assertion, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6); +void WTFReportArgumentAssertionFailure(const char* file, int line, const char* function, const char* argName, const char* assertion); +void WTFReportFatalError(const char* file, int line, const char* function, const char* format, ...) WTF_ATTRIBUTE_PRINTF(4, 5); +void WTFReportError(const char* file, int line, const char* function, const char* format, ...) WTF_ATTRIBUTE_PRINTF(4, 5); +void WTFLog(WTFLogChannel* channel, const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3); +void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChannel* channel, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6); + +#ifdef __cplusplus +} +#endif + +/* CRASH -- gets us into the debugger or the crash reporter -- signals are ignored by the crash reporter so we must do better */ + +#ifndef CRASH +#define CRASH() do { \ + *(int *)(uintptr_t)0xbbadbeef = 0; \ + ((void(*)())0)(); /* More reliable, but doesn't say BBADBEEF */ \ +} while(false) +#endif + +/* ASSERT, ASSERT_WITH_MESSAGE, ASSERT_NOT_REACHED */ + +#if PLATFORM(WIN_CE) +/* FIXME: We include this here only to avoid a conflict with the ASSERT macro. */ +#include <windows.h> +#undef min +#undef max +#undef ERROR +#endif + +#if PLATFORM(WIN_OS) +/* FIXME: Change to use something other than ASSERT to avoid this conflict with win32. 
*/ +#undef ASSERT +#endif + +#if ASSERT_DISABLED + +#define ASSERT(assertion) ((void)0) +#define ASSERT_WITH_MESSAGE(assertion, ...) ((void)0) +#define ASSERT_NOT_REACHED() ((void)0) +#define ASSERT_UNUSED(variable, assertion) ((void)variable) + +#else + +#define ASSERT(assertion) do \ + if (!(assertion)) { \ + WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \ + CRASH(); \ + } \ +while (0) +#if COMPILER(MSVC7) +#define ASSERT_WITH_MESSAGE(assertion) ((void)0) +#else +#define ASSERT_WITH_MESSAGE(assertion, ...) do \ + if (!(assertion)) { \ + WTFReportAssertionFailureWithMessage(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion, __VA_ARGS__); \ + CRASH(); \ + } \ +while (0) +#endif /* COMPILER(MSVC7) */ +#define ASSERT_NOT_REACHED() do { \ + WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, 0); \ + CRASH(); \ +} while (0) + +#define ASSERT_UNUSED(variable, assertion) ASSERT(assertion) + +#endif + +/* ASSERT_ARG */ + +#if ASSERT_ARG_DISABLED + +#define ASSERT_ARG(argName, assertion) ((void)0) + +#else + +#define ASSERT_ARG(argName, assertion) do \ + if (!(assertion)) { \ + WTFReportArgumentAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #argName, #assertion); \ + CRASH(); \ + } \ +while (0) + +#endif + +/* COMPILE_ASSERT */ +#ifndef COMPILE_ASSERT +#define COMPILE_ASSERT(exp, name) typedef int dummy##name [(exp) ? 1 : -1]; +#endif + +/* FATAL */ + +#if FATAL_DISABLED +#define FATAL(...) ((void)0) +#elif COMPILER(MSVC7) +#define FATAL() ((void)0) +#else +#define FATAL(...) do { \ + WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, __VA_ARGS__); \ + CRASH(); \ +} while (0) +#endif + +/* LOG_ERROR */ + +#if ERROR_DISABLED +#define LOG_ERROR(...) ((void)0) +#elif COMPILER(MSVC7) +#define LOG_ERROR() ((void)0) +#else +#define LOG_ERROR(...) WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, __VA_ARGS__) +#endif + +/* LOG */ + +#if LOG_DISABLED +#define LOG(channel, ...) ((void)0) +#elif COMPILER(MSVC7) +#define LOG() ((void)0) +#else +#define LOG(channel, ...) WTFLog(&JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__) +#define JOIN_LOG_CHANNEL_WITH_PREFIX(prefix, channel) JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel) +#define JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel) prefix ## channel +#endif + +/* LOG_VERBOSE */ + +#if LOG_DISABLED +#define LOG_VERBOSE(channel, ...) ((void)0) +#elif COMPILER(MSVC7) +#define LOG_VERBOSE(channel) ((void)0) +#else +#define LOG_VERBOSE(channel, ...) WTFLogVerbose(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, &JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__) +#endif + +#endif /* WTF_Assertions_h */ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/Deque.h b/src/3rdparty/webkit/JavaScriptCore/wtf/Deque.h new file mode 100644 index 0000000..70c546b --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/Deque.h @@ -0,0 +1,592 @@ +/* + * Copyright (C) 2007, 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WTF_Deque_h +#define WTF_Deque_h + +// FIXME: Could move what Vector and Deque share into a separate file. +// Deque doesn't actually use Vector. + +#include "Vector.h" + +namespace WTF { + + template<typename T> class DequeIteratorBase; + template<typename T> class DequeIterator; + template<typename T> class DequeConstIterator; + template<typename T> class DequeReverseIterator; + template<typename T> class DequeConstReverseIterator; + + template<typename T> + class Deque { + public: + typedef DequeIterator<T> iterator; + typedef DequeConstIterator<T> const_iterator; + typedef DequeReverseIterator<T> reverse_iterator; + typedef DequeConstReverseIterator<T> const_reverse_iterator; + + Deque(); + Deque(const Deque<T>&); + Deque& operator=(const Deque<T>&); + ~Deque(); + + void swap(Deque<T>&); + + size_t size() const { return m_start <= m_end ? 
m_end - m_start : m_end + m_buffer.capacity() - m_start; } + bool isEmpty() const { return m_start == m_end; } + + iterator begin() { return iterator(this, m_start); } + iterator end() { return iterator(this, m_end); } + const_iterator begin() const { return const_iterator(this, m_start); } + const_iterator end() const { return const_iterator(this, m_end); } + reverse_iterator rbegin() { return reverse_iterator(this, m_end); } + reverse_iterator rend() { return reverse_iterator(this, m_start); } + const_reverse_iterator rbegin() const { return const_reverse_iterator(this, m_end); } + const_reverse_iterator rend() const { return const_reverse_iterator(this, m_start); } + + T& first() { ASSERT(m_start != m_end); return m_buffer.buffer()[m_start]; } + const T& first() const { ASSERT(m_start != m_end); return m_buffer.buffer()[m_start]; } + + template<typename U> void append(const U&); + template<typename U> void prepend(const U&); + void removeFirst(); + + void clear(); + + private: + friend class DequeIteratorBase<T>; + + typedef VectorBuffer<T, 0> Buffer; + typedef VectorTypeOperations<T> TypeOperations; + typedef DequeIteratorBase<T> IteratorBase; + + void invalidateIterators(); + void destroyAll(); + void checkValidity() const; + void checkIndexValidity(size_t) const; + void expandCapacityIfNeeded(); + void expandCapacity(); + + size_t m_start; + size_t m_end; + Buffer m_buffer; +#ifndef NDEBUG + mutable IteratorBase* m_iterators; +#endif + }; + + template<typename T> + class DequeIteratorBase { + private: + typedef DequeIteratorBase<T> Base; + + protected: + DequeIteratorBase(); + DequeIteratorBase(const Deque<T>*, size_t); + DequeIteratorBase(const Base&); + Base& operator=(const Base&); + ~DequeIteratorBase(); + + void assign(const Base& other) { *this = other; } + + void increment(); + void decrement(); + + T* before() const; + T* after() const; + + bool isEqual(const Base&) const; + + private: + void addToIteratorsList(); + void checkValidity() const; + void checkValidity(const Base&) const; + + Deque<T>* m_deque; + size_t m_index; + + friend class Deque<T>; + +#ifndef NDEBUG + mutable DequeIteratorBase* m_next; + mutable DequeIteratorBase* m_previous; +#endif + }; + + template<typename T> + class DequeIterator : public DequeIteratorBase<T> { + private: + typedef DequeIteratorBase<T> Base; + typedef DequeIterator<T> Iterator; + + public: + DequeIterator(Deque<T>* deque, size_t index) : Base(deque, index) { } + + DequeIterator(const Iterator& other) : Base(other) { } + DequeIterator& operator=(const Iterator& other) { Base::assign(other); return *this; } + + T& operator*() const { return *Base::after(); } + T* operator->() const { return Base::after(); } + + bool operator==(const Iterator& other) const { return Base::isEqual(other); } + bool operator!=(const Iterator& other) const { return !Base::isEqual(other); } + + Iterator& operator++() { Base::increment(); return *this; } + // postfix ++ intentionally omitted + Iterator& operator--() { Base::decrement(); return *this; } + // postfix -- intentionally omitted + }; + + template<typename T> + class DequeConstIterator : public DequeIteratorBase<T> { + private: + typedef DequeIteratorBase<T> Base; + typedef DequeConstIterator<T> Iterator; + typedef DequeIterator<T> NonConstIterator; + + public: + DequeConstIterator(const Deque<T>* deque, size_t index) : Base(deque, index) { } + + DequeConstIterator(const Iterator& other) : Base(other) { } + DequeConstIterator(const NonConstIterator& other) : Base(other) { } + DequeConstIterator& 
operator=(const Iterator& other) { Base::assign(other); return *this; } + DequeConstIterator& operator=(const NonConstIterator& other) { Base::assign(other); return *this; } + + const T& operator*() const { return *Base::after(); } + const T* operator->() const { return Base::after(); } + + bool operator==(const Iterator& other) const { return Base::isEqual(other); } + bool operator!=(const Iterator& other) const { return !Base::isEqual(other); } + + Iterator& operator++() { Base::increment(); return *this; } + // postfix ++ intentionally omitted + Iterator& operator--() { Base::decrement(); return *this; } + // postfix -- intentionally omitted + }; + + template<typename T> + class DequeReverseIterator : public DequeIteratorBase<T> { + private: + typedef DequeIteratorBase<T> Base; + typedef DequeReverseIterator<T> Iterator; + + public: + DequeReverseIterator(const Deque<T>* deque, size_t index) : Base(deque, index) { } + + DequeReverseIterator(const Iterator& other) : Base(other) { } + DequeReverseIterator& operator=(const Iterator& other) { Base::assign(other); return *this; } + + T& operator*() const { return *Base::before(); } + T* operator->() const { return Base::before(); } + + bool operator==(const Iterator& other) const { return Base::isEqual(other); } + bool operator!=(const Iterator& other) const { return !Base::isEqual(other); } + + Iterator& operator++() { Base::decrement(); return *this; } + // postfix ++ intentionally omitted + Iterator& operator--() { Base::increment(); return *this; } + // postfix -- intentionally omitted + }; + + template<typename T> + class DequeConstReverseIterator : public DequeIteratorBase<T> { + private: + typedef DequeIteratorBase<T> Base; + typedef DequeConstReverseIterator<T> Iterator; + typedef DequeReverseIterator<T> NonConstIterator; + + public: + DequeConstReverseIterator(const Deque<T>* deque, size_t index) : Base(deque, index) { } + + DequeConstReverseIterator(const Iterator& other) : Base(other) { } + DequeConstReverseIterator(const NonConstIterator& other) : Base(other) { } + DequeConstReverseIterator& operator=(const Iterator& other) { Base::assign(other); return *this; } + DequeConstReverseIterator& operator=(const NonConstIterator& other) { Base::assign(other); return *this; } + + const T& operator*() const { return *Base::before(); } + const T* operator->() const { return Base::before(); } + + bool operator==(const Iterator& other) const { return Base::isEqual(other); } + bool operator!=(const Iterator& other) const { return !Base::isEqual(other); } + + Iterator& operator++() { Base::decrement(); return *this; } + // postfix ++ intentionally omitted + Iterator& operator--() { Base::increment(); return *this; } + // postfix -- intentionally omitted + }; + +#ifdef NDEBUG + template<typename T> inline void Deque<T>::checkValidity() const { } + template<typename T> inline void Deque<T>::checkIndexValidity(size_t) const { } + template<typename T> inline void Deque<T>::invalidateIterators() { } +#else + template<typename T> + void Deque<T>::checkValidity() const + { + if (!m_buffer.capacity()) { + ASSERT(!m_start); + ASSERT(!m_end); + } else { + ASSERT(m_start < m_buffer.capacity()); + ASSERT(m_end < m_buffer.capacity()); + } + } + + template<typename T> + void Deque<T>::checkIndexValidity(size_t index) const + { + ASSERT(index <= m_buffer.capacity()); + if (m_start <= m_end) { + ASSERT(index >= m_start); + ASSERT(index <= m_end); + } else { + ASSERT(index >= m_start || index <= m_end); + } + } + + template<typename T> + void 
Deque<T>::invalidateIterators() + { + IteratorBase* next; + for (IteratorBase* p = m_iterators; p; p = next) { + next = p->m_next; + p->m_deque = 0; + p->m_next = 0; + p->m_previous = 0; + } + m_iterators = 0; + } +#endif + + template<typename T> + inline Deque<T>::Deque() + : m_start(0) + , m_end(0) +#ifndef NDEBUG + , m_iterators(0) +#endif + { + checkValidity(); + } + + template<typename T> + inline Deque<T>::Deque(const Deque<T>& other) + : m_start(other.m_start) + , m_end(other.m_end) + , m_buffer(other.m_buffer.capacity()) +#ifndef NDEBUG + , m_iterators(0) +#endif + { + const T* otherBuffer = other.m_buffer.buffer(); + if (m_start <= m_end) + TypeOperations::uninitializedCopy(otherBuffer + m_start, otherBuffer + m_end, m_buffer.buffer() + m_start); + else { + TypeOperations::uninitializedCopy(otherBuffer, otherBuffer + m_end, m_buffer.buffer()); + TypeOperations::uninitializedCopy(otherBuffer + m_start, otherBuffer + m_buffer.capacity(), m_buffer.buffer() + m_start); + } + } + + template<typename T> + void deleteAllValues(const Deque<T>& collection) + { + typedef typename Deque<T>::const_iterator iterator; + iterator end = collection.end(); + for (iterator it = collection.begin(); it != end; ++it) + delete *it; + } + + template<typename T> + inline Deque<T>& Deque<T>::operator=(const Deque<T>& other) + { + Deque<T> copy(other); + swap(copy); + return *this; + } + + template<typename T> + inline void Deque<T>::destroyAll() + { + if (m_start <= m_end) + TypeOperations::destruct(m_buffer.buffer() + m_start, m_buffer.buffer() + m_end); + else { + TypeOperations::destruct(m_buffer.buffer(), m_buffer.buffer() + m_end); + TypeOperations::destruct(m_buffer.buffer() + m_start, m_buffer.buffer() + m_buffer.capacity()); + } + } + + template<typename T> + inline Deque<T>::~Deque() + { + checkValidity(); + invalidateIterators(); + destroyAll(); + } + + template <typename T> + inline void Deque<T>::swap(Deque<T>& other) + { + checkValidity(); + other.checkValidity(); + invalidateIterators(); + std::swap(m_start, other.m_start); + std::swap(m_end, other.m_end); + m_buffer.swap(other.m_buffer); + checkValidity(); + other.checkValidity(); + } + + template <typename T> + inline void Deque<T>::clear() + { + checkValidity(); + invalidateIterators(); + destroyAll(); + m_start = 0; + m_end = 0; + checkValidity(); + } + + template<typename T> + inline void Deque<T>::expandCapacityIfNeeded() + { + if (m_start) { + if (m_end + 1 != m_start) + return; + } else if (m_end) { + if (m_end != m_buffer.capacity() - 1) + return; + } else if (m_buffer.capacity()) + return; + + expandCapacity(); + } + + template<typename T> + void Deque<T>::expandCapacity() + { + checkValidity(); + size_t oldCapacity = m_buffer.capacity(); + size_t newCapacity = max(static_cast<size_t>(16), oldCapacity + oldCapacity / 4 + 1); + T* oldBuffer = m_buffer.buffer(); + m_buffer.allocateBuffer(newCapacity); + if (m_start <= m_end) + TypeOperations::move(oldBuffer + m_start, oldBuffer + m_end, m_buffer.buffer() + m_start); + else { + TypeOperations::move(oldBuffer, oldBuffer + m_end, m_buffer.buffer()); + size_t newStart = newCapacity - (oldCapacity - m_start); + TypeOperations::move(oldBuffer + m_start, oldBuffer + oldCapacity, m_buffer.buffer() + newStart); + m_start = newStart; + } + m_buffer.deallocateBuffer(oldBuffer); + checkValidity(); + } + + template<typename T> template<typename U> + inline void Deque<T>::append(const U& value) + { + checkValidity(); + expandCapacityIfNeeded(); + new (&m_buffer.buffer()[m_end]) T(value); + if 
(m_end == m_buffer.capacity() - 1) + m_end = 0; + else + ++m_end; + checkValidity(); + } + + template<typename T> template<typename U> + inline void Deque<T>::prepend(const U& value) + { + checkValidity(); + expandCapacityIfNeeded(); + if (!m_start) + m_start = m_buffer.capacity() - 1; + else + --m_start; + new (&m_buffer.buffer()[m_start]) T(value); + checkValidity(); + } + + template<typename T> + inline void Deque<T>::removeFirst() + { + checkValidity(); + invalidateIterators(); + ASSERT(!isEmpty()); + TypeOperations::destruct(&m_buffer.buffer()[m_start], &m_buffer.buffer()[m_start + 1]); + if (m_start == m_buffer.capacity() - 1) + m_start = 0; + else + ++m_start; + checkValidity(); + } + +#ifdef NDEBUG + template<typename T> inline void DequeIteratorBase<T>::checkValidity() const { } + template<typename T> inline void DequeIteratorBase<T>::checkValidity(const DequeIteratorBase<T>&) const { } + template<typename T> inline void DequeIteratorBase<T>::addToIteratorsList() { } +#else + template<typename T> + void DequeIteratorBase<T>::checkValidity() const + { + ASSERT(m_deque); + m_deque->checkIndexValidity(m_index); + } + + template<typename T> + void DequeIteratorBase<T>::checkValidity(const Base& other) const + { + checkValidity(); + other.checkValidity(); + ASSERT(m_deque == other.m_deque); + } + + template<typename T> + void DequeIteratorBase<T>::addToIteratorsList() + { + if (!m_deque) + m_next = 0; + else { + m_next = m_deque->m_iterators; + m_deque->m_iterators = this; + if (m_next) + m_next->m_previous = this; + } + m_previous = 0; + } +#endif + + template<typename T> + inline DequeIteratorBase<T>::DequeIteratorBase() + : m_deque(0) + { + } + + template<typename T> + inline DequeIteratorBase<T>::DequeIteratorBase(const Deque<T>* deque, size_t index) + : m_deque(const_cast<Deque<T>*>(deque)) + , m_index(index) + { + addToIteratorsList(); + checkValidity(); + } + + template<typename T> + inline DequeIteratorBase<T>::DequeIteratorBase(const Base& other) + : m_deque(other.m_deque) + , m_index(other.m_index) + { + addToIteratorsList(); + checkValidity(); + } + + template<typename T> + inline DequeIteratorBase<T>::~DequeIteratorBase() + { +#ifndef NDEBUG + // Delete iterator from doubly-linked list of iterators. 
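+        // Three cases follow: the iterator was already invalidated (m_deque was
+        // cleared by invalidateIterators()), it is the head of the list (no
+        // m_previous), or it is an interior/tail node, in which case both
+        // neighbors are relinked around it.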
+ if (!m_deque) { + ASSERT(!m_next); + ASSERT(!m_previous); + } else { + if (m_next) { + ASSERT(m_next->m_previous == this); + m_next->m_previous = m_previous; + } + if (m_previous) { + ASSERT(m_deque->m_iterators != this); + ASSERT(m_previous->m_next == this); + m_previous->m_next = m_next; + } else { + ASSERT(m_deque->m_iterators == this); + m_deque->m_iterators = m_next; + } + } + m_deque = 0; + m_next = 0; + m_previous = 0; +#endif + } + + template<typename T> + inline bool DequeIteratorBase<T>::isEqual(const Base& other) const + { + checkValidity(other); + return m_index == other.m_index; + } + + template<typename T> + inline void DequeIteratorBase<T>::increment() + { + checkValidity(); + ASSERT(m_index != m_deque->m_end); + ASSERT(m_deque->m_buffer.capacity()); + if (m_index == m_deque->m_buffer.capacity() - 1) + m_index = 0; + else + ++m_index; + checkValidity(); + } + + template<typename T> + inline void DequeIteratorBase<T>::decrement() + { + checkValidity(); + ASSERT(m_index != m_deque->m_start); + ASSERT(m_deque->m_buffer.capacity()); + if (!m_index) + m_index = m_deque->m_buffer.capacity() - 1; + else + --m_index; + checkValidity(); + } + + template<typename T> + inline T* DequeIteratorBase<T>::after() const + { + checkValidity(); + ASSERT(m_index != m_deque->m_end); + return &m_deque->m_buffer.buffer()[m_index]; + } + + template<typename T> + inline T* DequeIteratorBase<T>::before() const + { + checkValidity(); + ASSERT(m_index != m_deque->m_start); + if (!m_index) + return &m_deque->m_buffer.buffer()[m_deque->m_buffer.capacity() - 1]; + return &m_deque->m_buffer.buffer()[m_index - 1]; + } + +} // namespace WTF + +using WTF::Deque; + +#endif // WTF_Deque_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/DisallowCType.h b/src/3rdparty/webkit/JavaScriptCore/wtf/DisallowCType.h new file mode 100644 index 0000000..5dccb0e --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/DisallowCType.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef WTF_DisallowCType_h +#define WTF_DisallowCType_h + +// The behavior of many of the functions in the <ctype.h> header is dependent +// on the current locale. But almost all uses of these functions are for +// locale-independent, ASCII-specific purposes. In WebKit code we use our own +// ASCII-specific functions instead. This header makes sure we get a compile-time +// error if we use one of the <ctype.h> functions by accident. + +#include <ctype.h> + +#undef isalnum +#undef isalpha +#undef isascii +#undef isblank +#undef iscntrl +#undef isdigit +#undef isgraph +#undef islower +#undef isprint +#undef ispunct +#undef isspace +#undef isupper +#undef isxdigit +#undef toascii +#undef tolower +#undef toupper + +#define isalnum WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define isalpha WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define isascii WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define isblank WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define iscntrl WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define isdigit WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define isgraph WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define islower WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define isprint WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define ispunct WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define isspace WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define isupper WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define isxdigit WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define toascii WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define tolower WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h +#define toupper WTF_Please_use_ASCIICType_instead_of_ctype_see_comment_in_ASCIICType_h + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.cpp new file mode 100644 index 0000000..4387d61 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.cpp @@ -0,0 +1,3851 @@ +// Copyright (c) 2005, 2007, Google Inc. +// All rights reserved. +// Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// --- +// Author: Sanjay Ghemawat <opensource@google.com> +// +// A malloc that uses a per-thread cache to satisfy small malloc requests. +// (The time for malloc/free of a small object drops from 300 ns to 50 ns.) +// +// See doc/tcmalloc.html for a high-level +// description of how this malloc works. +// +// SYNCHRONIZATION +// 1. The thread-specific lists are accessed without acquiring any locks. +// This is safe because each such list is only accessed by one thread. +// 2. We have a lock per central free-list, and hold it while manipulating +// the central free list for a particular size. +// 3. The central page allocator is protected by "pageheap_lock". +// 4. The pagemap (which maps from page-number to descriptor), +// can be read without holding any locks, and written while holding +// the "pageheap_lock". +// 5. To improve performance, a subset of the information one can get +// from the pagemap is cached in a data structure, pagemap_cache_, +// that atomically reads and writes its entries. This cache can be +// read and written without locking. +// +// This multi-threaded access to the pagemap is safe for fairly +// subtle reasons. We basically assume that when an object X is +// allocated by thread A and deallocated by thread B, there must +// have been appropriate synchronization in the handoff of object +// X from thread A to thread B. The same logic applies to pagemap_cache_. +// +// THE PAGEID-TO-SIZECLASS CACHE +// Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache +// returns 0 for a particular PageID then that means "no information," not that +// the sizeclass is 0. The cache may have stale information for pages that do +// not hold the beginning of any free()'able object. Staleness is eliminated +// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and +// do_memalign() for all other relevant pages. +// +// TODO: Bias reclamation to larger addresses +// TODO: implement mallinfo/mallopt +// TODO: Better testing +// +// 9/28/2003 (new page-level allocator replaces ptmalloc2): +// * malloc/free of small objects goes from ~300 ns to ~50 ns. +// * allocation of a reasonably complicated struct +// goes from about 1100 ns to about 300 ns. 
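
The synchronization rules above condense to a two-level fast/slow path. Below is a minimal editorial sketch, not code from this commit: ToyThreadCache and ToyCentralList are hypothetical stand-ins for the TCMalloc_ThreadCache and central free-list types defined later in this file, and the real slow path transfers whole batches of objects (num_objects_to_move) rather than one object at a time.

    #include <cstddef>
    #include <cstdlib>
    #include <mutex>

    struct ToyCentralList {
        std::mutex lock;                 // rule 2: one lock per central free list
        void* popObject(size_t bytes) {  // stand-in: the real code carves spans
            return std::malloc(bytes);
        }
    };

    struct ToyThreadCache {
        void* freeList[68] = {};         // rule 1: touched only by the owning thread

        void* allocate(size_t sizeClass, size_t bytes, ToyCentralList& central) {
            if (void* object = freeList[sizeClass]) {
                // Fast path: lock-free pop; the next pointer is stored in the
                // first word of the free object itself.
                freeList[sizeClass] = *static_cast<void**>(object);
                return object;
            }
            // Slow path: refill from the central list under its lock.
            std::lock_guard<std::mutex> guard(central.lock);
            return central.popObject(bytes);
        }
    };
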
+ +#include "config.h" +#include "FastMalloc.h" + +#include "Assertions.h" +#if ENABLE(JSC_MULTIPLE_THREADS) +#include <pthread.h> +#endif + +#ifndef NO_TCMALLOC_SAMPLES +#ifdef WTF_CHANGES +#define NO_TCMALLOC_SAMPLES +#endif +#endif + +#if !defined(USE_SYSTEM_MALLOC) && defined(NDEBUG) +#define FORCE_SYSTEM_MALLOC 0 +#else +#define FORCE_SYSTEM_MALLOC 1 +#endif + +#define TCMALLOC_TRACK_DECOMMITED_SPANS (HAVE(VIRTUALALLOC)) + +#ifndef NDEBUG +namespace WTF { + +#if ENABLE(JSC_MULTIPLE_THREADS) +static pthread_key_t isForbiddenKey; +static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT; +static void initializeIsForbiddenKey() +{ + pthread_key_create(&isForbiddenKey, 0); +} + +static bool isForbidden() +{ + pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey); + return !!pthread_getspecific(isForbiddenKey); +} + +void fastMallocForbid() +{ + pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey); + pthread_setspecific(isForbiddenKey, &isForbiddenKey); +} + +void fastMallocAllow() +{ + pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey); + pthread_setspecific(isForbiddenKey, 0); +} + +#else + +static bool staticIsForbidden; +static bool isForbidden() +{ + return staticIsForbidden; +} + +void fastMallocForbid() +{ + staticIsForbidden = true; +} + +void fastMallocAllow() +{ + staticIsForbidden = false; +} +#endif // ENABLE(JSC_MULTIPLE_THREADS) + +} // namespace WTF +#endif // NDEBUG + +#include <string.h> + +namespace WTF { + +void* fastZeroedMalloc(size_t n) +{ + void* result = fastMalloc(n); + memset(result, 0, n); + return result; +} + +void* tryFastZeroedMalloc(size_t n) +{ + void* result = tryFastMalloc(n); + if (!result) + return 0; + memset(result, 0, n); + return result; +} + +} // namespace WTF + +#if FORCE_SYSTEM_MALLOC + +#include <stdlib.h> +#if !PLATFORM(WIN_OS) + #include <pthread.h> +#else + #include "windows.h" +#endif + +namespace WTF { + +void* tryFastMalloc(size_t n) +{ + ASSERT(!isForbidden()); + return malloc(n); +} + +void* fastMalloc(size_t n) +{ + ASSERT(!isForbidden()); + void* result = malloc(n); + if (!result) + CRASH(); + return result; +} + +void* tryFastCalloc(size_t n_elements, size_t element_size) +{ + ASSERT(!isForbidden()); + return calloc(n_elements, element_size); +} + +void* fastCalloc(size_t n_elements, size_t element_size) +{ + ASSERT(!isForbidden()); + void* result = calloc(n_elements, element_size); + if (!result) + CRASH(); + return result; +} + +void fastFree(void* p) +{ + ASSERT(!isForbidden()); + free(p); +} + +void* tryFastRealloc(void* p, size_t n) +{ + ASSERT(!isForbidden()); + return realloc(p, n); +} + +void* fastRealloc(void* p, size_t n) +{ + ASSERT(!isForbidden()); + void* result = realloc(p, n); + if (!result) + CRASH(); + return result; +} + +void releaseFastMallocFreeMemory() { } + +FastMallocStatistics fastMallocStatistics() +{ + FastMallocStatistics statistics = { 0, 0, 0, 0 }; + return statistics; +} + +} // namespace WTF + +#if PLATFORM(DARWIN) +// This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled. +// It will never be used in this case, so it's type and value are less interesting than its presence. 
+extern "C" const int jscore_fastmalloc_introspection = 0; +#endif + +#else // FORCE_SYSTEM_MALLOC + +#if HAVE(STDINT_H) +#include <stdint.h> +#elif HAVE(INTTYPES_H) +#include <inttypes.h> +#else +#include <sys/types.h> +#endif + +#include "AlwaysInline.h" +#include "Assertions.h" +#include "TCPackedCache.h" +#include "TCPageMap.h" +#include "TCSpinLock.h" +#include "TCSystemAlloc.h" +#include <algorithm> +#include <errno.h> +#include <new> +#include <pthread.h> +#include <stdarg.h> +#include <stddef.h> +#include <stdio.h> +#if COMPILER(MSVC) +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include <windows.h> +#endif + +#if WTF_CHANGES + +#if PLATFORM(DARWIN) +#include "MallocZoneSupport.h" +#include <wtf/HashSet.h> +#endif + +#ifndef PRIuS +#define PRIuS "zu" +#endif + +// Calling pthread_getspecific through a global function pointer is faster than a normal +// call to the function on Mac OS X, and it's used in performance-critical code. So we +// use a function pointer. But that's not necessarily faster on other platforms, and we had +// problems with this technique on Windows, so we'll do this only on Mac OS X. +#if PLATFORM(DARWIN) +static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific; +#define pthread_getspecific(key) pthread_getspecific_function_pointer(key) +#endif + +#define DEFINE_VARIABLE(type, name, value, meaning) \ + namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \ + type FLAGS_##name(value); \ + char FLAGS_no##name; \ + } \ + using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name + +#define DEFINE_int64(name, value, meaning) \ + DEFINE_VARIABLE(int64_t, name, value, meaning) + +#define DEFINE_double(name, value, meaning) \ + DEFINE_VARIABLE(double, name, value, meaning) + +namespace WTF { + +#define malloc fastMalloc +#define calloc fastCalloc +#define free fastFree +#define realloc fastRealloc + +#define MESSAGE LOG_ERROR +#define CHECK_CONDITION ASSERT + +#if PLATFORM(DARWIN) +class TCMalloc_PageHeap; +class TCMalloc_ThreadCache; +class TCMalloc_Central_FreeListPadded; + +class FastMallocZone { +public: + static void init(); + + static kern_return_t enumerate(task_t, void*, unsigned typeMmask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t); + static size_t goodSize(malloc_zone_t*, size_t size) { return size; } + static boolean_t check(malloc_zone_t*) { return true; } + static void print(malloc_zone_t*, boolean_t) { } + static void log(malloc_zone_t*, void*) { } + static void forceLock(malloc_zone_t*) { } + static void forceUnlock(malloc_zone_t*) { } + static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); } + +private: + FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*); + static size_t size(malloc_zone_t*, const void*); + static void* zoneMalloc(malloc_zone_t*, size_t); + static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size); + static void zoneFree(malloc_zone_t*, void*); + static void* zoneRealloc(malloc_zone_t*, void*, size_t); + static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; } + static void zoneDestroy(malloc_zone_t*) { } + + malloc_zone_t m_zone; + TCMalloc_PageHeap* m_pageHeap; + TCMalloc_ThreadCache** m_threadHeaps; + TCMalloc_Central_FreeListPadded* m_centralCaches; +}; + +#endif + +#endif + +#ifndef WTF_CHANGES +// This #ifdef should almost never be set. 
 Set NO_TCMALLOC_SAMPLES if
+// you're porting to a system where you really can't get a stacktrace.
+#ifdef NO_TCMALLOC_SAMPLES
+// We use #define so code compiles even if you #include stacktrace.h somehow.
+# define GetStackTrace(stack, depth, skip) (0)
+#else
+# include <google/stacktrace.h>
+#endif
+#endif
+
+// Even if we have support for thread-local storage in the compiler
+// and linker, the OS may not support it. We need to check that at
+// runtime. Right now, we have to keep a manual set of "bad" OSes.
+#if defined(HAVE_TLS)
+  static bool kernel_supports_tls = false;      // be conservative
+  static inline bool KernelSupportsTLS() {
+    return kernel_supports_tls;
+  }
+# if !HAVE_DECL_UNAME    // if too old for uname, probably too old for TLS
+    static void CheckIfKernelSupportsTLS() {
+      kernel_supports_tls = false;
+    }
+# else
+#   include <sys/utsname.h>    // DECL_UNAME checked for <sys/utsname.h> too
+    static void CheckIfKernelSupportsTLS() {
+      struct utsname buf;
+      if (uname(&buf) != 0) {   // should be impossible
+        MESSAGE("uname failed, assuming no TLS support (errno=%d)\n", errno);
+        kernel_supports_tls = false;
+      } else if (strcasecmp(buf.sysname, "linux") == 0) {
+        // The linux case: the first kernel to support TLS was 2.6.0
+        if (buf.release[0] < '2' && buf.release[1] == '.')    // 0.x or 1.x
+          kernel_supports_tls = false;
+        else if (buf.release[0] == '2' && buf.release[1] == '.' &&
+                 buf.release[2] >= '0' && buf.release[2] < '6' &&
+                 buf.release[3] == '.')                       // 2.0 - 2.5
+          kernel_supports_tls = false;
+        else
+          kernel_supports_tls = true;
+      } else {        // some other kernel, we'll be optimistic
+        kernel_supports_tls = true;
+      }
+      // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
+    }
+# endif  // HAVE_DECL_UNAME
+#endif    // HAVE_TLS
+
+// __THROW is defined on glibc systems. It means, counter-intuitively,
+// "This function will never throw an exception." It's an optional
+// optimization tool, but we may need to use it to match glibc prototypes.
+#ifndef __THROW    // I guess we're not on a glibc system
+# define __THROW   // __THROW is just an optimization, so ok to make it ""
+#endif
+
+//-------------------------------------------------------------------
+// Configuration
+//-------------------------------------------------------------------
+
+// Not all possible combinations of the following parameters make
+// sense. In particular, if kMaxSize increases, you may have to
+// increase kNumClasses as well.
+static const size_t kPageShift  = 12;
+static const size_t kPageSize   = 1 << kPageShift;
+static const size_t kMaxSize    = 8u * kPageSize;
+static const size_t kAlignShift = 3;
+static const size_t kAlignment  = 1 << kAlignShift;
+static const size_t kNumClasses = 68;
+
+// Allocates a big block of memory for the pagemap once we reach more than
+// 128MB.
+static const size_t kPageMapBigAllocationThreshold = 128 << 20;
+
+// Minimum number of pages to fetch from system at a time. Must be
+// significantly bigger than kBlockSize to amortize system-call
+// overhead, and also to reduce external fragmentation. Also, we
+// should keep this value big because various incarnations of Linux
+// have small limits on the number of mmap() regions per
+// address-space.
+static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);  // 1MB with 4K pages
+
+// Number of objects to move between a per-thread list and a central
+// list in one shot. We want this to be not too small so we can
+// amortize the lock overhead for accessing the central list.
Making +// it too big may temporarily cause unnecessary memory wastage in the +// per-thread free list until the scavenger cleans up the list. +static int num_objects_to_move[kNumClasses]; + +// Maximum length we allow a per-thread free-list to have before we +// move objects from it into the corresponding central free-list. We +// want this big to avoid locking the central free-list too often. It +// should not hurt to make this list somewhat big because the +// scavenging code will shrink it down when its contents are not in use. +static const int kMaxFreeListLength = 256; + +// Lower and upper bounds on the per-thread cache sizes +static const size_t kMinThreadCacheSize = kMaxSize * 2; +static const size_t kMaxThreadCacheSize = 2 << 20; + +// Default bound on the total amount of thread caches +static const size_t kDefaultOverallThreadCacheSize = 16 << 20; + +// For all span-lengths < kMaxPages we keep an exact-size list. +// REQUIRED: kMaxPages >= kMinSystemAlloc; +static const size_t kMaxPages = kMinSystemAlloc; + +/* The smallest prime > 2^n */ +static int primes_list[] = { + // Small values might cause high rates of sampling + // and hence commented out. + // 2, 5, 11, 17, 37, 67, 131, 257, + // 521, 1031, 2053, 4099, 8209, 16411, + 32771, 65537, 131101, 262147, 524309, 1048583, + 2097169, 4194319, 8388617, 16777259, 33554467 }; + +// Twice the approximate gap between sampling actions. +// I.e., we take one sample approximately once every +// tcmalloc_sample_parameter/2 +// bytes of allocation, i.e., ~ once every 128KB. +// Must be a prime number. +#ifdef NO_TCMALLOC_SAMPLES +DEFINE_int64(tcmalloc_sample_parameter, 0, + "Unused: code is compiled with NO_TCMALLOC_SAMPLES"); +static size_t sample_period = 0; +#else +DEFINE_int64(tcmalloc_sample_parameter, 262147, + "Twice the approximate gap between sampling actions." + " Must be a prime number. Otherwise will be rounded up to a " + " larger prime number"); +static size_t sample_period = 262147; +#endif + +// Protects sample_period above +static SpinLock sample_period_lock = SPINLOCK_INITIALIZER; + +// Parameters for controlling how fast memory is returned to the OS. + +DEFINE_double(tcmalloc_release_rate, 1, + "Rate at which we release unused memory to the system. " + "Zero means we never release memory back to the system. " + "Increase this flag to return memory faster; decrease it " + "to return memory slower. Reasonable rates are in the " + "range [0,10]"); + +//------------------------------------------------------------------- +// Mapping from size to size_class and vice versa +//------------------------------------------------------------------- + +// Sizes <= 1024 have an alignment >= 8. So for such sizes we have an +// array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128. +// So for these larger sizes we have an array indexed by ceil(size/128). +// +// We flatten both logical arrays into one physical array and use +// arithmetic to compute an appropriate index. The constants used by +// ClassIndex() were selected to make the flattening work. +// +// Examples: +// Size Expression Index +// ------------------------------------------------------- +// 0 (0 + 7) / 8 0 +// 1 (1 + 7) / 8 1 +// ... +// 1024 (1024 + 7) / 8 128 +// 1025 (1025 + 127 + (120<<7)) / 128 129 +// ... 
+// 32768 (32768 + 127 + (120<<7)) / 128 376
+static const size_t kMaxSmallSize = 1024;
+static const int shift_amount[2] = { 3, 7 };  // For divides by 8 or 128
+static const int add_amount[2] = { 7, 127 + (120 << 7) };
+static unsigned char class_array[377];
+
+// Compute index of the class_array[] entry for a given size
+// e.g. ClassIndex(1025) = (1025 + 127 + (120 << 7)) >> 7 = 129, matching the
+// table above.
+static inline int ClassIndex(size_t s) {
+  const int i = (s > kMaxSmallSize);
+  return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
+}
+
+// Mapping from size class to max size storable in that class
+static size_t class_to_size[kNumClasses];
+
+// Mapping from size class to number of pages to allocate at a time
+static size_t class_to_pages[kNumClasses];
+
+// TransferCache is used to cache transfers of num_objects_to_move[size_class]
+// back and forth between thread caches and the central cache for a given size
+// class.
+struct TCEntry {
+  void *head;  // Head of chain of objects.
+  void *tail;  // Tail of chain of objects.
+};
+// A central cache freelist can have anywhere from 0 to kNumTransferEntries
+// slots to put linked-list chains into. To keep memory usage bounded the total
+// number of TCEntries across size classes is fixed. Currently each size
+// class is initially given one TCEntry which also means that the maximum any
+// one class can have is kNumClasses.
+static const int kNumTransferEntries = kNumClasses;
+
+// Note: the following only works for "n"s that fit in 32 bits, but
+// that is fine since we only use it for small sizes.
+static inline int LgFloor(size_t n) {
+  int log = 0;
+  for (int i = 4; i >= 0; --i) {
+    int shift = (1 << i);
+    size_t x = n >> shift;
+    if (x != 0) {
+      n = x;
+      log += shift;
+    }
+  }
+  ASSERT(n == 1);
+  return log;
+}
+
+// Some very basic linked-list functions for lists whose nodes are raw void*;
+// the first word of each node stores the pointer to the next node.
+
+static inline void *SLL_Next(void *t) {
+  return *(reinterpret_cast<void**>(t));
+}
+
+static inline void SLL_SetNext(void *t, void *n) {
+  *(reinterpret_cast<void**>(t)) = n;
+}
+
+static inline void SLL_Push(void **list, void *element) {
+  SLL_SetNext(element, *list);
+  *list = element;
+}
+
+static inline void *SLL_Pop(void **list) {
+  void *result = *list;
+  *list = SLL_Next(*list);
+  return result;
+}
+
+
+// Remove N elements from a linked list to which head points. head will be
+// modified to point to the new head. start and end will point to the first
+// and last nodes of the range. Note that end will point to NULL after this
+// function is called.
+static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
+  if (N == 0) {
+    *start = NULL;
+    *end = NULL;
+    return;
+  }
+
+  void *tmp = *head;
+  for (int i = 1; i < N; ++i) {
+    tmp = SLL_Next(tmp);
+  }
+
+  *start = *head;
+  *end = tmp;
+  *head = SLL_Next(tmp);
+  // Unlink range from list.
+  SLL_SetNext(tmp, NULL);
+}
+
+static inline void SLL_PushRange(void **head, void *start, void *end) {
+  if (!start) return;
+  SLL_SetNext(end, *head);
+  *head = start;
+}
+
+static inline size_t SLL_Size(void *head) {
+  size_t count = 0;
+  while (head) {
+    count++;
+    head = SLL_Next(head);
+  }
+  return count;
+}
+
+// Setup helper functions.
+
+static ALWAYS_INLINE size_t SizeClass(size_t size) {
+  return class_array[ClassIndex(size)];
+}
+
+// Get the byte-size for a specified class
+static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
+  return class_to_size[cl];
+}
+static int NumMoveSize(size_t size) {
+  if (size == 0) return 0;
+  // Use approx 64k transfers between thread and central caches.
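+  // For example, a 4096-byte class moves 64*1024/4096 = 16 objects per
+  // transfer; a 256-byte class would get 256, which the clamps below cut
+  // first to static_cast<int>(0.8 * kMaxFreeListLength) = 204 and then to
+  // the final cap of 32.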
+ int num = static_cast<int>(64.0 * 1024.0 / size); + if (num < 2) num = 2; + // Clamp well below kMaxFreeListLength to avoid ping pong between central + // and thread caches. + if (num > static_cast<int>(0.8 * kMaxFreeListLength)) + num = static_cast<int>(0.8 * kMaxFreeListLength); + + // Also, avoid bringing in too many objects into small object free + // lists. There are lots of such lists, and if we allow each one to + // fetch too many at a time, we end up having to scavenge too often + // (especially when there are lots of threads and each thread gets a + // small allowance for its thread cache). + // + // TODO: Make thread cache free list sizes dynamic so that we do not + // have to equally divide a fixed resource amongst lots of threads. + if (num > 32) num = 32; + + return num; +} + +// Initialize the mapping arrays +static void InitSizeClasses() { + // Do some sanity checking on add_amount[]/shift_amount[]/class_array[] + if (ClassIndex(0) < 0) { + MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0)); + CRASH(); + } + if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) { + MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize)); + CRASH(); + } + + // Compute the size classes we want to use + size_t sc = 1; // Next size class to assign + unsigned char alignshift = kAlignShift; + int last_lg = -1; + for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) { + int lg = LgFloor(size); + if (lg > last_lg) { + // Increase alignment every so often. + // + // Since we double the alignment every time size doubles and + // size >= 128, this means that space wasted due to alignment is + // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256 + // bytes, so the space wasted as a percentage starts falling for + // sizes > 2K. + if ((lg >= 7) && (alignshift < 8)) { + alignshift++; + } + last_lg = lg; + } + + // Allocate enough pages so leftover is less than 1/8 of total. + // This bounds wasted space to at most 12.5%. + size_t psize = kPageSize; + while ((psize % size) > (psize >> 3)) { + psize += kPageSize; + } + const size_t my_pages = psize >> kPageShift; + + if (sc > 1 && my_pages == class_to_pages[sc-1]) { + // See if we can merge this into the previous class without + // increasing the fragmentation of the previous class. 
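+      // For instance, with 4K pages the 1792-, 1920- and 2048-byte sizes all
+      // fit exactly two objects into one page, so this merge step collapses
+      // them into a single 2048-byte class.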
+ const size_t my_objects = (my_pages << kPageShift) / size; + const size_t prev_objects = (class_to_pages[sc-1] << kPageShift) + / class_to_size[sc-1]; + if (my_objects == prev_objects) { + // Adjust last class to include this size + class_to_size[sc-1] = size; + continue; + } + } + + // Add new class + class_to_pages[sc] = my_pages; + class_to_size[sc] = size; + sc++; + } + if (sc != kNumClasses) { + MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n", + sc, int(kNumClasses)); + CRASH(); + } + + // Initialize the mapping arrays + int next_size = 0; + for (unsigned char c = 1; c < kNumClasses; c++) { + const size_t max_size_in_class = class_to_size[c]; + for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) { + class_array[ClassIndex(s)] = c; + } + next_size = static_cast<int>(max_size_in_class + kAlignment); + } + + // Double-check sizes just to be safe + for (size_t size = 0; size <= kMaxSize; size++) { + const size_t sc = SizeClass(size); + if (sc == 0) { + MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size); + CRASH(); + } + if (sc > 1 && size <= class_to_size[sc-1]) { + MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS + "\n", sc, size); + CRASH(); + } + if (sc >= kNumClasses) { + MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size); + CRASH(); + } + const size_t s = class_to_size[sc]; + if (size > s) { + MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc); + CRASH(); + } + if (s == 0) { + MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc); + CRASH(); + } + } + + // Initialize the num_objects_to_move array. + for (size_t cl = 1; cl < kNumClasses; ++cl) { + num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl)); + } + +#ifndef WTF_CHANGES + if (false) { + // Dump class sizes and maximum external wastage per size class + for (size_t cl = 1; cl < kNumClasses; ++cl) { + const int alloc_size = class_to_pages[cl] << kPageShift; + const int alloc_objs = alloc_size / class_to_size[cl]; + const int min_used = (class_to_size[cl-1] + 1) * alloc_objs; + const int max_waste = alloc_size - min_used; + MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n", + int(cl), + int(class_to_size[cl-1] + 1), + int(class_to_size[cl]), + int(class_to_pages[cl] << kPageShift), + max_waste * 100.0 / alloc_size + ); + } + } +#endif +} + +// ------------------------------------------------------------------------- +// Simple allocator for objects of a specified type. External locking +// is required before accessing one of these objects. 
+// ------------------------------------------------------------------------- + +// Metadata allocator -- keeps stats about how many bytes allocated +static uint64_t metadata_system_bytes = 0; +static void* MetaDataAlloc(size_t bytes) { + void* result = TCMalloc_SystemAlloc(bytes, 0); + if (result != NULL) { + metadata_system_bytes += bytes; + } + return result; +} + +template <class T> +class PageHeapAllocator { + private: + // How much to allocate from system at a time + static const size_t kAllocIncrement = 32 << 10; + + // Aligned size of T + static const size_t kAlignedSize + = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment); + + // Free area from which to carve new objects + char* free_area_; + size_t free_avail_; + + // Free list of already carved objects + void* free_list_; + + // Number of allocated but unfreed objects + int inuse_; + + public: + void Init() { + ASSERT(kAlignedSize <= kAllocIncrement); + inuse_ = 0; + free_area_ = NULL; + free_avail_ = 0; + free_list_ = NULL; + } + + T* New() { + // Consult free list + void* result; + if (free_list_ != NULL) { + result = free_list_; + free_list_ = *(reinterpret_cast<void**>(result)); + } else { + if (free_avail_ < kAlignedSize) { + // Need more room + free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement)); + if (free_area_ == NULL) CRASH(); + free_avail_ = kAllocIncrement; + } + result = free_area_; + free_area_ += kAlignedSize; + free_avail_ -= kAlignedSize; + } + inuse_++; + return reinterpret_cast<T*>(result); + } + + void Delete(T* p) { + *(reinterpret_cast<void**>(p)) = free_list_; + free_list_ = p; + inuse_--; + } + + int inuse() const { return inuse_; } +}; + +// ------------------------------------------------------------------------- +// Span - a contiguous run of pages +// ------------------------------------------------------------------------- + +// Type that can hold a page number +typedef uintptr_t PageID; + +// Type that can hold the length of a run of pages +typedef uintptr_t Length; + +static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift; + +// Convert byte size into pages. This won't overflow, but may return +// an unreasonably large value if bytes is huge enough. +static inline Length pages(size_t bytes) { + return (bytes >> kPageShift) + + ((bytes & (kPageSize - 1)) > 0 ? 1 : 0); +} + +// Convert a user size into the number of bytes that will actually be +// allocated +static size_t AllocationSize(size_t bytes) { + if (bytes > kMaxSize) { + // Large object: we allocate an integral number of pages + ASSERT(bytes <= (kMaxValidPages << kPageShift)); + return pages(bytes) << kPageShift; + } else { + // Small object: find the size class to which it belongs + return ByteSizeForClass(SizeClass(bytes)); + } +} + +// Information kept for a span (a contiguous run of pages). +struct Span { + PageID start; // Starting page number + Length length; // Number of pages in span + Span* next; // Used when in link list + Span* prev; // Used when in link list + void* objects; // Linked list of free objects + unsigned int free : 1; // Is the span free +#ifndef NO_TCMALLOC_SAMPLES + unsigned int sample : 1; // Sampled object? 
+#endif + unsigned int sizeclass : 8; // Size-class for small objects (or 0) + unsigned int refcount : 11; // Number of non-free objects + bool decommitted : 1; + +#undef SPAN_HISTORY +#ifdef SPAN_HISTORY + // For debugging, we can keep a log events per span + int nexthistory; + char history[64]; + int value[64]; +#endif +}; + +#if TCMALLOC_TRACK_DECOMMITED_SPANS +#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted) +#else +#define ASSERT_SPAN_COMMITTED(span) +#endif + +#ifdef SPAN_HISTORY +void Event(Span* span, char op, int v = 0) { + span->history[span->nexthistory] = op; + span->value[span->nexthistory] = v; + span->nexthistory++; + if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0; +} +#else +#define Event(s,o,v) ((void) 0) +#endif + +// Allocator/deallocator for spans +static PageHeapAllocator<Span> span_allocator; +static Span* NewSpan(PageID p, Length len) { + Span* result = span_allocator.New(); + memset(result, 0, sizeof(*result)); + result->start = p; + result->length = len; +#ifdef SPAN_HISTORY + result->nexthistory = 0; +#endif + return result; +} + +static inline void DeleteSpan(Span* span) { +#ifndef NDEBUG + // In debug mode, trash the contents of deleted Spans + memset(span, 0x3f, sizeof(*span)); +#endif + span_allocator.Delete(span); +} + +// ------------------------------------------------------------------------- +// Doubly linked list of spans. +// ------------------------------------------------------------------------- + +static inline void DLL_Init(Span* list) { + list->next = list; + list->prev = list; +} + +static inline void DLL_Remove(Span* span) { + span->prev->next = span->next; + span->next->prev = span->prev; + span->prev = NULL; + span->next = NULL; +} + +static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) { + return list->next == list; +} + +static int DLL_Length(const Span* list) { + int result = 0; + for (Span* s = list->next; s != list; s = s->next) { + result++; + } + return result; +} + +#if 0 /* Not needed at the moment -- causes compiler warnings if not used */ +static void DLL_Print(const char* label, const Span* list) { + MESSAGE("%-10s %p:", label, list); + for (const Span* s = list->next; s != list; s = s->next) { + MESSAGE(" <%p,%u,%u>", s, s->start, s->length); + } + MESSAGE("\n"); +} +#endif + +static inline void DLL_Prepend(Span* list, Span* span) { + ASSERT(span->next == NULL); + ASSERT(span->prev == NULL); + span->next = list->next; + span->prev = list; + list->next->prev = span; + list->next = span; +} + +// ------------------------------------------------------------------------- +// Stack traces kept for sampled allocations +// The following state is protected by pageheap_lock_. +// ------------------------------------------------------------------------- + +// size/depth are made the same size as a pointer so that some generic +// code below can conveniently cast them back and forth to void*. +static const int kMaxStackDepth = 31; +struct StackTrace { + uintptr_t size; // Size of object + uintptr_t depth; // Number of PC values stored in array below + void* stack[kMaxStackDepth]; +}; +static PageHeapAllocator<StackTrace> stacktrace_allocator; +static Span sampled_objects; + +// ------------------------------------------------------------------------- +// Map from page-id to per-page data +// ------------------------------------------------------------------------- + +// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. 
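+// (With 4K pages a 32-bit address space holds 2^20 pages, so two radix levels
+// are enough; 64-bit machines have up to 52 bits of page number and need a
+// third level to keep each interior node reasonably small.)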
+// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
+// because sometimes the sizeclass is all the information we need.
+
+// Selector class -- general selector uses 3-level map
+template <int BITS> class MapSelector {
+ public:
+  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
+  typedef PackedCache<BITS, uint64_t> CacheType;
+};
+
+// A two-level map for 32-bit machines
+template <> class MapSelector<32> {
+ public:
+  typedef TCMalloc_PageMap2<32-kPageShift> Type;
+  typedef PackedCache<32-kPageShift, uint16_t> CacheType;
+};
+
+// -------------------------------------------------------------------------
+// Page-level allocator
+//  * Eager coalescing
+//
+// Heap for page-level allocation. We allow allocating and freeing
+// contiguous runs of pages (called a "span").
+// -------------------------------------------------------------------------
+
+class TCMalloc_PageHeap {
+ public:
+  void init();
+
+  // Allocate a run of "n" pages. Returns NULL if out of memory.
+  Span* New(Length n);
+
+  // Delete the span "[p, p+n-1]".
+  // REQUIRES: span was returned by earlier call to New() and
+  //           has not yet been deleted.
+  void Delete(Span* span);
+
+  // Mark an allocated span as being used for small objects of the
+  // specified size-class.
+  // REQUIRES: span was returned by an earlier call to New()
+  //           and has not yet been deleted.
+  void RegisterSizeClass(Span* span, size_t sc);
+
+  // Split an allocated span into two spans: one of length "n" pages
+  // followed by another span of length "span->length - n" pages.
+  // Modifies "*span" to point to the first span of length "n" pages.
+  // Returns a pointer to the second span.
+  //
+  // REQUIRES: "0 < n < span->length"
+  // REQUIRES: !span->free
+  // REQUIRES: span->sizeclass == 0
+  Span* Split(Span* span, Length n);
+
+  // Return the descriptor for the specified page.
+  inline Span* GetDescriptor(PageID p) const {
+    return reinterpret_cast<Span*>(pagemap_.get(p));
+  }
+
+#ifdef WTF_CHANGES
+  inline Span* GetDescriptorEnsureSafe(PageID p)
+  {
+      pagemap_.Ensure(p, 1);
+      return GetDescriptor(p);
+  }
+
+  size_t ReturnedBytes() const;
+#endif
+
+  // Dump state to stderr
+#ifndef WTF_CHANGES
+  void Dump(TCMalloc_Printer* out);
+#endif
+
+  // Return number of bytes allocated from system
+  inline uint64_t SystemBytes() const { return system_bytes_; }
+
+  // Return number of free bytes in heap
+  uint64_t FreeBytes() const {
+    return (static_cast<uint64_t>(free_pages_) << kPageShift);
+  }
+
+  bool Check();
+  bool CheckList(Span* list, Length min_pages, Length max_pages);
+
+  // Release all pages on the free list for reuse by the OS:
+  void ReleaseFreePages();
+
+  // Return 0 if we have no information, or else the correct sizeclass for p.
+  // Reads and writes to pagemap_cache_ do not require locking.
+  // The entries are 64 bits on 64-bit hardware and 16 bits on
+  // 32-bit hardware, and we don't mind raciness as long as each read of
+  // an entry yields a valid entry, not a partially updated entry.
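+  // (Each cache entry packs the upper key bits and the sizeclass into one
+  // integer of that width, so a racing reader sees either the old or the new
+  // entry, never a mixture of the two.)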
+ size_t GetSizeClassIfCached(PageID p) const { + return pagemap_cache_.GetOrDefault(p, 0); + } + void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); } + + private: + // Pick the appropriate map and cache types based on pointer size + typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap; + typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache; + PageMap pagemap_; + mutable PageMapCache pagemap_cache_; + + // We segregate spans of a given size into two circular linked + // lists: one for normal spans, and one for spans whose memory + // has been returned to the system. + struct SpanList { + Span normal; + Span returned; + }; + + // List of free spans of length >= kMaxPages + SpanList large_; + + // Array mapping from span length to a doubly linked list of free spans + SpanList free_[kMaxPages]; + + // Number of pages kept in free lists + uintptr_t free_pages_; + + // Bytes allocated from system + uint64_t system_bytes_; + + bool GrowHeap(Length n); + + // REQUIRES span->length >= n + // Remove span from its free list, and move any leftover part of + // span into appropriate free lists. Also update "span" to have + // length exactly "n" and mark it as non-free so it can be returned + // to the client. + // + // "released" is true iff "span" was found on a "returned" list. + void Carve(Span* span, Length n, bool released); + + void RecordSpan(Span* span) { + pagemap_.set(span->start, span); + if (span->length > 1) { + pagemap_.set(span->start + span->length - 1, span); + } + } + + // Allocate a large span of length == n. If successful, returns a + // span of exactly the specified length. Else, returns NULL. + Span* AllocLarge(Length n); + + // Incrementally release some memory to the system. + // IncrementalScavenge(n) is called whenever n pages are freed. 
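+  // Roughly: after each release the counter is reset so that at least 64 and
+  // at most kDefaultReleaseDelay (256) further pages must be freed before the
+  // next span is released.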
+  void IncrementalScavenge(Length n);
+
+  // Number of pages to deallocate before doing more scavenging
+  int64_t scavenge_counter_;
+
+  // Index of last free list we scavenged
+  size_t scavenge_index_;
+
+#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
+  friend class FastMallocZone;
+#endif
+};
+
+void TCMalloc_PageHeap::init()
+{
+  pagemap_.init(MetaDataAlloc);
+  pagemap_cache_ = PageMapCache(0);
+  free_pages_ = 0;
+  system_bytes_ = 0;
+  scavenge_counter_ = 0;
+  // Start scavenging at kMaxPages list
+  scavenge_index_ = kMaxPages-1;
+  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
+  DLL_Init(&large_.normal);
+  DLL_Init(&large_.returned);
+  for (size_t i = 0; i < kMaxPages; i++) {
+    DLL_Init(&free_[i].normal);
+    DLL_Init(&free_[i].returned);
+  }
+}
+
+inline Span* TCMalloc_PageHeap::New(Length n) {
+  ASSERT(Check());
+  ASSERT(n > 0);
+
+  // Find first size >= n that has a non-empty list
+  for (Length s = n; s < kMaxPages; s++) {
+    Span* ll = NULL;
+    bool released = false;
+    if (!DLL_IsEmpty(&free_[s].normal)) {
+      // Found normal span
+      ll = &free_[s].normal;
+    } else if (!DLL_IsEmpty(&free_[s].returned)) {
+      // Found returned span; reallocate it
+      ll = &free_[s].returned;
+      released = true;
+    } else {
+      // Keep looking in larger classes
+      continue;
+    }
+
+    Span* result = ll->next;
+    Carve(result, n, released);
+#if TCMALLOC_TRACK_DECOMMITED_SPANS
+    if (result->decommitted) {
+        TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
+        result->decommitted = false;
+    }
+#endif
+    ASSERT(Check());
+    free_pages_ -= n;
+    return result;
+  }
+
+  Span* result = AllocLarge(n);
+  if (result != NULL) {
+      ASSERT_SPAN_COMMITTED(result);
+      return result;
+  }
+
+  // Grow the heap and try again
+  if (!GrowHeap(n)) {
+    ASSERT(Check());
+    return NULL;
+  }
+
+  return AllocLarge(n);
+}
+
+Span* TCMalloc_PageHeap::AllocLarge(Length n) {
+  // Find the best span (closest to n in size).
+  // The following loops implement address-ordered best-fit.
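+  // ("Address-ordered" means ties in length are broken by preferring the
+  // span with the lowest starting address, a placement policy known to
+  // reduce fragmentation.)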
+ bool from_released = false; + Span *best = NULL; + + // Search through normal list + for (Span* span = large_.normal.next; + span != &large_.normal; + span = span->next) { + if (span->length >= n) { + if ((best == NULL) + || (span->length < best->length) + || ((span->length == best->length) && (span->start < best->start))) { + best = span; + from_released = false; + } + } + } + + // Search through released list in case it has a better fit + for (Span* span = large_.returned.next; + span != &large_.returned; + span = span->next) { + if (span->length >= n) { + if ((best == NULL) + || (span->length < best->length) + || ((span->length == best->length) && (span->start < best->start))) { + best = span; + from_released = true; + } + } + } + + if (best != NULL) { + Carve(best, n, from_released); +#if TCMALLOC_TRACK_DECOMMITED_SPANS + if (best->decommitted) { + TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift)); + best->decommitted = false; + } +#endif + ASSERT(Check()); + free_pages_ -= n; + return best; + } + return NULL; +} + +Span* TCMalloc_PageHeap::Split(Span* span, Length n) { + ASSERT(0 < n); + ASSERT(n < span->length); + ASSERT(!span->free); + ASSERT(span->sizeclass == 0); + Event(span, 'T', n); + + const Length extra = span->length - n; + Span* leftover = NewSpan(span->start + n, extra); + Event(leftover, 'U', extra); + RecordSpan(leftover); + pagemap_.set(span->start + n - 1, span); // Update map from pageid to span + span->length = n; + + return leftover; +} + +#if !TCMALLOC_TRACK_DECOMMITED_SPANS +static ALWAYS_INLINE void propagateDecommittedState(Span*, Span*) { } +#else +static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source) +{ + destination->decommitted = source->decommitted; +} +#endif + +inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) { + ASSERT(n > 0); + DLL_Remove(span); + span->free = 0; + Event(span, 'A', n); + + const int extra = static_cast<int>(span->length - n); + ASSERT(extra >= 0); + if (extra > 0) { + Span* leftover = NewSpan(span->start + n, extra); + leftover->free = 1; + propagateDecommittedState(leftover, span); + Event(leftover, 'S', extra); + RecordSpan(leftover); + + // Place leftover span on appropriate free list + SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_; + Span* dst = released ? &listpair->returned : &listpair->normal; + DLL_Prepend(dst, leftover); + + span->length = n; + pagemap_.set(span->start + n - 1, span); + } +} + +#if !TCMALLOC_TRACK_DECOMMITED_SPANS +static ALWAYS_INLINE void mergeDecommittedStates(Span*, Span*) { } +#else +static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other) +{ + if (other->decommitted) + destination->decommitted = true; +} +#endif + +inline void TCMalloc_PageHeap::Delete(Span* span) { + ASSERT(Check()); + ASSERT(!span->free); + ASSERT(span->length > 0); + ASSERT(GetDescriptor(span->start) == span); + ASSERT(GetDescriptor(span->start + span->length - 1) == span); + span->sizeclass = 0; +#ifndef NO_TCMALLOC_SAMPLES + span->sample = 0; +#endif + + // Coalesce -- we guarantee that "p" != 0, so no bounds checking + // necessary. We do not bother resetting the stale pagemap + // entries for the pieces we are merging together because we only + // care about the pagemap entries for the boundaries. + // + // Note that the spans we merge into "span" may come out of + // a "returned" list. 
For simplicity, we move these into the + // "normal" list of the appropriate size class. + const PageID p = span->start; + const Length n = span->length; + Span* prev = GetDescriptor(p-1); + if (prev != NULL && prev->free) { + // Merge preceding span into this span + ASSERT(prev->start + prev->length == p); + const Length len = prev->length; + mergeDecommittedStates(span, prev); + DLL_Remove(prev); + DeleteSpan(prev); + span->start -= len; + span->length += len; + pagemap_.set(span->start, span); + Event(span, 'L', len); + } + Span* next = GetDescriptor(p+n); + if (next != NULL && next->free) { + // Merge next span into this span + ASSERT(next->start == p+n); + const Length len = next->length; + mergeDecommittedStates(span, next); + DLL_Remove(next); + DeleteSpan(next); + span->length += len; + pagemap_.set(span->start + span->length - 1, span); + Event(span, 'R', len); + } + + Event(span, 'D', span->length); + span->free = 1; + if (span->length < kMaxPages) { + DLL_Prepend(&free_[span->length].normal, span); + } else { + DLL_Prepend(&large_.normal, span); + } + free_pages_ += n; + + IncrementalScavenge(n); + ASSERT(Check()); +} + +void TCMalloc_PageHeap::IncrementalScavenge(Length n) { + // Fast path; not yet time to release memory + scavenge_counter_ -= n; + if (scavenge_counter_ >= 0) return; // Not yet time to scavenge + + // If there is nothing to release, wait for so many pages before + // scavenging again. With 4K pages, this comes to 16MB of memory. + static const size_t kDefaultReleaseDelay = 1 << 8; + + // Find index of free list to scavenge + size_t index = scavenge_index_ + 1; + for (size_t i = 0; i < kMaxPages+1; i++) { + if (index > kMaxPages) index = 0; + SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index]; + if (!DLL_IsEmpty(&slist->normal)) { + // Release the last span on the normal portion of this list + Span* s = slist->normal.prev; + DLL_Remove(s); + TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift), + static_cast<size_t>(s->length << kPageShift)); +#if TCMALLOC_TRACK_DECOMMITED_SPANS + s->decommitted = true; +#endif + DLL_Prepend(&slist->returned, s); + + scavenge_counter_ = std::max<size_t>(64UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay))); + + if (index == kMaxPages && !DLL_IsEmpty(&slist->normal)) + scavenge_index_ = index - 1; + else + scavenge_index_ = index; + return; + } + index++; + } + + // Nothing to scavenge, delay for a while + scavenge_counter_ = kDefaultReleaseDelay; +} + +void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) { + // Associate span object with all interior pages as well + ASSERT(!span->free); + ASSERT(GetDescriptor(span->start) == span); + ASSERT(GetDescriptor(span->start+span->length-1) == span); + Event(span, 'C', sc); + span->sizeclass = static_cast<unsigned int>(sc); + for (Length i = 1; i < span->length-1; i++) { + pagemap_.set(span->start+i, span); + } +} + +#ifdef WTF_CHANGES +size_t TCMalloc_PageHeap::ReturnedBytes() const { + size_t result = 0; + for (unsigned s = 0; s < kMaxPages; s++) { + const int r_length = DLL_Length(&free_[s].returned); + unsigned r_pages = s * r_length; + result += r_pages << kPageShift; + } + + for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) + result += s->length << kPageShift; + return result; +} +#endif + +#ifndef WTF_CHANGES +static double PagesToMB(uint64_t pages) { + return (pages << kPageShift) / 1048576.0; +} + +void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) { + int 
nonempty_sizes = 0; + for (int s = 0; s < kMaxPages; s++) { + if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) { + nonempty_sizes++; + } + } + out->printf("------------------------------------------------\n"); + out->printf("PageHeap: %d sizes; %6.1f MB free\n", + nonempty_sizes, PagesToMB(free_pages_)); + out->printf("------------------------------------------------\n"); + uint64_t total_normal = 0; + uint64_t total_returned = 0; + for (int s = 0; s < kMaxPages; s++) { + const int n_length = DLL_Length(&free_[s].normal); + const int r_length = DLL_Length(&free_[s].returned); + if (n_length + r_length > 0) { + uint64_t n_pages = s * n_length; + uint64_t r_pages = s * r_length; + total_normal += n_pages; + total_returned += r_pages; + out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum" + "; unmapped: %6.1f MB; %6.1f MB cum\n", + s, + (n_length + r_length), + PagesToMB(n_pages + r_pages), + PagesToMB(total_normal + total_returned), + PagesToMB(r_pages), + PagesToMB(total_returned)); + } + } + + uint64_t n_pages = 0; + uint64_t r_pages = 0; + int n_spans = 0; + int r_spans = 0; + out->printf("Normal large spans:\n"); + for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) { + out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n", + s->length, PagesToMB(s->length)); + n_pages += s->length; + n_spans++; + } + out->printf("Unmapped large spans:\n"); + for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) { + out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n", + s->length, PagesToMB(s->length)); + r_pages += s->length; + r_spans++; + } + total_normal += n_pages; + total_returned += r_pages; + out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum" + "; unmapped: %6.1f MB; %6.1f MB cum\n", + (n_spans + r_spans), + PagesToMB(n_pages + r_pages), + PagesToMB(total_normal + total_returned), + PagesToMB(r_pages), + PagesToMB(total_returned)); +} +#endif + +bool TCMalloc_PageHeap::GrowHeap(Length n) { + ASSERT(kMaxPages >= kMinSystemAlloc); + if (n > kMaxValidPages) return false; + Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc); + size_t actual_size; + void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); + if (ptr == NULL) { + if (n < ask) { + // Try growing just "n" pages + ask = n; + ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); + } + if (ptr == NULL) return false; + } + ask = actual_size >> kPageShift; + + uint64_t old_system_bytes = system_bytes_; + system_bytes_ += (ask << kPageShift); + const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; + ASSERT(p > 0); + + // If we have already a lot of pages allocated, just pre allocate a bunch of + // memory for the page map. This prevents fragmentation by pagemap metadata + // when a program keeps allocating and freeing large blocks. + + if (old_system_bytes < kPageMapBigAllocationThreshold + && system_bytes_ >= kPageMapBigAllocationThreshold) { + pagemap_.PreallocateMoreMemory(); + } + + // Make sure pagemap_ has entries for all of the new pages. + // Plus ensure one before and one after so coalescing code + // does not need bounds-checking. + if (pagemap_.Ensure(p-1, ask+2)) { + // Pretend the new area is allocated and then Delete() it to + // cause any necessary coalescing to occur. + // + // We do not adjust free_pages_ here since Delete() will do it for us. 
+ Span* span = NewSpan(p, ask); + RecordSpan(span); + Delete(span); + ASSERT(Check()); + return true; + } else { + // We could not allocate memory within "pagemap_" + // TODO: Once we can return memory to the system, return the new span + return false; + } +} + +bool TCMalloc_PageHeap::Check() { + ASSERT(free_[0].normal.next == &free_[0].normal); + ASSERT(free_[0].returned.next == &free_[0].returned); + CheckList(&large_.normal, kMaxPages, 1000000000); + CheckList(&large_.returned, kMaxPages, 1000000000); + for (Length s = 1; s < kMaxPages; s++) { + CheckList(&free_[s].normal, s, s); + CheckList(&free_[s].returned, s, s); + } + return true; +} + +#if ASSERT_DISABLED +bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) { + return true; +} +#else +bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) { + for (Span* s = list->next; s != list; s = s->next) { + CHECK_CONDITION(s->free); + CHECK_CONDITION(s->length >= min_pages); + CHECK_CONDITION(s->length <= max_pages); + CHECK_CONDITION(GetDescriptor(s->start) == s); + CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s); + } + return true; +} +#endif + +static void ReleaseFreeList(Span* list, Span* returned) { + // Walk backwards through list so that when we push these + // spans on the "returned" list, we preserve the order. + while (!DLL_IsEmpty(list)) { + Span* s = list->prev; + DLL_Remove(s); + DLL_Prepend(returned, s); + TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift), + static_cast<size_t>(s->length << kPageShift)); + } +} + +void TCMalloc_PageHeap::ReleaseFreePages() { + for (Length s = 0; s < kMaxPages; s++) { + ReleaseFreeList(&free_[s].normal, &free_[s].returned); + } + ReleaseFreeList(&large_.normal, &large_.returned); + ASSERT(Check()); +} + +//------------------------------------------------------------------- +// Free list +//------------------------------------------------------------------- + +class TCMalloc_ThreadCache_FreeList { + private: + void* list_; // Linked list of nodes + uint16_t length_; // Current length + uint16_t lowater_; // Low water mark for list length + + public: + void Init() { + list_ = NULL; + length_ = 0; + lowater_ = 0; + } + + // Return current length of list + int length() const { + return length_; + } + + // Is list empty? 
+ bool empty() const { + return list_ == NULL; + } + + // Low-water mark management + int lowwatermark() const { return lowater_; } + void clear_lowwatermark() { lowater_ = length_; } + + ALWAYS_INLINE void Push(void* ptr) { + SLL_Push(&list_, ptr); + length_++; + } + + void PushRange(int N, void *start, void *end) { + SLL_PushRange(&list_, start, end); + length_ = length_ + static_cast<uint16_t>(N); + } + + void PopRange(int N, void **start, void **end) { + SLL_PopRange(&list_, N, start, end); + ASSERT(length_ >= N); + length_ = length_ - static_cast<uint16_t>(N); + if (length_ < lowater_) lowater_ = length_; + } + + ALWAYS_INLINE void* Pop() { + ASSERT(list_ != NULL); + length_--; + if (length_ < lowater_) lowater_ = length_; + return SLL_Pop(&list_); + } + +#ifdef WTF_CHANGES + template <class Finder, class Reader> + void enumerateFreeObjects(Finder& finder, const Reader& reader) + { + for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject))) + finder.visit(nextObject); + } +#endif +}; + +//------------------------------------------------------------------- +// Data kept per thread +//------------------------------------------------------------------- + +class TCMalloc_ThreadCache { + private: + typedef TCMalloc_ThreadCache_FreeList FreeList; +#if COMPILER(MSVC) + typedef DWORD ThreadIdentifier; +#else + typedef pthread_t ThreadIdentifier; +#endif + + size_t size_; // Combined size of data + ThreadIdentifier tid_; // Which thread owns it + bool in_setspecific_; // Called pthread_setspecific? + FreeList list_[kNumClasses]; // Array indexed by size-class + + // We sample allocations, biased by the size of the allocation + uint32_t rnd_; // Cheap random number generator + size_t bytes_until_sample_; // Bytes until we sample next + + // Allocate a new heap. REQUIRES: pageheap_lock is held. + static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid); + + // Use only as pthread thread-specific destructor function. + static void DestroyThreadCache(void* ptr); + public: + // All ThreadCache objects are kept in a linked list (for stats collection) + TCMalloc_ThreadCache* next_; + TCMalloc_ThreadCache* prev_; + + void Init(ThreadIdentifier tid); + void Cleanup(); + + // Accessors (mostly just for printing stats) + int freelist_length(size_t cl) const { return list_[cl].length(); } + + // Total byte size in cache + size_t Size() const { return size_; } + + void* Allocate(size_t size); + void Deallocate(void* ptr, size_t size_class); + + void FetchFromCentralCache(size_t cl, size_t allocationSize); + void ReleaseToCentralCache(size_t cl, int N); + void Scavenge(); + void Print() const; + + // Record allocation of "k" bytes. 
Return true iff allocation + // should be sampled + bool SampleAllocation(size_t k); + + // Pick next sampling point + void PickNextSample(size_t k); + + static void InitModule(); + static void InitTSD(); + static TCMalloc_ThreadCache* GetThreadHeap(); + static TCMalloc_ThreadCache* GetCache(); + static TCMalloc_ThreadCache* GetCacheIfPresent(); + static TCMalloc_ThreadCache* CreateCacheIfNecessary(); + static void DeleteCache(TCMalloc_ThreadCache* heap); + static void BecomeIdle(); + static void RecomputeThreadCacheSize(); + +#ifdef WTF_CHANGES + template <class Finder, class Reader> + void enumerateFreeObjects(Finder& finder, const Reader& reader) + { + for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++) + list_[sizeClass].enumerateFreeObjects(finder, reader); + } +#endif +}; + +//------------------------------------------------------------------- +// Data kept per size-class in central cache +//------------------------------------------------------------------- + +class TCMalloc_Central_FreeList { + public: + void Init(size_t cl); + + // These methods all do internal locking. + + // Insert the specified range into the central freelist. N is the number of + // elements in the range. + void InsertRange(void *start, void *end, int N); + + // Returns the actual number of fetched elements into N. + void RemoveRange(void **start, void **end, int *N); + + // Returns the number of free objects in cache. + size_t length() { + SpinLockHolder h(&lock_); + return counter_; + } + + // Returns the number of free objects in the transfer cache. + int tc_length() { + SpinLockHolder h(&lock_); + return used_slots_ * num_objects_to_move[size_class_]; + } + +#ifdef WTF_CHANGES + template <class Finder, class Reader> + void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList) + { + for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0)) + ASSERT(!span->objects); + + ASSERT(!nonempty_.objects); + static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this); + + Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset); + Span* remoteSpan = nonempty_.next; + + for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) { + for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject))) + finder.visit(nextObject); + } + } +#endif + + private: + // REQUIRES: lock_ is held + // Remove object from cache and return. + // Return NULL if no free entries in cache. + void* FetchFromSpans(); + + // REQUIRES: lock_ is held + // Remove object from cache and return. Fetches + // from pageheap if cache is empty. Only returns + // NULL on allocation failure. + void* FetchFromSpansSafe(); + + // REQUIRES: lock_ is held + // Release a linked list of objects to spans. + // May temporarily release lock_. + void ReleaseListToSpans(void *start); + + // REQUIRES: lock_ is held + // Release an object to spans. + // May temporarily release lock_. + void ReleaseToSpans(void* object); + + // REQUIRES: lock_ is held + // Populate cache by fetching from the page heap. + // May temporarily release lock_. + void Populate(); + + // REQUIRES: lock is held. + // Tries to make room for a TCEntry. If the cache is full it will try to + // expand it at the cost of some other cache size. 
Return false if there is
+  // no space.
+  bool MakeCacheSpace();
+
+  // REQUIRES: lock_ for locked_size_class is held.
+  // Picks a "random" size class to steal TCEntry slot from. In reality it
+  // just iterates over the sizeclasses but does so without taking a lock.
+  // Returns true on success.
+  // May temporarily lock a "random" size class.
+  static bool EvictRandomSizeClass(size_t locked_size_class, bool force);
+
+  // REQUIRES: lock_ is *not* held.
+  // Tries to shrink the Cache. If force is true it will release objects to
+  // spans if it allows it to shrink the cache. Return false if it failed to
+  // shrink the cache. Decrements cache_size_ on success.
+  // May temporarily take lock_. If it takes lock_, the locked_size_class
+  // lock is released to keep the thread from holding two size class locks
+  // concurrently, which could lead to a deadlock.
+  bool ShrinkCache(int locked_size_class, bool force);
+
+  // This lock protects all the data members. used_slots_ and cache_size_
+  // may be looked at without holding the lock.
+  SpinLock lock_;
+
+  // We keep linked lists of empty and non-empty spans.
+  size_t size_class_; // My size class
+  Span empty_;        // Dummy header for list of empty spans
+  Span nonempty_;     // Dummy header for list of non-empty spans
+  size_t counter_;    // Number of free objects in cache entry
+
+  // Here we reserve space for TCEntry cache slots. Since one size class can
+  // end up getting all the TCEntries quota in the system we just preallocate
+  // a sufficient number of entries here.
+  TCEntry tc_slots_[kNumTransferEntries];
+
+  // Number of currently used cached entries in tc_slots_. This variable is
+  // updated under a lock but can be read without one.
+  int32_t used_slots_;
+  // The current number of slots for this size class. This is an
+  // adaptive value that is increased if there is lots of traffic
+  // on a given size class.
+  int32_t cache_size_;
+};
+
+// Pad each CentralCache object to multiple of 64 bytes
+class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
+ private:
+  char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
+};
+
+//-------------------------------------------------------------------
+// Global variables
+//-------------------------------------------------------------------
+
+// Central cache -- a collection of free-lists, one per size-class.
+// We have a separate lock per free-list to reduce contention.
+static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
+
+// Page-level allocator
+static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
+static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
+static bool phinited = false;
+
+// Avoid extra level of indirection by making "pageheap" be just an alias
+// of pageheap_memory.
+typedef union {
+    void* m_memory;
+    TCMalloc_PageHeap* m_pageHeap;
+} PageHeapUnion;
+
+static inline TCMalloc_PageHeap* getPageHeap()
+{
+    PageHeapUnion u = { &pageheap_memory[0] };
+    return u.m_pageHeap;
+}
+
+#define pageheap getPageHeap()
+
+// If TLS is available, we also store a copy
+// of the per-thread object in a __thread variable
+// since __thread variables are faster to read
+// than pthread_getspecific(). We still need
+// pthread_setspecific() because __thread
+// variables provide no way to run cleanup
+// code when a thread is destroyed.
+#ifdef HAVE_TLS
+static __thread TCMalloc_ThreadCache *threadlocal_heap;
+#endif
+// Thread-specific key. Initialization here is somewhat tricky
+// because some Linux startup code invokes malloc() before it
+// is in a good enough state to handle pthread_key_create().
+// Therefore, we use TSD keys only after tsd_inited is set to true.
+// Until then, we use a slow path to get the heap object.
+static bool tsd_inited = false;
+static pthread_key_t heap_key;
+#if COMPILER(MSVC)
+DWORD tlsIndex = TLS_OUT_OF_INDEXES;
+#endif
+
+static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
+{
+    // Still do pthread_setspecific when using MSVC fast TLS to
+    // benefit from the delete callback.
+    pthread_setspecific(heap_key, heap);
+#if COMPILER(MSVC)
+    TlsSetValue(tlsIndex, heap);
+#endif
+}
+
+// Allocator for thread heaps
+static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
+
+// Linked list of heap objects. Protected by pageheap_lock.
+static TCMalloc_ThreadCache* thread_heaps = NULL;
+static int thread_heap_count = 0;
+
+// Overall thread cache size. Protected by pageheap_lock.
+static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
+
+// Global per-thread cache size. Writes are protected by
+// pageheap_lock. Reads are done without any locking, which should be
+// fine as long as size_t can be written atomically and we don't place
+// invariants between this variable and other pieces of state.
+static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
+
+//-------------------------------------------------------------------
+// Central cache implementation
+//-------------------------------------------------------------------
+
+void TCMalloc_Central_FreeList::Init(size_t cl) {
+  lock_.Init();
+  size_class_ = cl;
+  DLL_Init(&empty_);
+  DLL_Init(&nonempty_);
+  counter_ = 0;
+
+  cache_size_ = 1;
+  used_slots_ = 0;
+  ASSERT(cache_size_ <= kNumTransferEntries);
+}
+
+void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
+  while (start) {
+    void *next = SLL_Next(start);
+    ReleaseToSpans(start);
+    start = next;
+  }
+}
+
+ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
+  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
+  Span* span = pageheap->GetDescriptor(p);
+  ASSERT(span != NULL);
+  ASSERT(span->refcount > 0);
+
+  // If span is empty, move it to non-empty list
+  if (span->objects == NULL) {
+    DLL_Remove(span);
+    DLL_Prepend(&nonempty_, span);
+    Event(span, 'N', 0);
+  }
+
+  // The following check is expensive, so it is disabled by default
+  if (false) {
+    // Check that object does not occur in list
+    int got = 0;
+    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
+      ASSERT(p != object);
+      got++;
+    }
+    ASSERT(got + span->refcount ==
+           (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
+  }
+
+  counter_++;
+  span->refcount--;
+  if (span->refcount == 0) {
+    Event(span, '#', 0);
+    counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
+    DLL_Remove(span);
+
+    // Release central list lock while operating on pageheap
+    lock_.Unlock();
+    {
+      SpinLockHolder h(&pageheap_lock);
+      pageheap->Delete(span);
+    }
+    lock_.Lock();
+  } else {
+    *(reinterpret_cast<void**>(object)) = span->objects;
+    span->objects = object;
+  }
+}
+
+ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
+    size_t locked_size_class, bool force) {
+  static int race_counter = 0;
+  int t = race_counter++;  // Updated without a lock, but who cares.
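+  // Illustrative note (not part of the original source): the wrap-around
+  // handling below is simply t %= kNumClasses for a counter that may have
+  // grown past the number of classes; e.g. with a hypothetical kNumClasses
+  // of 68, t == 135 reduces to 67 after one pass through the loop.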
+  if (t >= static_cast<int>(kNumClasses)) {
+    while (t >= static_cast<int>(kNumClasses)) {
+      t -= kNumClasses;
+    }
+    race_counter = t;
+  }
+  ASSERT(t >= 0);
+  ASSERT(t < static_cast<int>(kNumClasses));
+  if (t == static_cast<int>(locked_size_class)) return false;
+  return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
+}
+
+bool TCMalloc_Central_FreeList::MakeCacheSpace() {
+  // Is there room in the cache?
+  if (used_slots_ < cache_size_) return true;
+  // Check if we can expand this cache?
+  if (cache_size_ == kNumTransferEntries) return false;
+  // Ok, we'll try to grab an entry from some other size class.
+  if (EvictRandomSizeClass(size_class_, false) ||
+      EvictRandomSizeClass(size_class_, true)) {
+    // Succeeded in evicting, we're going to make our cache larger.
+    cache_size_++;
+    return true;
+  }
+  return false;
+}
+
+
+namespace {
+class LockInverter {
+ private:
+  SpinLock *held_, *temp_;
+ public:
+  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
+    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
+  inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
+};
+}
+
+bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
+  // Start with a quick check without taking a lock.
+  if (cache_size_ == 0) return false;
+  // We don't evict from a full cache unless we are 'forcing'.
+  if (force == false && used_slots_ == cache_size_) return false;
+
+  // Grab lock, but first release the other lock held by this thread. We use
+  // the lock inverter to ensure that we never hold two size class locks
+  // concurrently. That can create a deadlock because there is no well
+  // defined nesting order.
+  LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
+  ASSERT(used_slots_ <= cache_size_);
+  ASSERT(0 <= cache_size_);
+  if (cache_size_ == 0) return false;
+  if (used_slots_ == cache_size_) {
+    if (force == false) return false;
+    // ReleaseListToSpans releases the lock, so we have to make all the
+    // updates to the central list before calling it.
+    cache_size_--;
+    used_slots_--;
+    ReleaseListToSpans(tc_slots_[used_slots_].head);
+    return true;
+  }
+  cache_size_--;
+  return true;
+}
+
+void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
+  SpinLockHolder h(&lock_);
+  if (N == num_objects_to_move[size_class_] &&
+      MakeCacheSpace()) {
+    int slot = used_slots_++;
+    ASSERT(slot >= 0);
+    ASSERT(slot < kNumTransferEntries);
+    TCEntry *entry = &tc_slots_[slot];
+    entry->head = start;
+    entry->tail = end;
+    return;
+  }
+  ReleaseListToSpans(start);
+}
+
+void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
+  int num = *N;
+  ASSERT(num > 0);
+
+  SpinLockHolder h(&lock_);
+  if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
+    int slot = --used_slots_;
+    ASSERT(slot >= 0);
+    TCEntry *entry = &tc_slots_[slot];
+    *start = entry->head;
+    *end = entry->tail;
+    return;
+  }
+
+  // TODO: Prefetch multiple TCEntries?
+  void *tail = FetchFromSpansSafe();
+  if (!tail) {
+    // We are completely out of memory.
+ *start = *end = NULL; + *N = 0; + return; + } + + SLL_SetNext(tail, NULL); + void *head = tail; + int count = 1; + while (count < num) { + void *t = FetchFromSpans(); + if (!t) break; + SLL_Push(&head, t); + count++; + } + *start = head; + *end = tail; + *N = count; +} + + +void* TCMalloc_Central_FreeList::FetchFromSpansSafe() { + void *t = FetchFromSpans(); + if (!t) { + Populate(); + t = FetchFromSpans(); + } + return t; +} + +void* TCMalloc_Central_FreeList::FetchFromSpans() { + if (DLL_IsEmpty(&nonempty_)) return NULL; + Span* span = nonempty_.next; + + ASSERT(span->objects != NULL); + ASSERT_SPAN_COMMITTED(span); + span->refcount++; + void* result = span->objects; + span->objects = *(reinterpret_cast<void**>(result)); + if (span->objects == NULL) { + // Move to empty list + DLL_Remove(span); + DLL_Prepend(&empty_, span); + Event(span, 'E', 0); + } + counter_--; + return result; +} + +// Fetch memory from the system and add to the central cache freelist. +ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() { + // Release central list lock while operating on pageheap + lock_.Unlock(); + const size_t npages = class_to_pages[size_class_]; + + Span* span; + { + SpinLockHolder h(&pageheap_lock); + span = pageheap->New(npages); + if (span) pageheap->RegisterSizeClass(span, size_class_); + } + if (span == NULL) { + MESSAGE("allocation failed: %d\n", errno); + lock_.Lock(); + return; + } + ASSERT_SPAN_COMMITTED(span); + ASSERT(span->length == npages); + // Cache sizeclass info eagerly. Locking is not necessary. + // (Instead of being eager, we could just replace any stale info + // about this span, but that seems to be no better in practice.) + for (size_t i = 0; i < npages; i++) { + pageheap->CacheSizeClass(span->start + i, size_class_); + } + + // Split the block into pieces and add to the free-list + // TODO: coloring of objects to avoid cache conflicts? 
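+  // Sketch of the free list built by the loop below (not part of the
+  // original source; assumes 4K pages and a hypothetical object size of 64):
+  // each object's first word points at the next object, so a one-page span
+  // at address A ends up threaded as
+  //
+  //   span->objects -> A -> A+64 -> A+128 -> ... -> A+4032 -> NULL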
+ void** tail = &span->objects; + char* ptr = reinterpret_cast<char*>(span->start << kPageShift); + char* limit = ptr + (npages << kPageShift); + const size_t size = ByteSizeForClass(size_class_); + int num = 0; + char* nptr; + while ((nptr = ptr + size) <= limit) { + *tail = ptr; + tail = reinterpret_cast<void**>(ptr); + ptr = nptr; + num++; + } + ASSERT(ptr <= limit); + *tail = NULL; + span->refcount = 0; // No sub-object in use yet + + // Add span to list of non-empty spans + lock_.Lock(); + DLL_Prepend(&nonempty_, span); + counter_ += num; +} + +//------------------------------------------------------------------- +// TCMalloc_ThreadCache implementation +//------------------------------------------------------------------- + +inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) { + if (bytes_until_sample_ < k) { + PickNextSample(k); + return true; + } else { + bytes_until_sample_ -= k; + return false; + } +} + +void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) { + size_ = 0; + next_ = NULL; + prev_ = NULL; + tid_ = tid; + in_setspecific_ = false; + for (size_t cl = 0; cl < kNumClasses; ++cl) { + list_[cl].Init(); + } + + // Initialize RNG -- run it for a bit to get to good values + bytes_until_sample_ = 0; + rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)); + for (int i = 0; i < 100; i++) { + PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2)); + } +} + +void TCMalloc_ThreadCache::Cleanup() { + // Put unused memory back into central cache + for (size_t cl = 0; cl < kNumClasses; ++cl) { + if (list_[cl].length() > 0) { + ReleaseToCentralCache(cl, list_[cl].length()); + } + } +} + +ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) { + ASSERT(size <= kMaxSize); + const size_t cl = SizeClass(size); + FreeList* list = &list_[cl]; + size_t allocationSize = ByteSizeForClass(cl); + if (list->empty()) { + FetchFromCentralCache(cl, allocationSize); + if (list->empty()) return NULL; + } + size_ -= allocationSize; + return list->Pop(); +} + +inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) { + size_ += ByteSizeForClass(cl); + FreeList* list = &list_[cl]; + list->Push(ptr); + // If enough data is free, put back into central cache + if (list->length() > kMaxFreeListLength) { + ReleaseToCentralCache(cl, num_objects_to_move[cl]); + } + if (size_ >= per_thread_cache_size) Scavenge(); +} + +// Remove some objects of class "cl" from central cache and add to thread heap +ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) { + int fetch_count = num_objects_to_move[cl]; + void *start, *end; + central_cache[cl].RemoveRange(&start, &end, &fetch_count); + list_[cl].PushRange(fetch_count, start, end); + size_ += allocationSize * fetch_count; +} + +// Remove some objects of class "cl" from thread heap and add to central cache +inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) { + ASSERT(N > 0); + FreeList* src = &list_[cl]; + if (N > src->length()) N = src->length(); + size_ -= N*ByteSizeForClass(cl); + + // We return prepackaged chains of the correct size to the central cache. + // TODO: Use the same format internally in the thread caches? 
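+  // Worked example with hypothetical numbers (not part of the original
+  // source): with batch_size == 32 and N == 70, the loop below hands the
+  // central cache two prepackaged chains of 32 objects each, and the final
+  // PopRange/InsertRange pair moves the remaining 6.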
+ int batch_size = num_objects_to_move[cl]; + while (N > batch_size) { + void *tail, *head; + src->PopRange(batch_size, &head, &tail); + central_cache[cl].InsertRange(head, tail, batch_size); + N -= batch_size; + } + void *tail, *head; + src->PopRange(N, &head, &tail); + central_cache[cl].InsertRange(head, tail, N); +} + +// Release idle memory to the central cache +inline void TCMalloc_ThreadCache::Scavenge() { + // If the low-water mark for the free list is L, it means we would + // not have had to allocate anything from the central cache even if + // we had reduced the free list size by L. We aim to get closer to + // that situation by dropping L/2 nodes from the free list. This + // may not release much memory, but if so we will call scavenge again + // pretty soon and the low-water marks will be high on that call. + //int64 start = CycleClock::Now(); + + for (size_t cl = 0; cl < kNumClasses; cl++) { + FreeList* list = &list_[cl]; + const int lowmark = list->lowwatermark(); + if (lowmark > 0) { + const int drop = (lowmark > 1) ? lowmark/2 : 1; + ReleaseToCentralCache(cl, drop); + } + list->clear_lowwatermark(); + } + + //int64 finish = CycleClock::Now(); + //CycleTimer ct; + //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0); +} + +void TCMalloc_ThreadCache::PickNextSample(size_t k) { + // Make next "random" number + // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers + static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0); + uint32_t r = rnd_; + rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly); + + // Next point is "rnd_ % (sample_period)". I.e., average + // increment is "sample_period/2". + const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter); + static int last_flag_value = -1; + + if (flag_value != last_flag_value) { + SpinLockHolder h(&sample_period_lock); + int i; + for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) { + if (primes_list[i] >= flag_value) { + break; + } + } + sample_period = primes_list[i]; + last_flag_value = flag_value; + } + + bytes_until_sample_ += rnd_ % sample_period; + + if (k > (static_cast<size_t>(-1) >> 2)) { + // If the user has asked for a huge allocation then it is possible + // for the code below to loop infinitely. Just return (note that + // this throws off the sampling accuracy somewhat, but a user who + // is allocating more than 1G of memory at a time can live with a + // minor inaccuracy in profiling of small allocations, and also + // would rather not wait for the loop below to terminate). + return; + } + + while (bytes_until_sample_ < k) { + // Increase bytes_until_sample_ by enough average sampling periods + // (sample_period >> 1) to allow us to sample past the current + // allocation. + bytes_until_sample_ += (sample_period >> 1); + } + + bytes_until_sample_ -= k; +} + +void TCMalloc_ThreadCache::InitModule() { + // There is a slight potential race here because of double-checked + // locking idiom. However, as long as the program does a small + // allocation before switching to multi-threaded mode, we will be + // fine. We increase the chances of doing such a small allocation + // by doing one in the constructor of the module_enter_exit_hook + // object declared below. 
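+  // Illustrative note (not part of the original source): the "small
+  // allocation" referred to above is the free(malloc(1)) pair performed by
+  // the TCMallocGuard constructor later in this file, which runs during
+  // static initialization, before main().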
+ SpinLockHolder h(&pageheap_lock); + if (!phinited) { +#ifdef WTF_CHANGES + InitTSD(); +#endif + InitSizeClasses(); + threadheap_allocator.Init(); + span_allocator.Init(); + span_allocator.New(); // Reduce cache conflicts + span_allocator.New(); // Reduce cache conflicts + stacktrace_allocator.Init(); + DLL_Init(&sampled_objects); + for (size_t i = 0; i < kNumClasses; ++i) { + central_cache[i].Init(i); + } + pageheap->init(); + phinited = 1; +#if defined(WTF_CHANGES) && PLATFORM(DARWIN) + FastMallocZone::init(); +#endif + } +} + +inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) { + // Create the heap and add it to the linked list + TCMalloc_ThreadCache *heap = threadheap_allocator.New(); + heap->Init(tid); + heap->next_ = thread_heaps; + heap->prev_ = NULL; + if (thread_heaps != NULL) thread_heaps->prev_ = heap; + thread_heaps = heap; + thread_heap_count++; + RecomputeThreadCacheSize(); + return heap; +} + +inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() { +#ifdef HAVE_TLS + // __thread is faster, but only when the kernel supports it + if (KernelSupportsTLS()) + return threadlocal_heap; +#elif COMPILER(MSVC) + return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex)); +#else + return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key)); +#endif +} + +inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() { + TCMalloc_ThreadCache* ptr = NULL; + if (!tsd_inited) { + InitModule(); + } else { + ptr = GetThreadHeap(); + } + if (ptr == NULL) ptr = CreateCacheIfNecessary(); + return ptr; +} + +// In deletion paths, we do not try to create a thread-cache. This is +// because we may be in the thread destruction code and may have +// already cleaned up the cache for this thread. +inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() { + if (!tsd_inited) return NULL; + void* const p = GetThreadHeap(); + return reinterpret_cast<TCMalloc_ThreadCache*>(p); +} + +void TCMalloc_ThreadCache::InitTSD() { + ASSERT(!tsd_inited); + pthread_key_create(&heap_key, DestroyThreadCache); +#if COMPILER(MSVC) + tlsIndex = TlsAlloc(); +#endif + tsd_inited = true; + +#if !COMPILER(MSVC) + // We may have used a fake pthread_t for the main thread. Fix it. + pthread_t zero; + memset(&zero, 0, sizeof(zero)); +#endif +#ifndef WTF_CHANGES + SpinLockHolder h(&pageheap_lock); +#else + ASSERT(pageheap_lock.IsHeld()); +#endif + for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) { +#if COMPILER(MSVC) + if (h->tid_ == 0) { + h->tid_ = GetCurrentThreadId(); + } +#else + if (pthread_equal(h->tid_, zero)) { + h->tid_ = pthread_self(); + } +#endif + } +} + +TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() { + // Initialize per-thread data if necessary + TCMalloc_ThreadCache* heap = NULL; + { + SpinLockHolder h(&pageheap_lock); + +#if COMPILER(MSVC) + DWORD me; + if (!tsd_inited) { + me = 0; + } else { + me = GetCurrentThreadId(); + } +#else + // Early on in glibc's life, we cannot even call pthread_self() + pthread_t me; + if (!tsd_inited) { + memset(&me, 0, sizeof(me)); + } else { + me = pthread_self(); + } +#endif + + // This may be a recursive malloc call from pthread_setspecific() + // In that case, the heap for this thread has already been created + // and added to the linked list. So we search for that first. 
+ for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) { +#if COMPILER(MSVC) + if (h->tid_ == me) { +#else + if (pthread_equal(h->tid_, me)) { +#endif + heap = h; + break; + } + } + + if (heap == NULL) heap = NewHeap(me); + } + + // We call pthread_setspecific() outside the lock because it may + // call malloc() recursively. The recursive call will never get + // here again because it will find the already allocated heap in the + // linked list of heaps. + if (!heap->in_setspecific_ && tsd_inited) { + heap->in_setspecific_ = true; + setThreadHeap(heap); + } + return heap; +} + +void TCMalloc_ThreadCache::BecomeIdle() { + if (!tsd_inited) return; // No caches yet + TCMalloc_ThreadCache* heap = GetThreadHeap(); + if (heap == NULL) return; // No thread cache to remove + if (heap->in_setspecific_) return; // Do not disturb the active caller + + heap->in_setspecific_ = true; + pthread_setspecific(heap_key, NULL); +#ifdef HAVE_TLS + // Also update the copy in __thread + threadlocal_heap = NULL; +#endif + heap->in_setspecific_ = false; + if (GetThreadHeap() == heap) { + // Somehow heap got reinstated by a recursive call to malloc + // from pthread_setspecific. We give up in this case. + return; + } + + // We can now get rid of the heap + DeleteCache(heap); +} + +void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) { + // Note that "ptr" cannot be NULL since pthread promises not + // to invoke the destructor on NULL values, but for safety, + // we check anyway. + if (ptr == NULL) return; +#ifdef HAVE_TLS + // Prevent fast path of GetThreadHeap() from returning heap. + threadlocal_heap = NULL; +#endif + DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr)); +} + +void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) { + // Remove all memory from heap + heap->Cleanup(); + + // Remove from linked list + SpinLockHolder h(&pageheap_lock); + if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_; + if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_; + if (thread_heaps == heap) thread_heaps = heap->next_; + thread_heap_count--; + RecomputeThreadCacheSize(); + + threadheap_allocator.Delete(heap); +} + +void TCMalloc_ThreadCache::RecomputeThreadCacheSize() { + // Divide available space across threads + int n = thread_heap_count > 0 ? thread_heap_count : 1; + size_t space = overall_thread_cache_size / n; + + // Limit to allowed range + if (space < kMinThreadCacheSize) space = kMinThreadCacheSize; + if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize; + + per_thread_cache_size = space; +} + +void TCMalloc_ThreadCache::Print() const { + for (size_t cl = 0; cl < kNumClasses; ++cl) { + MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n", + ByteSizeForClass(cl), + list_[cl].length(), + list_[cl].lowwatermark()); + } +} + +// Extract interesting stats +struct TCMallocStats { + uint64_t system_bytes; // Bytes alloced from system + uint64_t thread_bytes; // Bytes in thread caches + uint64_t central_bytes; // Bytes in central cache + uint64_t transfer_bytes; // Bytes in central transfer cache + uint64_t pageheap_bytes; // Bytes in page heap + uint64_t metadata_bytes; // Bytes alloced for metadata +}; + +#ifndef WTF_CHANGES +// Get stats into "r". 
Also get per-size-class counts if class_count != NULL +static void ExtractStats(TCMallocStats* r, uint64_t* class_count) { + r->central_bytes = 0; + r->transfer_bytes = 0; + for (int cl = 0; cl < kNumClasses; ++cl) { + const int length = central_cache[cl].length(); + const int tc_length = central_cache[cl].tc_length(); + r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length; + r->transfer_bytes += + static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length; + if (class_count) class_count[cl] = length + tc_length; + } + + // Add stats from per-thread heaps + r->thread_bytes = 0; + { // scope + SpinLockHolder h(&pageheap_lock); + for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) { + r->thread_bytes += h->Size(); + if (class_count) { + for (size_t cl = 0; cl < kNumClasses; ++cl) { + class_count[cl] += h->freelist_length(cl); + } + } + } + } + + { //scope + SpinLockHolder h(&pageheap_lock); + r->system_bytes = pageheap->SystemBytes(); + r->metadata_bytes = metadata_system_bytes; + r->pageheap_bytes = pageheap->FreeBytes(); + } +} +#endif + +#ifndef WTF_CHANGES +// WRITE stats to "out" +static void DumpStats(TCMalloc_Printer* out, int level) { + TCMallocStats stats; + uint64_t class_count[kNumClasses]; + ExtractStats(&stats, (level >= 2 ? class_count : NULL)); + + if (level >= 2) { + out->printf("------------------------------------------------\n"); + uint64_t cumulative = 0; + for (int cl = 0; cl < kNumClasses; ++cl) { + if (class_count[cl] > 0) { + uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl); + cumulative += class_bytes; + out->printf("class %3d [ %8" PRIuS " bytes ] : " + "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n", + cl, ByteSizeForClass(cl), + class_count[cl], + class_bytes / 1048576.0, + cumulative / 1048576.0); + } + } + + SpinLockHolder h(&pageheap_lock); + pageheap->Dump(out); + } + + const uint64_t bytes_in_use = stats.system_bytes + - stats.pageheap_bytes + - stats.central_bytes + - stats.transfer_bytes + - stats.thread_bytes; + + out->printf("------------------------------------------------\n" + "MALLOC: %12" PRIu64 " Heap size\n" + "MALLOC: %12" PRIu64 " Bytes in use by application\n" + "MALLOC: %12" PRIu64 " Bytes free in page heap\n" + "MALLOC: %12" PRIu64 " Bytes free in central cache\n" + "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n" + "MALLOC: %12" PRIu64 " Bytes free in thread caches\n" + "MALLOC: %12" PRIu64 " Spans in use\n" + "MALLOC: %12" PRIu64 " Thread heaps in use\n" + "MALLOC: %12" PRIu64 " Metadata allocated\n" + "------------------------------------------------\n", + stats.system_bytes, + bytes_in_use, + stats.pageheap_bytes, + stats.central_bytes, + stats.transfer_bytes, + stats.thread_bytes, + uint64_t(span_allocator.inuse()), + uint64_t(threadheap_allocator.inuse()), + stats.metadata_bytes); +} + +static void PrintStats(int level) { + const int kBufferSize = 16 << 10; + char* buffer = new char[kBufferSize]; + TCMalloc_Printer printer(buffer, kBufferSize); + DumpStats(&printer, level); + write(STDERR_FILENO, buffer, strlen(buffer)); + delete[] buffer; +} + +static void** DumpStackTraces() { + // Count how much space we need + int needed_slots = 0; + { + SpinLockHolder h(&pageheap_lock); + for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) { + StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects); + needed_slots += 3 + stack->depth; + } + needed_slots += 100; // Slop in case sample grows + needed_slots += needed_slots/8; // An extra 12.5% slop + } + + void** result = new 
void*[needed_slots]; + if (result == NULL) { + MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n", + needed_slots); + return NULL; + } + + SpinLockHolder h(&pageheap_lock); + int used_slots = 0; + for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) { + ASSERT(used_slots < needed_slots); // Need to leave room for terminator + StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects); + if (used_slots + 3 + stack->depth >= needed_slots) { + // No more room + break; + } + + result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1)); + result[used_slots+1] = reinterpret_cast<void*>(stack->size); + result[used_slots+2] = reinterpret_cast<void*>(stack->depth); + for (int d = 0; d < stack->depth; d++) { + result[used_slots+3+d] = stack->stack[d]; + } + used_slots += 3 + stack->depth; + } + result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0)); + return result; +} +#endif + +#ifndef WTF_CHANGES + +// TCMalloc's support for extra malloc interfaces +class TCMallocImplementation : public MallocExtension { + public: + virtual void GetStats(char* buffer, int buffer_length) { + ASSERT(buffer_length > 0); + TCMalloc_Printer printer(buffer, buffer_length); + + // Print level one stats unless lots of space is available + if (buffer_length < 10000) { + DumpStats(&printer, 1); + } else { + DumpStats(&printer, 2); + } + } + + virtual void** ReadStackTraces() { + return DumpStackTraces(); + } + + virtual bool GetNumericProperty(const char* name, size_t* value) { + ASSERT(name != NULL); + + if (strcmp(name, "generic.current_allocated_bytes") == 0) { + TCMallocStats stats; + ExtractStats(&stats, NULL); + *value = stats.system_bytes + - stats.thread_bytes + - stats.central_bytes + - stats.pageheap_bytes; + return true; + } + + if (strcmp(name, "generic.heap_size") == 0) { + TCMallocStats stats; + ExtractStats(&stats, NULL); + *value = stats.system_bytes; + return true; + } + + if (strcmp(name, "tcmalloc.slack_bytes") == 0) { + // We assume that bytes in the page heap are not fragmented too + // badly, and are therefore available for allocation. + SpinLockHolder l(&pageheap_lock); + *value = pageheap->FreeBytes(); + return true; + } + + if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { + SpinLockHolder l(&pageheap_lock); + *value = overall_thread_cache_size; + return true; + } + + if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) { + TCMallocStats stats; + ExtractStats(&stats, NULL); + *value = stats.thread_bytes; + return true; + } + + return false; + } + + virtual bool SetNumericProperty(const char* name, size_t value) { + ASSERT(name != NULL); + + if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { + // Clip the value to a reasonable range + if (value < kMinThreadCacheSize) value = kMinThreadCacheSize; + if (value > (1<<30)) value = (1<<30); // Limit to 1GB + + SpinLockHolder l(&pageheap_lock); + overall_thread_cache_size = static_cast<size_t>(value); + TCMalloc_ThreadCache::RecomputeThreadCacheSize(); + return true; + } + + return false; + } + + virtual void MarkThreadIdle() { + TCMalloc_ThreadCache::BecomeIdle(); + } + + virtual void ReleaseFreeMemory() { + SpinLockHolder h(&pageheap_lock); + pageheap->ReleaseFreePages(); + } +}; +#endif + +// The constructor allocates an object to ensure that initialization +// runs before main(), and therefore we do not have a chance to become +// multi-threaded before initialization. We also create the TSD key +// here. 
Presumably by the time this constructor runs, glibc is in +// good enough shape to handle pthread_key_create(). +// +// The constructor also takes the opportunity to tell STL to use +// tcmalloc. We want to do this early, before construct time, so +// all user STL allocations go through tcmalloc (which works really +// well for STL). +// +// The destructor prints stats when the program exits. +class TCMallocGuard { + public: + + TCMallocGuard() { +#ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS + // Check whether the kernel also supports TLS (needs to happen at runtime) + CheckIfKernelSupportsTLS(); +#endif +#ifndef WTF_CHANGES +#ifdef WIN32 // patch the windows VirtualAlloc, etc. + PatchWindowsFunctions(); // defined in windows/patch_functions.cc +#endif +#endif + free(malloc(1)); + TCMalloc_ThreadCache::InitTSD(); + free(malloc(1)); +#ifndef WTF_CHANGES + MallocExtension::Register(new TCMallocImplementation); +#endif + } + +#ifndef WTF_CHANGES + ~TCMallocGuard() { + const char* env = getenv("MALLOCSTATS"); + if (env != NULL) { + int level = atoi(env); + if (level < 1) level = 1; + PrintStats(level); + } +#ifdef WIN32 + UnpatchWindowsFunctions(); +#endif + } +#endif +}; + +#ifndef WTF_CHANGES +static TCMallocGuard module_enter_exit_hook; +#endif + + +//------------------------------------------------------------------- +// Helpers for the exported routines below +//------------------------------------------------------------------- + +#ifndef WTF_CHANGES + +static Span* DoSampledAllocation(size_t size) { + + // Grab the stack trace outside the heap lock + StackTrace tmp; + tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1); + tmp.size = size; + + SpinLockHolder h(&pageheap_lock); + // Allocate span + Span *span = pageheap->New(pages(size == 0 ? 1 : size)); + if (span == NULL) { + return NULL; + } + + // Allocate stack trace + StackTrace *stack = stacktrace_allocator.New(); + if (stack == NULL) { + // Sampling failed because of lack of memory + return span; + } + + *stack = tmp; + span->sample = 1; + span->objects = stack; + DLL_Prepend(&sampled_objects, span); + + return span; +} +#endif + +static inline bool CheckCachedSizeClass(void *ptr) { + PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; + size_t cached_value = pageheap->GetSizeClassIfCached(p); + return cached_value == 0 || + cached_value == pageheap->GetDescriptor(p)->sizeclass; +} + +static inline void* CheckedMallocResult(void *result) +{ + ASSERT(result == 0 || CheckCachedSizeClass(result)); + return result; +} + +static inline void* SpanToMallocResult(Span *span) { + ASSERT_SPAN_COMMITTED(span); + pageheap->CacheSizeClass(span->start, 0); + return + CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift)); +} + +#ifdef WTF_CHANGES +template <bool crashOnFailure> +#endif +static ALWAYS_INLINE void* do_malloc(size_t size) { + void* ret = NULL; + +#ifdef WTF_CHANGES + ASSERT(!isForbidden()); +#endif + + // The following call forces module initialization + TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache(); +#ifndef WTF_CHANGES + if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { + Span* span = DoSampledAllocation(size); + if (span != NULL) { + ret = SpanToMallocResult(span); + } + } else +#endif + if (size > kMaxSize) { + // Use page-level allocator + SpinLockHolder h(&pageheap_lock); + Span* span = pageheap->New(pages(size)); + if (span != NULL) { + ret = SpanToMallocResult(span); + } + } else { + // The common case, and also the simplest. 
This just pops the
+    // size-appropriate freelist, after replenishing it if it's empty.
+    ret = CheckedMallocResult(heap->Allocate(size));
+  }
+  if (!ret) {
+#ifdef WTF_CHANGES
+    if (crashOnFailure) // This branch should be optimized out by the compiler.
+      CRASH();
+#else
+    errno = ENOMEM;
+#endif
+  }
+  return ret;
+}
+
+static ALWAYS_INLINE void do_free(void* ptr) {
+  if (ptr == NULL) return;
+  ASSERT(pageheap != NULL);  // Should not call free() before malloc()
+  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
+  Span* span = NULL;
+  size_t cl = pageheap->GetSizeClassIfCached(p);
+
+  if (cl == 0) {
+    span = pageheap->GetDescriptor(p);
+    cl = span->sizeclass;
+    pageheap->CacheSizeClass(p, cl);
+  }
+  if (cl != 0) {
+#ifndef NO_TCMALLOC_SAMPLES
+    ASSERT(!pageheap->GetDescriptor(p)->sample);
+#endif
+    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
+    if (heap != NULL) {
+      heap->Deallocate(ptr, cl);
+    } else {
+      // Delete directly into central cache
+      SLL_SetNext(ptr, NULL);
+      central_cache[cl].InsertRange(ptr, ptr, 1);
+    }
+  } else {
+    SpinLockHolder h(&pageheap_lock);
+    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
+    ASSERT(span != NULL && span->start == p);
+#ifndef NO_TCMALLOC_SAMPLES
+    if (span->sample) {
+      DLL_Remove(span);
+      stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
+      span->objects = NULL;
+    }
+#endif
+    pageheap->Delete(span);
+  }
+}
+
+#ifndef WTF_CHANGES
+// For use by exported routines below that want specific alignments
+//
+// Note: this code can be slow, and can significantly fragment memory.
+// The expectation is that memalign/posix_memalign/valloc/pvalloc will
+// not be invoked very often. This requirement simplifies our
+// implementation and allows us to tune for expected allocation
+// patterns.
+static void* do_memalign(size_t align, size_t size) {
+  ASSERT((align & (align - 1)) == 0);
+  ASSERT(align > 0);
+  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
+
+  // Allocate at least one byte to avoid boundary conditions below
+  if (size == 0) size = 1;
+
+  if (size <= kMaxSize && align < kPageSize) {
+    // Search through acceptable size classes looking for one with
+    // enough alignment. This depends on the fact that
+    // InitSizeClasses() currently produces several size classes that
+    // are aligned at powers of two. We will waste time and space if
+    // we miss in the size class array, but that is deemed acceptable
+    // since memalign() should be used rarely.
+    size_t cl = SizeClass(size);
+    while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
+      cl++;
+    }
+    if (cl < kNumClasses) {
+      TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
+      return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
+    }
+  }
+
+  // We will allocate directly from the page heap
+  SpinLockHolder h(&pageheap_lock);
+
+  if (align <= kPageSize) {
+    // Any page-level allocation will be fine
+    // TODO: We could put the rest of this page in the appropriate
+    // TODO: cache but it does not seem worth it.
+    Span* span = pageheap->New(pages(size));
+    return span == NULL ?
NULL : SpanToMallocResult(span); + } + + // Allocate extra pages and carve off an aligned portion + const Length alloc = pages(size + align); + Span* span = pageheap->New(alloc); + if (span == NULL) return NULL; + + // Skip starting portion so that we end up aligned + Length skip = 0; + while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) { + skip++; + } + ASSERT(skip < alloc); + if (skip > 0) { + Span* rest = pageheap->Split(span, skip); + pageheap->Delete(span); + span = rest; + } + + // Skip trailing portion that we do not need to return + const Length needed = pages(size); + ASSERT(span->length >= needed); + if (span->length > needed) { + Span* trailer = pageheap->Split(span, needed); + pageheap->Delete(trailer); + } + return SpanToMallocResult(span); +} +#endif + +// Helpers for use by exported routines below: + +#ifndef WTF_CHANGES +static inline void do_malloc_stats() { + PrintStats(1); +} +#endif + +static inline int do_mallopt(int, int) { + return 1; // Indicates error +} + +#ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance +static inline struct mallinfo do_mallinfo() { + TCMallocStats stats; + ExtractStats(&stats, NULL); + + // Just some of the fields are filled in. + struct mallinfo info; + memset(&info, 0, sizeof(info)); + + // Unfortunately, the struct contains "int" field, so some of the + // size values will be truncated. + info.arena = static_cast<int>(stats.system_bytes); + info.fsmblks = static_cast<int>(stats.thread_bytes + + stats.central_bytes + + stats.transfer_bytes); + info.fordblks = static_cast<int>(stats.pageheap_bytes); + info.uordblks = static_cast<int>(stats.system_bytes + - stats.thread_bytes + - stats.central_bytes + - stats.transfer_bytes + - stats.pageheap_bytes); + + return info; +} +#endif + +//------------------------------------------------------------------- +// Exported routines +//------------------------------------------------------------------- + +// CAVEAT: The code structure below ensures that MallocHook methods are always +// called from the stack frame of the invoked allocation function. +// heap-checker.cc depends on this to start a stack trace from +// the call to the (de)allocation function. 
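+
+// Usage sketch (not part of the original source): under WTF_CHANGES the
+// routines below are reached through the WTF entry points declared in
+// FastMalloc.h, e.g.
+//
+//   void* p = WTF::fastMalloc(128);     // crashes on allocation failure
+//   void* q = WTF::tryFastMalloc(128);  // returns 0 on allocation failure
+//   if (q)
+//       WTF::fastFree(q);
+//   WTF::fastFree(p);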
+ +#ifndef WTF_CHANGES +extern "C" +#else +#define do_malloc do_malloc<crashOnFailure> + +template <bool crashOnFailure> +void* malloc(size_t); + +void* fastMalloc(size_t size) +{ + return malloc<true>(size); +} + +void* tryFastMalloc(size_t size) +{ + return malloc<false>(size); +} + +template <bool crashOnFailure> +ALWAYS_INLINE +#endif +void* malloc(size_t size) { + void* result = do_malloc(size); +#ifndef WTF_CHANGES + MallocHook::InvokeNewHook(result, size); +#endif + return result; +} + +#ifndef WTF_CHANGES +extern "C" +#endif +void free(void* ptr) { +#ifndef WTF_CHANGES + MallocHook::InvokeDeleteHook(ptr); +#endif + do_free(ptr); +} + +#ifndef WTF_CHANGES +extern "C" +#else +template <bool crashOnFailure> +void* calloc(size_t, size_t); + +void* fastCalloc(size_t n, size_t elem_size) +{ + return calloc<true>(n, elem_size); +} + +void* tryFastCalloc(size_t n, size_t elem_size) +{ + return calloc<false>(n, elem_size); +} + +template <bool crashOnFailure> +ALWAYS_INLINE +#endif +void* calloc(size_t n, size_t elem_size) { + const size_t totalBytes = n * elem_size; + + // Protect against overflow + if (n > 1 && elem_size && (totalBytes / elem_size) != n) + return 0; + + void* result = do_malloc(totalBytes); + if (result != NULL) { + memset(result, 0, totalBytes); + } +#ifndef WTF_CHANGES + MallocHook::InvokeNewHook(result, totalBytes); +#endif + return result; +} + +#ifndef WTF_CHANGES +extern "C" +#endif +void cfree(void* ptr) { +#ifndef WTF_CHANGES + MallocHook::InvokeDeleteHook(ptr); +#endif + do_free(ptr); +} + +#ifndef WTF_CHANGES +extern "C" +#else +template <bool crashOnFailure> +void* realloc(void*, size_t); + +void* fastRealloc(void* old_ptr, size_t new_size) +{ + return realloc<true>(old_ptr, new_size); +} + +void* tryFastRealloc(void* old_ptr, size_t new_size) +{ + return realloc<false>(old_ptr, new_size); +} + +template <bool crashOnFailure> +ALWAYS_INLINE +#endif +void* realloc(void* old_ptr, size_t new_size) { + if (old_ptr == NULL) { + void* result = do_malloc(new_size); +#ifndef WTF_CHANGES + MallocHook::InvokeNewHook(result, new_size); +#endif + return result; + } + if (new_size == 0) { +#ifndef WTF_CHANGES + MallocHook::InvokeDeleteHook(old_ptr); +#endif + free(old_ptr); + return NULL; + } + + // Get the size of the old entry + const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift; + size_t cl = pageheap->GetSizeClassIfCached(p); + Span *span = NULL; + size_t old_size; + if (cl == 0) { + span = pageheap->GetDescriptor(p); + cl = span->sizeclass; + pageheap->CacheSizeClass(p, cl); + } + if (cl != 0) { + old_size = ByteSizeForClass(cl); + } else { + ASSERT(span != NULL); + old_size = span->length << kPageShift; + } + + // Reallocate if the new size is larger than the old size, + // or if the new size is significantly smaller than the old size. + if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) { + // Need to reallocate + void* new_ptr = do_malloc(new_size); + if (new_ptr == NULL) { + return NULL; + } +#ifndef WTF_CHANGES + MallocHook::InvokeNewHook(new_ptr, new_size); +#endif + memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); +#ifndef WTF_CHANGES + MallocHook::InvokeDeleteHook(old_ptr); +#endif + // We could use a variant of do_free() that leverages the fact + // that we already know the sizeclass of old_ptr. The benefit + // would be small, so don't bother. 
+ do_free(old_ptr); + return new_ptr; + } else { + return old_ptr; + } +} + +#ifdef WTF_CHANGES +#undef do_malloc +#else + +static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER; + +static inline void* cpp_alloc(size_t size, bool nothrow) { + for (;;) { + void* p = do_malloc(size); +#ifdef PREANSINEW + return p; +#else + if (p == NULL) { // allocation failed + // Get the current new handler. NB: this function is not + // thread-safe. We make a feeble stab at making it so here, but + // this lock only protects against tcmalloc interfering with + // itself, not with other libraries calling set_new_handler. + std::new_handler nh; + { + SpinLockHolder h(&set_new_handler_lock); + nh = std::set_new_handler(0); + (void) std::set_new_handler(nh); + } + // If no new_handler is established, the allocation failed. + if (!nh) { + if (nothrow) return 0; + throw std::bad_alloc(); + } + // Otherwise, try the new_handler. If it returns, retry the + // allocation. If it throws std::bad_alloc, fail the allocation. + // if it throws something else, don't interfere. + try { + (*nh)(); + } catch (const std::bad_alloc&) { + if (!nothrow) throw; + return p; + } + } else { // allocation success + return p; + } +#endif + } +} + +void* operator new(size_t size) { + void* p = cpp_alloc(size, false); + // We keep this next instruction out of cpp_alloc for a reason: when + // it's in, and new just calls cpp_alloc, the optimizer may fold the + // new call into cpp_alloc, which messes up our whole section-based + // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc + // isn't the last thing this fn calls, and prevents the folding. + MallocHook::InvokeNewHook(p, size); + return p; +} + +void* operator new(size_t size, const std::nothrow_t&) __THROW { + void* p = cpp_alloc(size, true); + MallocHook::InvokeNewHook(p, size); + return p; +} + +void operator delete(void* p) __THROW { + MallocHook::InvokeDeleteHook(p); + do_free(p); +} + +void operator delete(void* p, const std::nothrow_t&) __THROW { + MallocHook::InvokeDeleteHook(p); + do_free(p); +} + +void* operator new[](size_t size) { + void* p = cpp_alloc(size, false); + // We keep this next instruction out of cpp_alloc for a reason: when + // it's in, and new just calls cpp_alloc, the optimizer may fold the + // new call into cpp_alloc, which messes up our whole section-based + // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc + // isn't the last thing this fn calls, and prevents the folding. 
+ MallocHook::InvokeNewHook(p, size); + return p; +} + +void* operator new[](size_t size, const std::nothrow_t&) __THROW { + void* p = cpp_alloc(size, true); + MallocHook::InvokeNewHook(p, size); + return p; +} + +void operator delete[](void* p) __THROW { + MallocHook::InvokeDeleteHook(p); + do_free(p); +} + +void operator delete[](void* p, const std::nothrow_t&) __THROW { + MallocHook::InvokeDeleteHook(p); + do_free(p); +} + +extern "C" void* memalign(size_t align, size_t size) __THROW { + void* result = do_memalign(align, size); + MallocHook::InvokeNewHook(result, size); + return result; +} + +extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size) + __THROW { + if (((align % sizeof(void*)) != 0) || + ((align & (align - 1)) != 0) || + (align == 0)) { + return EINVAL; + } + + void* result = do_memalign(align, size); + MallocHook::InvokeNewHook(result, size); + if (result == NULL) { + return ENOMEM; + } else { + *result_ptr = result; + return 0; + } +} + +static size_t pagesize = 0; + +extern "C" void* valloc(size_t size) __THROW { + // Allocate page-aligned object of length >= size bytes + if (pagesize == 0) pagesize = getpagesize(); + void* result = do_memalign(pagesize, size); + MallocHook::InvokeNewHook(result, size); + return result; +} + +extern "C" void* pvalloc(size_t size) __THROW { + // Round up size to a multiple of pagesize + if (pagesize == 0) pagesize = getpagesize(); + size = (size + pagesize - 1) & ~(pagesize - 1); + void* result = do_memalign(pagesize, size); + MallocHook::InvokeNewHook(result, size); + return result; +} + +extern "C" void malloc_stats(void) { + do_malloc_stats(); +} + +extern "C" int mallopt(int cmd, int value) { + return do_mallopt(cmd, value); +} + +#ifdef HAVE_STRUCT_MALLINFO +extern "C" struct mallinfo mallinfo(void) { + return do_mallinfo(); +} +#endif + +//------------------------------------------------------------------- +// Some library routines on RedHat 9 allocate memory using malloc() +// and free it using __libc_free() (or vice-versa). Since we provide +// our own implementations of malloc/free, we need to make sure that +// the __libc_XXX variants (defined as part of glibc) also point to +// the same implementations. +//------------------------------------------------------------------- + +#if defined(__GLIBC__) +extern "C" { +#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__) + // Potentially faster variants that use the gcc alias extension. + // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check. 
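+  // Illustrative expansion (not part of the original source): given the
+  // ALIAS macro below,
+  //   void* __libc_malloc(size_t size) ALIAS("malloc");
+  // preprocesses to
+  //   void* __libc_malloc(size_t size) __attribute__ ((weak, alias ("malloc")));
+  // so glibc's __libc_malloc becomes a weak symbol that resolves to the
+  // malloc defined in this file.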
+# define ALIAS(x) __attribute__ ((weak, alias (x))) + void* __libc_malloc(size_t size) ALIAS("malloc"); + void __libc_free(void* ptr) ALIAS("free"); + void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc"); + void* __libc_calloc(size_t n, size_t size) ALIAS("calloc"); + void __libc_cfree(void* ptr) ALIAS("cfree"); + void* __libc_memalign(size_t align, size_t s) ALIAS("memalign"); + void* __libc_valloc(size_t size) ALIAS("valloc"); + void* __libc_pvalloc(size_t size) ALIAS("pvalloc"); + int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign"); +# undef ALIAS +# else /* not __GNUC__ */ + // Portable wrappers + void* __libc_malloc(size_t size) { return malloc(size); } + void __libc_free(void* ptr) { free(ptr); } + void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); } + void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); } + void __libc_cfree(void* ptr) { cfree(ptr); } + void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); } + void* __libc_valloc(size_t size) { return valloc(size); } + void* __libc_pvalloc(size_t size) { return pvalloc(size); } + int __posix_memalign(void** r, size_t a, size_t s) { + return posix_memalign(r, a, s); + } +# endif /* __GNUC__ */ +} +#endif /* __GLIBC__ */ + +// Override __libc_memalign in libc on linux boxes specially. +// They have a bug in libc that causes them to (very rarely) allocate +// with __libc_memalign() yet deallocate with free() and the +// definitions above don't catch it. +// This function is an exception to the rule of calling MallocHook method +// from the stack frame of the allocation function; +// heap-checker handles this special case explicitly. +static void *MemalignOverride(size_t align, size_t size, const void *caller) + __THROW { + void* result = do_memalign(align, size); + MallocHook::InvokeNewHook(result, size); + return result; +} +void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride; + +#endif + +#if defined(WTF_CHANGES) && PLATFORM(DARWIN) + +class FreeObjectFinder { + const RemoteMemoryReader& m_reader; + HashSet<void*> m_freeObjects; + +public: + FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { } + + void visit(void* ptr) { m_freeObjects.add(ptr); } + bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); } + size_t freeObjectCount() const { return m_freeObjects.size(); } + + void findFreeObjects(TCMalloc_ThreadCache* threadCache) + { + for (; threadCache; threadCache = (threadCache->next_ ? 
m_reader(threadCache->next_) : 0)) + threadCache->enumerateFreeObjects(*this, m_reader); + } + + void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList) + { + for (unsigned i = 0; i < numSizes; i++) + centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i); + } +}; + +class PageMapFreeObjectFinder { + const RemoteMemoryReader& m_reader; + FreeObjectFinder& m_freeObjectFinder; + +public: + PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder) + : m_reader(reader) + , m_freeObjectFinder(freeObjectFinder) + { } + + int visit(void* ptr) const + { + if (!ptr) + return 1; + + Span* span = m_reader(reinterpret_cast<Span*>(ptr)); + if (span->free) { + void* ptr = reinterpret_cast<void*>(span->start << kPageShift); + m_freeObjectFinder.visit(ptr); + } else if (span->sizeclass) { + // Walk the free list of the small-object span, keeping track of each object seen + for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject))) + m_freeObjectFinder.visit(nextObject); + } + return span->length; + } +}; + +class PageMapMemoryUsageRecorder { + task_t m_task; + void* m_context; + unsigned m_typeMask; + vm_range_recorder_t* m_recorder; + const RemoteMemoryReader& m_reader; + const FreeObjectFinder& m_freeObjectFinder; + mutable HashSet<void*> m_seenPointers; + +public: + PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder) + : m_task(task) + , m_context(context) + , m_typeMask(typeMask) + , m_recorder(recorder) + , m_reader(reader) + , m_freeObjectFinder(freeObjectFinder) + { } + + int visit(void* ptr) const + { + if (!ptr) + return 1; + + Span* span = m_reader(reinterpret_cast<Span*>(ptr)); + if (m_seenPointers.contains(ptr)) + return span->length; + m_seenPointers.add(ptr); + + // Mark the memory used for the Span itself as an administrative region + vm_range_t ptrRange = { reinterpret_cast<vm_address_t>(ptr), sizeof(Span) }; + if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) + (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, &ptrRange, 1); + + ptrRange.address = span->start << kPageShift; + ptrRange.size = span->length * kPageSize; + + // Mark the memory region the span represents as candidates for containing pointers + if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) + (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1); + + if (!span->free && (m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) { + // If it's an allocated large object span, mark it as in use + if (span->sizeclass == 0 && !m_freeObjectFinder.isFreeObject(reinterpret_cast<void*>(ptrRange.address))) + (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &ptrRange, 1); + else if (span->sizeclass) { + const size_t byteSize = ByteSizeForClass(span->sizeclass); + unsigned totalObjects = (span->length << kPageShift) / byteSize; + ASSERT(span->refcount <= totalObjects); + char* ptr = reinterpret_cast<char*>(span->start << kPageShift); + + // Mark each allocated small object within the span as in use + for (unsigned i = 0; i < totalObjects; i++) { + char* thisObject = ptr + (i * byteSize); + if (m_freeObjectFinder.isFreeObject(thisObject)) + continue; + + vm_range_t objectRange = { 
reinterpret_cast<vm_address_t>(thisObject), byteSize }; + (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &objectRange, 1); + } + } + } + + return span->length; + } +}; + +kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder) +{ + RemoteMemoryReader memoryReader(task, reader); + + InitSizeClasses(); + + FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress)); + TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap); + TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps); + TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer); + + TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses); + + FreeObjectFinder finder(memoryReader); + finder.findFreeObjects(threadHeaps); + finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches); + + TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_; + PageMapFreeObjectFinder pageMapFinder(memoryReader, finder); + pageMap->visit(pageMapFinder, memoryReader); + + PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder); + pageMap->visit(usageRecorder, memoryReader); + + return 0; +} + +size_t FastMallocZone::size(malloc_zone_t*, const void*) +{ + return 0; +} + +void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t) +{ + return 0; +} + +void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t) +{ + return 0; +} + +void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr) +{ + // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer + // is not in this zone. When this happens, the pointer being freed was not allocated by any + // zone so we need to print a useful error for the application developer. + malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr); +} + +void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t) +{ + return 0; +} + + +#undef malloc +#undef free +#undef realloc +#undef calloc + +extern "C" { +malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print, + &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics }; +} + +FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches) + : m_pageHeap(pageHeap) + , m_threadHeaps(threadHeaps) + , m_centralCaches(centralCaches) +{ + memset(&m_zone, 0, sizeof(m_zone)); + m_zone.zone_name = "JavaScriptCore FastMalloc"; + m_zone.size = &FastMallocZone::size; + m_zone.malloc = &FastMallocZone::zoneMalloc; + m_zone.calloc = &FastMallocZone::zoneCalloc; + m_zone.realloc = &FastMallocZone::zoneRealloc; + m_zone.free = &FastMallocZone::zoneFree; + m_zone.valloc = &FastMallocZone::zoneValloc; + m_zone.destroy = &FastMallocZone::zoneDestroy; + m_zone.introspect = &jscore_fastmalloc_introspection; + malloc_zone_register(&m_zone); +} + + +void FastMallocZone::init() +{ + static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache)); +} + +#endif + +#if WTF_CHANGES +void releaseFastMallocFreeMemory() +{ + // Flush free pages in the current thread cache back to the page heap. 
+ // Low watermark mechanism in Scavenge() prevents full return on the first pass. + // The second pass flushes everything. + if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) { + threadCache->Scavenge(); + threadCache->Scavenge(); + } + + SpinLockHolder h(&pageheap_lock); + pageheap->ReleaseFreePages(); +} + +FastMallocStatistics fastMallocStatistics() +{ + FastMallocStatistics statistics; + { + SpinLockHolder lockHolder(&pageheap_lock); + statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes()); + statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes()); + statistics.returnedSize = pageheap->ReturnedBytes(); + statistics.freeSizeInCaches = 0; + for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_) + statistics.freeSizeInCaches += threadCache->Size(); + } + for (unsigned cl = 0; cl < kNumClasses; ++cl) { + const int length = central_cache[cl].length(); + const int tc_length = central_cache[cl].tc_length(); + statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length); + } + return statistics; +} + +} // namespace WTF +#endif + +#endif // FORCE_SYSTEM_MALLOC diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.h b/src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.h new file mode 100644 index 0000000..f1264ac --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/FastMalloc.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_FastMalloc_h +#define WTF_FastMalloc_h + +#include "Platform.h" +#include <stdlib.h> +#include <new> + +namespace WTF { + + // These functions call CRASH() if an allocation fails. + void* fastMalloc(size_t n); + void* fastZeroedMalloc(size_t n); + void* fastCalloc(size_t n_elements, size_t element_size); + void* fastRealloc(void* p, size_t n); + + // These functions return NULL if an allocation fails. 
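+    // [Editor's illustrative sketch -- not part of the original header.] The
+    // two families differ only in their failure contract: the functions above
+    // never return NULL, while the tryFast* variants below do and so must be
+    // checked by the caller.
+#if 0
+    void example(size_t n)
+    {
+        void* p = tryFastMalloc(n); // may return NULL; caller must check
+        if (!p)
+            return;                 // graceful out-of-memory path
+        fastFree(p);
+
+        void* q = fastMalloc(n);    // never NULL; calls CRASH() on failure
+        fastFree(q);
+    }
+#endif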
+ void* tryFastMalloc(size_t n); + void* tryFastZeroedMalloc(size_t n); + void* tryFastCalloc(size_t n_elements, size_t element_size); + void* tryFastRealloc(void* p, size_t n); + + void fastFree(void* p); + +#ifndef NDEBUG + void fastMallocForbid(); + void fastMallocAllow(); +#endif + + void releaseFastMallocFreeMemory(); + + struct FastMallocStatistics { + size_t heapSize; + size_t freeSizeInHeap; + size_t freeSizeInCaches; + size_t returnedSize; + }; + FastMallocStatistics fastMallocStatistics(); + +} // namespace WTF + +using WTF::fastMalloc; +using WTF::fastZeroedMalloc; +using WTF::fastCalloc; +using WTF::fastRealloc; +using WTF::tryFastMalloc; +using WTF::tryFastZeroedMalloc; +using WTF::tryFastCalloc; +using WTF::tryFastRealloc; +using WTF::fastFree; + +#ifndef NDEBUG +using WTF::fastMallocForbid; +using WTF::fastMallocAllow; +#endif + +#if COMPILER(GCC) && PLATFORM(DARWIN) +#define WTF_PRIVATE_INLINE __private_extern__ inline __attribute__((always_inline)) +#elif COMPILER(GCC) +#define WTF_PRIVATE_INLINE inline __attribute__((always_inline)) +#elif COMPILER(MSVC) +#define WTF_PRIVATE_INLINE __forceinline +#else +#define WTF_PRIVATE_INLINE inline +#endif + +#ifndef _CRTDBG_MAP_ALLOC + +#if !defined(USE_SYSTEM_MALLOC) || !(USE_SYSTEM_MALLOC) +WTF_PRIVATE_INLINE void* operator new(size_t s) { return fastMalloc(s); } +WTF_PRIVATE_INLINE void operator delete(void* p) { fastFree(p); } +WTF_PRIVATE_INLINE void* operator new[](size_t s) { return fastMalloc(s); } +WTF_PRIVATE_INLINE void operator delete[](void* p) { fastFree(p); } +#endif + +#endif // _CRTDBG_MAP_ALLOC + +#endif /* WTF_FastMalloc_h */ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/Forward.h b/src/3rdparty/webkit/JavaScriptCore/wtf/Forward.h new file mode 100644 index 0000000..67dc3be --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/Forward.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2006 Apple Computer, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_Forward_h +#define WTF_Forward_h + +#include <stddef.h> + +namespace WTF { + template<typename T> class ListRefPtr; + template<typename T> class OwnArrayPtr; + template<typename T> class OwnPtr; + template<typename T> class PassRefPtr; + template<typename T> class RefPtr; + template<typename T, size_t inlineCapacity> class Vector; +} + +using WTF::ListRefPtr; +using WTF::OwnArrayPtr; +using WTF::OwnPtr; +using WTF::PassRefPtr; +using WTF::RefPtr; +using WTF::Vector; + +#endif // WTF_Forward_h + diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/GOwnPtr.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/GOwnPtr.cpp new file mode 100644 index 0000000..58869f4 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/GOwnPtr.cpp @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2008 Collabora Ltd. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "config.h" +#include "GOwnPtr.h" + +namespace WTF { + +template <> void freeOwnedGPtr<GError>(GError* ptr) +{ + if (ptr) + g_error_free(ptr); +} + +template <> void freeOwnedGPtr<GList>(GList* ptr) +{ + g_list_free(ptr); +} + +template <> void freeOwnedGPtr<GCond>(GCond* ptr) +{ + if (ptr) + g_cond_free(ptr); +} + +template <> void freeOwnedGPtr<GMutex>(GMutex* ptr) +{ + if (ptr) + g_mutex_free(ptr); +} + +template <> void freeOwnedGPtr<GPatternSpec>(GPatternSpec* ptr) +{ + if (ptr) + g_pattern_spec_free(ptr); +} + +template <> void freeOwnedGPtr<GDir>(GDir* ptr) +{ + if (ptr) + g_dir_close(ptr); +} + +} // namespace WTF diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/GOwnPtr.h b/src/3rdparty/webkit/JavaScriptCore/wtf/GOwnPtr.h new file mode 100644 index 0000000..bbb793a --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/GOwnPtr.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2006, 2007 Apple Inc. All rights reserved. + * Copyright (C) 2008 Collabora Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ *
+ */
+
+#ifndef GOwnPtr_h
+#define GOwnPtr_h
+
+#include <algorithm>
+#include <glib.h>
+#include <wtf/Assertions.h>
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+    template <typename T> inline void freeOwnedGPtr(T* ptr) { g_free(reinterpret_cast<void*>(ptr)); }
+    template<> void freeOwnedGPtr<GError>(GError*);
+    template<> void freeOwnedGPtr<GList>(GList*);
+    template<> void freeOwnedGPtr<GCond>(GCond*);
+    template<> void freeOwnedGPtr<GMutex>(GMutex*);
+    template<> void freeOwnedGPtr<GPatternSpec>(GPatternSpec*);
+    template<> void freeOwnedGPtr<GDir>(GDir*);
+
+    template <typename T> class GOwnPtr : Noncopyable {
+    public:
+        explicit GOwnPtr(T* ptr = 0) : m_ptr(ptr) { }
+        ~GOwnPtr() { freeOwnedGPtr(m_ptr); }
+
+        T* get() const { return m_ptr; }
+        T* release() { T* ptr = m_ptr; m_ptr = 0; return ptr; }
+        T*& outPtr() { ASSERT(!m_ptr); return m_ptr; }
+
+        void set(T* ptr) { ASSERT(!ptr || m_ptr != ptr); freeOwnedGPtr(m_ptr); m_ptr = ptr; }
+        void clear() { freeOwnedGPtr(m_ptr); m_ptr = 0; }
+
+        T& operator*() const { ASSERT(m_ptr); return *m_ptr; }
+        T* operator->() const { ASSERT(m_ptr); return m_ptr; }
+
+        bool operator!() const { return !m_ptr; }
+
+        // This conversion operator allows implicit conversion to bool but not to other integer types.
+        typedef T* GOwnPtr::*UnspecifiedBoolType;
+        operator UnspecifiedBoolType() const { return m_ptr ? &GOwnPtr::m_ptr : 0; }
+
+        void swap(GOwnPtr& o) { std::swap(m_ptr, o.m_ptr); }
+
+    private:
+        T* m_ptr;
+    };
+
+    template <typename T> inline void swap(GOwnPtr<T>& a, GOwnPtr<T>& b) { a.swap(b); }
+
+    template <typename T, typename U> inline bool operator==(const GOwnPtr<T>& a, U* b)
+    {
+        return a.get() == b;
+    }
+
+    template <typename T, typename U> inline bool operator==(T* a, const GOwnPtr<U>& b)
+    {
+        return a == b.get();
+    }
+
+    template <typename T, typename U> inline bool operator!=(const GOwnPtr<T>& a, U* b)
+    {
+        return a.get() != b;
+    }
+
+    template <typename T, typename U> inline bool operator!=(T* a, const GOwnPtr<U>& b)
+    {
+        return a != b.get();
+    }
+
+    template <typename T> inline T* getPtr(const GOwnPtr<T>& p)
+    {
+        return p.get();
+    }
+
+} // namespace WTF
+
+using WTF::GOwnPtr;
+
+#endif // GOwnPtr_h
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/GetPtr.h b/src/3rdparty/webkit/JavaScriptCore/wtf/GetPtr.h
new file mode 100644
index 0000000..25a0e6d
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/wtf/GetPtr.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2006 Apple Computer, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_GetPtr_h
+#define WTF_GetPtr_h
+
+namespace WTF {
+
+    template <typename T> inline T* getPtr(T* p)
+    {
+        return p;
+    }
+
+} // namespace WTF
+
+#endif // WTF_GetPtr_h
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/HashCountedSet.h b/src/3rdparty/webkit/JavaScriptCore/wtf/HashCountedSet.h
new file mode 100644
index 0000000..6fc0234
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/wtf/HashCountedSet.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2005, 2006, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_HashCountedSet_h
+#define WTF_HashCountedSet_h
+
+#include "Assertions.h"
+#include "HashMap.h"
+#include "Vector.h"
+
+namespace WTF {
+
+    template<typename Value, typename HashFunctions = typename DefaultHash<Value>::Hash,
+        typename Traits = HashTraits<Value> > class HashCountedSet {
+    private:
+        typedef HashMap<Value, unsigned, HashFunctions, Traits> ImplType;
+    public:
+        typedef Value ValueType;
+        typedef typename ImplType::iterator iterator;
+        typedef typename ImplType::const_iterator const_iterator;
+
+        HashCountedSet() {}
+
+        int size() const;
+        int capacity() const;
+        bool isEmpty() const;
+
+        // iterators iterate over pairs of values and counts
+        iterator begin();
+        iterator end();
+        const_iterator begin() const;
+        const_iterator end() const;
+
+        iterator find(const ValueType& value);
+        const_iterator find(const ValueType& value) const;
+        bool contains(const ValueType& value) const;
+        unsigned count(const ValueType& value) const;
+
+        // increases the count if an equal value is already present
+        // the return value is a pair of an iterator to the new value's location,
+        // and a bool that is true if a new entry was added
+        std::pair<iterator, bool> add(const ValueType &value);
+
+        // reduces the count of the value, and removes it if count
+        // goes down to zero
+        void remove(const ValueType& value);
+        void remove(iterator it);
+
+        void clear();
+
+    private:
+        ImplType m_impl;
+    };
+
+    template<typename Value, typename HashFunctions, typename Traits>
+    inline int HashCountedSet<Value, HashFunctions, Traits>::size() const
+    {
+        return m_impl.size();
+    }
+
+    template<typename Value, typename HashFunctions, typename Traits>
+    inline int HashCountedSet<Value, HashFunctions, Traits>::capacity() const
+    {
+        return m_impl.capacity();
+    }
+
+    template<typename Value, typename HashFunctions, typename Traits>
+    inline bool HashCountedSet<Value, HashFunctions, Traits>::isEmpty() const
+    {
+        return size() == 0;
+    }
+
+    template<typename Value, typename HashFunctions, typename Traits>
+    inline typename HashCountedSet<Value, HashFunctions, Traits>::iterator HashCountedSet<Value, HashFunctions, Traits>::begin()
+    {
+        return m_impl.begin();
+    }
+
+    template<typename Value, typename
HashFunctions, typename Traits> + inline typename HashCountedSet<Value, HashFunctions, Traits>::iterator HashCountedSet<Value, HashFunctions, Traits>::end() + { + return m_impl.end(); + } + + template<typename Value, typename HashFunctions, typename Traits> + inline typename HashCountedSet<Value, HashFunctions, Traits>::const_iterator HashCountedSet<Value, HashFunctions, Traits>::begin() const + { + return m_impl.begin(); + } + + template<typename Value, typename HashFunctions, typename Traits> + inline typename HashCountedSet<Value, HashFunctions, Traits>::const_iterator HashCountedSet<Value, HashFunctions, Traits>::end() const + { + return m_impl.end(); + } + + template<typename Value, typename HashFunctions, typename Traits> + inline typename HashCountedSet<Value, HashFunctions, Traits>::iterator HashCountedSet<Value, HashFunctions, Traits>::find(const ValueType& value) + { + return m_impl.find(value); + } + + template<typename Value, typename HashFunctions, typename Traits> + inline typename HashCountedSet<Value, HashFunctions, Traits>::const_iterator HashCountedSet<Value, HashFunctions, Traits>::find(const ValueType& value) const + { + return m_impl.find(value); + } + + template<typename Value, typename HashFunctions, typename Traits> + inline bool HashCountedSet<Value, HashFunctions, Traits>::contains(const ValueType& value) const + { + return m_impl.contains(value); + } + + template<typename Value, typename HashFunctions, typename Traits> + inline unsigned HashCountedSet<Value, HashFunctions, Traits>::count(const ValueType& value) const + { + return m_impl.get(value); + } + + template<typename Value, typename HashFunctions, typename Traits> + inline std::pair<typename HashCountedSet<Value, HashFunctions, Traits>::iterator, bool> HashCountedSet<Value, HashFunctions, Traits>::add(const ValueType &value) + { + pair<iterator, bool> result = m_impl.add(value, 0); + ++result.first->second; + return result; + } + + template<typename Value, typename HashFunctions, typename Traits> + inline void HashCountedSet<Value, HashFunctions, Traits>::remove(const ValueType& value) + { + remove(find(value)); + } + + template<typename Value, typename HashFunctions, typename Traits> + inline void HashCountedSet<Value, HashFunctions, Traits>::remove(iterator it) + { + if (it == end()) + return; + + unsigned oldVal = it->second; + ASSERT(oldVal != 0); + unsigned newVal = oldVal - 1; + if (newVal == 0) + m_impl.remove(it); + else + it->second = newVal; + } + + template<typename Value, typename HashFunctions, typename Traits> + inline void HashCountedSet<Value, HashFunctions, Traits>::clear() + { + m_impl.clear(); + } + + template<typename Value, typename HashFunctions, typename Traits, typename VectorType> + inline void copyToVector(const HashCountedSet<Value, HashFunctions, Traits>& collection, VectorType& vector) + { + typedef typename HashCountedSet<Value, HashFunctions, Traits>::const_iterator iterator; + + vector.resize(collection.size()); + + iterator it = collection.begin(); + iterator end = collection.end(); + for (unsigned i = 0; it != end; ++it, ++i) + vector[i] = *it; + } + + template<typename Value, typename HashFunctions, typename Traits> + inline void copyToVector(const HashCountedSet<Value, HashFunctions, Traits>& collection, Vector<Value>& vector) + { + typedef typename HashCountedSet<Value, HashFunctions, Traits>::const_iterator iterator; + + vector.resize(collection.size()); + + iterator it = collection.begin(); + iterator end = collection.end(); + for (unsigned i = 0; it != end; ++it, 
++i)
+            vector[i] = (*it).first;
+    }
+
+
+} // namespace WTF
+
+using WTF::HashCountedSet;
+
+#endif /* WTF_HashCountedSet_h */
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/HashFunctions.h b/src/3rdparty/webkit/JavaScriptCore/wtf/HashFunctions.h
new file mode 100644
index 0000000..13afb72
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/wtf/HashFunctions.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2005, 2006, 2008 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef WTF_HashFunctions_h
+#define WTF_HashFunctions_h
+
+#include "RefPtr.h"
+#include <stdint.h>
+
+namespace WTF {
+
+    template<size_t size> struct IntTypes;
+    template<> struct IntTypes<1> { typedef int8_t SignedType; typedef uint8_t UnsignedType; };
+    template<> struct IntTypes<2> { typedef int16_t SignedType; typedef uint16_t UnsignedType; };
+    template<> struct IntTypes<4> { typedef int32_t SignedType; typedef uint32_t UnsignedType; };
+    template<> struct IntTypes<8> { typedef int64_t SignedType; typedef uint64_t UnsignedType; };
+
+    // integer hash function
+
+    // Thomas Wang's 32 Bit Mix Function: http://www.cris.com/~Ttwang/tech/inthash.htm
+    inline unsigned intHash(uint8_t key8)
+    {
+        unsigned key = key8;
+        key += ~(key << 15);
+        key ^= (key >> 10);
+        key += (key << 3);
+        key ^= (key >> 6);
+        key += ~(key << 11);
+        key ^= (key >> 16);
+        return key;
+    }
+
+    // Thomas Wang's 32 Bit Mix Function: http://www.cris.com/~Ttwang/tech/inthash.htm
+    inline unsigned intHash(uint16_t key16)
+    {
+        unsigned key = key16;
+        key += ~(key << 15);
+        key ^= (key >> 10);
+        key += (key << 3);
+        key ^= (key >> 6);
+        key += ~(key << 11);
+        key ^= (key >> 16);
+        return key;
+    }
+
+    // Thomas Wang's 32 Bit Mix Function: http://www.cris.com/~Ttwang/tech/inthash.htm
+    inline unsigned intHash(uint32_t key)
+    {
+        key += ~(key << 15);
+        key ^= (key >> 10);
+        key += (key << 3);
+        key ^= (key >> 6);
+        key += ~(key << 11);
+        key ^= (key >> 16);
+        return key;
+    }
+
+    // Thomas Wang's 64 bit Mix Function: http://www.cris.com/~Ttwang/tech/inthash.htm
+    inline unsigned intHash(uint64_t key)
+    {
+        key += ~(key << 32);
+        key ^= (key >> 22);
+        key += ~(key << 13);
+        key ^= (key >> 8);
+        key += (key << 3);
+        key ^= (key >> 15);
+        key += ~(key << 27);
+        key ^= (key >> 31);
+        return static_cast<unsigned>(key);
+    }
+
+    template<typename T> struct IntHash {
+        static unsigned hash(T key) { return intHash(static_cast<typename IntTypes<sizeof(T)>::UnsignedType>(key)); }
+        static bool equal(T a, T b) { return a == b; }
+        static const bool safeToCompareToEmptyOrDeleted = true;
+    };
+
+    template<typename T> struct FloatHash {
+        static unsigned hash(T key)
+        {
+            union {
+                T key;
+                typename IntTypes<sizeof(T)>::UnsignedType bits;
+            } u;
+            u.key = key;
+            return intHash(u.bits);
+        }
+        static
bool equal(T a, T b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; + }; + + // pointer identity hash function + + template<typename T> struct PtrHash { + static unsigned hash(T key) + { +#if COMPILER(MSVC) +#pragma warning(push) +#pragma warning(disable: 4244) // work around what seems to be a bug in MSVC's conversion warnings +#endif + return IntHash<uintptr_t>::hash(reinterpret_cast<uintptr_t>(key)); +#if COMPILER(MSVC) +#pragma warning(pop) +#endif + } + static bool equal(T a, T b) { return a == b; } + static const bool safeToCompareToEmptyOrDeleted = true; + }; + template<typename P> struct PtrHash<RefPtr<P> > : PtrHash<P*> { + using PtrHash<P*>::hash; + static unsigned hash(const RefPtr<P>& key) { return hash(key.get()); } + using PtrHash<P*>::equal; + static bool equal(const RefPtr<P>& a, const RefPtr<P>& b) { return a == b; } + static bool equal(P* a, const RefPtr<P>& b) { return a == b; } + static bool equal(const RefPtr<P>& a, P* b) { return a == b; } + }; + + // default hash function for each type + + template<typename T> struct DefaultHash; + + template<typename T, typename U> struct PairHash { + static unsigned hash(const std::pair<T, U>& p) + { + return intHash((static_cast<uint64_t>(DefaultHash<T>::Hash::hash(p.first)) << 32 | DefaultHash<U>::Hash::hash(p.second))); + } + static bool equal(const std::pair<T, U>& a, const std::pair<T, U>& b) + { + return DefaultHash<T>::Hash::equal(a.first, b.first) && DefaultHash<U>::Hash::equal(a.second, b.second); + } + static const bool safeToCompareToEmptyOrDeleted = DefaultHash<T>::Hash::safeToCompareToEmptyOrDeleted + && DefaultHash<U>::Hash::safeToCompareToEmptyOrDeleted; + }; + + // make IntHash the default hash function for many integer types + + template<> struct DefaultHash<short> { typedef IntHash<unsigned> Hash; }; + template<> struct DefaultHash<unsigned short> { typedef IntHash<unsigned> Hash; }; + template<> struct DefaultHash<int> { typedef IntHash<unsigned> Hash; }; + template<> struct DefaultHash<unsigned> { typedef IntHash<unsigned> Hash; }; + template<> struct DefaultHash<long> { typedef IntHash<unsigned long> Hash; }; + template<> struct DefaultHash<unsigned long> { typedef IntHash<unsigned long> Hash; }; + template<> struct DefaultHash<long long> { typedef IntHash<unsigned long long> Hash; }; + template<> struct DefaultHash<unsigned long long> { typedef IntHash<unsigned long long> Hash; }; + +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + template<> struct DefaultHash<wchar_t> { typedef IntHash<wchar_t> Hash; }; +#endif + + template<> struct DefaultHash<float> { typedef FloatHash<float> Hash; }; + template<> struct DefaultHash<double> { typedef FloatHash<double> Hash; }; + + // make PtrHash the default hash function for pointer types that don't specialize + + template<typename P> struct DefaultHash<P*> { typedef PtrHash<P*> Hash; }; + template<typename P> struct DefaultHash<RefPtr<P> > { typedef PtrHash<RefPtr<P> > Hash; }; + + template<typename T, typename U> struct DefaultHash<std::pair<T, U> > { typedef PairHash<T, U> Hash; }; + + // Golden ratio - arbitrary start value to avoid mapping all 0's to all 0's + static const unsigned stringHashingStartValue = 0x9e3779b9U; + +} // namespace WTF + +using WTF::DefaultHash; +using WTF::IntHash; +using WTF::PtrHash; + +#endif // WTF_HashFunctions_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/HashIterators.h b/src/3rdparty/webkit/JavaScriptCore/wtf/HashIterators.h new file mode 100644 index 0000000..682c83b --- /dev/null +++ 
b/src/3rdparty/webkit/JavaScriptCore/wtf/HashIterators.h @@ -0,0 +1,216 @@ +/* + * Copyright (C) 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WTF_HashIterators_h +#define WTF_HashIterators_h + +namespace WTF { + + template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstKeysIterator; + template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstValuesIterator; + template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableKeysIterator; + template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableValuesIterator; + + template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > { + private: + typedef std::pair<KeyType, MappedType> ValueType; + public: + typedef HashTableConstKeysIterator<HashTableType, KeyType, MappedType> Keys; + typedef HashTableConstValuesIterator<HashTableType, KeyType, MappedType> Values; + + HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {} + + const ValueType* get() const { return (const ValueType*)m_impl.get(); } + const ValueType& operator*() const { return *get(); } + const ValueType* operator->() const { return get(); } + + HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; } + // postfix ++ intentionally omitted + + Keys keys() { return Keys(*this); } + Values values() { return Values(*this); } + + typename HashTableType::const_iterator m_impl; + }; + + template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > { + private: + typedef std::pair<KeyType, MappedType> ValueType; + public: + typedef HashTableKeysIterator<HashTableType, KeyType, MappedType> Keys; + typedef HashTableValuesIterator<HashTableType, KeyType, MappedType> Values; + + HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {} + + ValueType* get() const { return (ValueType*)m_impl.get(); } + ValueType& operator*() const { return *get(); } + ValueType* operator->() const { return get(); } + + 
HashTableIteratorAdapter& operator++() { ++m_impl; return *this; } + // postfix ++ intentionally omitted + + operator HashTableConstIteratorAdapter<HashTableType, ValueType>() { + typename HashTableType::const_iterator i = m_impl; + return i; + } + + Keys keys() { return Keys(*this); } + Values values() { return Values(*this); } + + typename HashTableType::iterator m_impl; + }; + + template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstKeysIterator { + private: + typedef HashTableConstIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > ConstIterator; + + public: + HashTableConstKeysIterator(const ConstIterator& impl) : m_impl(impl) {} + + const KeyType* get() const { return &(m_impl.get()->first); } + const KeyType& operator*() const { return *get(); } + const KeyType* operator->() const { return get(); } + + HashTableConstKeysIterator& operator++() { ++m_impl; return *this; } + // postfix ++ intentionally omitted + + ConstIterator m_impl; + }; + + template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableConstValuesIterator { + private: + typedef HashTableConstIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > ConstIterator; + + public: + HashTableConstValuesIterator(const ConstIterator& impl) : m_impl(impl) {} + + const MappedType* get() const { return &(m_impl.get()->second); } + const MappedType& operator*() const { return *get(); } + const MappedType* operator->() const { return get(); } + + HashTableConstValuesIterator& operator++() { ++m_impl; return *this; } + // postfix ++ intentionally omitted + + ConstIterator m_impl; + }; + + template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableKeysIterator { + private: + typedef HashTableIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > Iterator; + typedef HashTableConstIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > ConstIterator; + + public: + HashTableKeysIterator(const Iterator& impl) : m_impl(impl) {} + + KeyType* get() const { return &(m_impl.get()->first); } + KeyType& operator*() const { return *get(); } + KeyType* operator->() const { return get(); } + + HashTableKeysIterator& operator++() { ++m_impl; return *this; } + // postfix ++ intentionally omitted + + operator HashTableConstKeysIterator<HashTableType, KeyType, MappedType>() { + ConstIterator i = m_impl; + return i; + } + + Iterator m_impl; + }; + + template<typename HashTableType, typename KeyType, typename MappedType> struct HashTableValuesIterator { + private: + typedef HashTableIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > Iterator; + typedef HashTableConstIteratorAdapter<HashTableType, std::pair<KeyType, MappedType> > ConstIterator; + + public: + HashTableValuesIterator(const Iterator& impl) : m_impl(impl) {} + + MappedType* get() const { return &(m_impl.get()->second); } + MappedType& operator*() const { return *get(); } + MappedType* operator->() const { return get(); } + + HashTableValuesIterator& operator++() { ++m_impl; return *this; } + // postfix ++ intentionally omitted + + operator HashTableConstValuesIterator<HashTableType, KeyType, MappedType>() { + ConstIterator i = m_impl; + return i; + } + + Iterator m_impl; + }; + + template<typename T, typename U, typename V> + inline bool operator==(const HashTableConstKeysIterator<T, U, V>& a, const HashTableConstKeysIterator<T, U, V>& b) + { + return a.m_impl == b.m_impl; + } + + template<typename T, typename U, typename V> + inline bool 
operator!=(const HashTableConstKeysIterator<T, U, V>& a, const HashTableConstKeysIterator<T, U, V>& b) + { + return a.m_impl != b.m_impl; + } + + template<typename T, typename U, typename V> + inline bool operator==(const HashTableConstValuesIterator<T, U, V>& a, const HashTableConstValuesIterator<T, U, V>& b) + { + return a.m_impl == b.m_impl; + } + + template<typename T, typename U, typename V> + inline bool operator!=(const HashTableConstValuesIterator<T, U, V>& a, const HashTableConstValuesIterator<T, U, V>& b) + { + return a.m_impl != b.m_impl; + } + + template<typename T, typename U, typename V> + inline bool operator==(const HashTableKeysIterator<T, U, V>& a, const HashTableKeysIterator<T, U, V>& b) + { + return a.m_impl == b.m_impl; + } + + template<typename T, typename U, typename V> + inline bool operator!=(const HashTableKeysIterator<T, U, V>& a, const HashTableKeysIterator<T, U, V>& b) + { + return a.m_impl != b.m_impl; + } + + template<typename T, typename U, typename V> + inline bool operator==(const HashTableValuesIterator<T, U, V>& a, const HashTableValuesIterator<T, U, V>& b) + { + return a.m_impl == b.m_impl; + } + + template<typename T, typename U, typename V> + inline bool operator!=(const HashTableValuesIterator<T, U, V>& a, const HashTableValuesIterator<T, U, V>& b) + { + return a.m_impl != b.m_impl; + } + + +} // namespace WTF + +#endif // WTF_HashIterators_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/HashMap.h b/src/3rdparty/webkit/JavaScriptCore/wtf/HashMap.h new file mode 100644 index 0000000..c5b75ff --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/HashMap.h @@ -0,0 +1,334 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef WTF_HashMap_h +#define WTF_HashMap_h + +#include "HashTable.h" + +namespace WTF { + + template<typename PairType> struct PairFirstExtractor; + + template<typename KeyArg, typename MappedArg, typename HashArg = typename DefaultHash<KeyArg>::Hash, + typename KeyTraitsArg = HashTraits<KeyArg>, typename MappedTraitsArg = HashTraits<MappedArg> > + class HashMap { + private: + typedef KeyTraitsArg KeyTraits; + typedef MappedTraitsArg MappedTraits; + typedef PairHashTraits<KeyTraits, MappedTraits> ValueTraits; + + public: + typedef typename KeyTraits::TraitType KeyType; + typedef typename MappedTraits::TraitType MappedType; + typedef typename ValueTraits::TraitType ValueType; + + private: + typedef HashArg HashFunctions; + + typedef HashTable<KeyType, ValueType, PairFirstExtractor<ValueType>, + HashFunctions, ValueTraits, KeyTraits> HashTableType; + + public: + typedef HashTableIteratorAdapter<HashTableType, ValueType> iterator; + typedef HashTableConstIteratorAdapter<HashTableType, ValueType> const_iterator; + + void swap(HashMap&); + + int size() const; + int capacity() const; + bool isEmpty() const; + + // iterators iterate over pairs of keys and values + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + iterator find(const KeyType&); + const_iterator find(const KeyType&) const; + bool contains(const KeyType&) const; + MappedType get(const KeyType&) const; + + // replaces value but not key if key is already present + // return value is a pair of the iterator to the key location, + // and a boolean that's true if a new value was actually added + pair<iterator, bool> set(const KeyType&, const MappedType&); + + // does nothing if key is already present + // return value is a pair of the iterator to the key location, + // and a boolean that's true if a new value was actually added + pair<iterator, bool> add(const KeyType&, const MappedType&); + + void remove(const KeyType&); + void remove(iterator); + void clear(); + + MappedType take(const KeyType&); // efficient combination of get with remove + + private: + pair<iterator, bool> inlineAdd(const KeyType&, const MappedType&); + + HashTableType m_impl; + }; + + template<typename PairType> struct PairFirstExtractor { + static const typename PairType::first_type& extract(const PairType& p) { return p.first; } + }; + + template<typename ValueType, typename ValueTraits, typename HashFunctions> + struct HashMapTranslator { + typedef typename ValueType::first_type KeyType; + typedef typename ValueType::second_type MappedType; + + static unsigned hash(const KeyType& key) { return HashFunctions::hash(key); } + static bool equal(const KeyType& a, const KeyType& b) { return HashFunctions::equal(a, b); } + static void translate(ValueType& location, const KeyType& key, const MappedType& mapped) + { + location.first = key; + location.second = mapped; + } + }; + + template<typename T, typename U, typename V, typename W, typename X> + inline void HashMap<T, U, V, W, X>::swap(HashMap& other) + { + m_impl.swap(other.m_impl); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline int HashMap<T, U, V, W, X>::size() const + { + return m_impl.size(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline int HashMap<T, U, V, W, X>::capacity() const + { + return m_impl.capacity(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline bool HashMap<T, U, V, W, X>::isEmpty() const + { + return m_impl.isEmpty(); 
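+        // [Editor's illustrative sketch -- not part of the original header.]
+        // The set()/add() pair declared above differ only for keys that are
+        // already present: both insert a missing key, but only set() overwrites
+        // the mapped value of an existing one.
+#if 0
+        void example()
+        {
+            HashMap<int, int> map;
+            map.set(1, 10);  // inserts the pair (1, 10)
+            map.add(1, 20);  // key 1 already present; mapped value stays 10
+            map.set(1, 30);  // overwrites; mapped value is now 30
+        }
+#endif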
+ } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<T, U, V, W, X>::iterator HashMap<T, U, V, W, X>::begin() + { + return m_impl.begin(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<T, U, V, W, X>::iterator HashMap<T, U, V, W, X>::end() + { + return m_impl.end(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<T, U, V, W, X>::const_iterator HashMap<T, U, V, W, X>::begin() const + { + return m_impl.begin(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<T, U, V, W, X>::const_iterator HashMap<T, U, V, W, X>::end() const + { + return m_impl.end(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<T, U, V, W, X>::iterator HashMap<T, U, V, W, X>::find(const KeyType& key) + { + return m_impl.find(key); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<T, U, V, W, X>::const_iterator HashMap<T, U, V, W, X>::find(const KeyType& key) const + { + return m_impl.find(key); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline bool HashMap<T, U, V, W, X>::contains(const KeyType& key) const + { + return m_impl.contains(key); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline pair<typename HashMap<T, U, V, W, X>::iterator, bool> + HashMap<T, U, V, W, X>::inlineAdd(const KeyType& key, const MappedType& mapped) + { + typedef HashMapTranslator<ValueType, ValueTraits, HashFunctions> TranslatorType; + return m_impl.template add<KeyType, MappedType, TranslatorType>(key, mapped); + } + + template<typename T, typename U, typename V, typename W, typename X> + pair<typename HashMap<T, U, V, W, X>::iterator, bool> + HashMap<T, U, V, W, X>::set(const KeyType& key, const MappedType& mapped) + { + pair<iterator, bool> result = inlineAdd(key, mapped); + if (!result.second) { + // add call above didn't change anything, so set the mapped value + result.first->second = mapped; + } + return result; + } + + template<typename T, typename U, typename V, typename W, typename X> + pair<typename HashMap<T, U, V, W, X>::iterator, bool> + HashMap<T, U, V, W, X>::add(const KeyType& key, const MappedType& mapped) + { + return inlineAdd(key, mapped); + } + + template<typename T, typename U, typename V, typename W, typename MappedTraits> + typename HashMap<T, U, V, W, MappedTraits>::MappedType + HashMap<T, U, V, W, MappedTraits>::get(const KeyType& key) const + { + ValueType* entry = const_cast<HashTableType&>(m_impl).lookup(key); + if (!entry) + return MappedTraits::emptyValue(); + return entry->second; + } + + template<typename T, typename U, typename V, typename W, typename X> + inline void HashMap<T, U, V, W, X>::remove(iterator it) + { + if (it.m_impl == m_impl.end()) + return; + m_impl.checkTableConsistency(); + m_impl.removeWithoutEntryConsistencyCheck(it.m_impl); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline void HashMap<T, U, V, W, X>::remove(const KeyType& key) + { + remove(find(key)); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline void HashMap<T, U, V, W, X>::clear() + { + m_impl.clear(); + } + + template<typename T, typename U, typename V, typename W, typename MappedTraits> + typename HashMap<T, U, V, W, MappedTraits>::MappedType + HashMap<T, U, V, W, 
MappedTraits>::take(const KeyType& key) + { + // This can probably be made more efficient to avoid ref/deref churn. + iterator it = find(key); + if (it == end()) + return MappedTraits::emptyValue(); + typename HashMap<T, U, V, W, MappedTraits>::MappedType result = it->second; + remove(it); + return result; + } + + template<typename T, typename U, typename V, typename W, typename X> + bool operator==(const HashMap<T, U, V, W, X>& a, const HashMap<T, U, V, W, X>& b) + { + if (a.size() != b.size()) + return false; + + typedef typename HashMap<T, U, V, W, X>::const_iterator const_iterator; + + const_iterator end = a.end(); + const_iterator notFound = b.end(); + for (const_iterator it = a.begin(); it != end; ++it) { + const_iterator bPos = b.find(it->first); + if (bPos == notFound || it->second != bPos->second) + return false; + } + + return true; + } + + template<typename T, typename U, typename V, typename W, typename X> + inline bool operator!=(const HashMap<T, U, V, W, X>& a, const HashMap<T, U, V, W, X>& b) + { + return !(a == b); + } + + template<typename MappedType, typename HashTableType> + void deleteAllPairSeconds(HashTableType& collection) + { + typedef typename HashTableType::const_iterator iterator; + iterator end = collection.end(); + for (iterator it = collection.begin(); it != end; ++it) + delete it->second; + } + + template<typename T, typename U, typename V, typename W, typename X> + inline void deleteAllValues(const HashMap<T, U, V, W, X>& collection) + { + deleteAllPairSeconds<typename HashMap<T, U, V, W, X>::MappedType>(collection); + } + + template<typename KeyType, typename HashTableType> + void deleteAllPairFirsts(HashTableType& collection) + { + typedef typename HashTableType::const_iterator iterator; + iterator end = collection.end(); + for (iterator it = collection.begin(); it != end; ++it) + delete it->first; + } + + template<typename T, typename U, typename V, typename W, typename X> + inline void deleteAllKeys(const HashMap<T, U, V, W, X>& collection) + { + deleteAllPairFirsts<typename HashMap<T, U, V, W, X>::KeyType>(collection); + } + + template<typename T, typename U, typename V, typename W, typename X, typename Y> + inline void copyKeysToVector(const HashMap<T, U, V, W, X>& collection, Y& vector) + { + typedef typename HashMap<T, U, V, W, X>::const_iterator::Keys iterator; + + vector.resize(collection.size()); + + iterator it = collection.begin().keys(); + iterator end = collection.end().keys(); + for (unsigned i = 0; it != end; ++it, ++i) + vector[i] = *it; + } + + template<typename T, typename U, typename V, typename W, typename X, typename Y> + inline void copyValuesToVector(const HashMap<T, U, V, W, X>& collection, Y& vector) + { + typedef typename HashMap<T, U, V, W, X>::const_iterator::Values iterator; + + vector.resize(collection.size()); + + iterator it = collection.begin().values(); + iterator end = collection.end().values(); + for (unsigned i = 0; it != end; ++it, ++i) + vector[i] = *it; + } + +} // namespace WTF + +using WTF::HashMap; + +#include "RefPtrHashMap.h" + +#endif /* WTF_HashMap_h */ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/HashSet.h b/src/3rdparty/webkit/JavaScriptCore/wtf/HashSet.h new file mode 100644 index 0000000..da99f2c --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/HashSet.h @@ -0,0 +1,271 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_HashSet_h +#define WTF_HashSet_h + +#include "HashTable.h" + +namespace WTF { + + template<typename Value, typename HashFunctions, typename Traits> class HashSet; + template<typename Value, typename HashFunctions, typename Traits> + void deleteAllValues(const HashSet<Value, HashFunctions, Traits>&); + + template<typename T> struct IdentityExtractor; + + template<typename ValueArg, typename HashArg = typename DefaultHash<ValueArg>::Hash, + typename TraitsArg = HashTraits<ValueArg> > class HashSet { + private: + typedef HashArg HashFunctions; + typedef TraitsArg ValueTraits; + + public: + typedef typename ValueTraits::TraitType ValueType; + + private: + typedef HashTable<ValueType, ValueType, IdentityExtractor<ValueType>, + HashFunctions, ValueTraits, ValueTraits> HashTableType; + + public: + typedef HashTableIteratorAdapter<HashTableType, ValueType> iterator; + typedef HashTableConstIteratorAdapter<HashTableType, ValueType> const_iterator; + + void swap(HashSet&); + + int size() const; + int capacity() const; + bool isEmpty() const; + + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + iterator find(const ValueType&); + const_iterator find(const ValueType&) const; + bool contains(const ValueType&) const; + + // An alternate version of find() that finds the object by hashing and comparing + // with some other type, to avoid the cost of type conversion. HashTranslator + // must have the following function members: + // static unsigned hash(const T&); + // static bool equal(const ValueType&, const T&); + template<typename T, typename HashTranslator> iterator find(const T&); + template<typename T, typename HashTranslator> const_iterator find(const T&) const; + template<typename T, typename HashTranslator> bool contains(const T&) const; + + // The return value is a pair of an interator to the new value's location, + // and a bool that is true if an new entry was added. + pair<iterator, bool> add(const ValueType&); + + // An alternate version of add() that finds the object by hashing and comparing + // with some other type, to avoid the cost of type conversion if the object is already + // in the table. 
HashTranslator must have the following methods: + // static unsigned hash(const T&); + // static bool equal(const ValueType&, const T&); + // static void translate(ValueType&, const T&, unsigned hashCode); + template<typename T, typename HashTranslator> pair<iterator, bool> add(const T&); + + void remove(const ValueType&); + void remove(iterator); + void clear(); + + private: + friend void deleteAllValues<>(const HashSet&); + + HashTableType m_impl; + }; + + template<typename T> struct IdentityExtractor { + static const T& extract(const T& t) { return t; } + }; + + template<typename ValueType, typename ValueTraits, typename T, typename Translator> + struct HashSetTranslatorAdapter { + static unsigned hash(const T& key) { return Translator::hash(key); } + static bool equal(const ValueType& a, const T& b) { return Translator::equal(a, b); } + static void translate(ValueType& location, const T& key, const T&, unsigned hashCode) + { + Translator::translate(location, key, hashCode); + } + }; + + template<typename T, typename U, typename V> + inline void HashSet<T, U, V>::swap(HashSet& other) + { + m_impl.swap(other.m_impl); + } + + template<typename T, typename U, typename V> + inline int HashSet<T, U, V>::size() const + { + return m_impl.size(); + } + + template<typename T, typename U, typename V> + inline int HashSet<T, U, V>::capacity() const + { + return m_impl.capacity(); + } + + template<typename T, typename U, typename V> + inline bool HashSet<T, U, V>::isEmpty() const + { + return m_impl.isEmpty(); + } + + template<typename T, typename U, typename V> + inline typename HashSet<T, U, V>::iterator HashSet<T, U, V>::begin() + { + return m_impl.begin(); + } + + template<typename T, typename U, typename V> + inline typename HashSet<T, U, V>::iterator HashSet<T, U, V>::end() + { + return m_impl.end(); + } + + template<typename T, typename U, typename V> + inline typename HashSet<T, U, V>::const_iterator HashSet<T, U, V>::begin() const + { + return m_impl.begin(); + } + + template<typename T, typename U, typename V> + inline typename HashSet<T, U, V>::const_iterator HashSet<T, U, V>::end() const + { + return m_impl.end(); + } + + template<typename T, typename U, typename V> + inline typename HashSet<T, U, V>::iterator HashSet<T, U, V>::find(const ValueType& value) + { + return m_impl.find(value); + } + + template<typename T, typename U, typename V> + inline typename HashSet<T, U, V>::const_iterator HashSet<T, U, V>::find(const ValueType& value) const + { + return m_impl.find(value); + } + + template<typename T, typename U, typename V> + inline bool HashSet<T, U, V>::contains(const ValueType& value) const + { + return m_impl.contains(value); + } + + template<typename Value, typename HashFunctions, typename Traits> + template<typename T, typename Translator> + typename HashSet<Value, HashFunctions, Traits>::iterator + inline HashSet<Value, HashFunctions, Traits>::find(const T& value) + { + typedef HashSetTranslatorAdapter<ValueType, ValueTraits, T, Translator> Adapter; + return m_impl.template find<T, Adapter>(value); + } + + template<typename Value, typename HashFunctions, typename Traits> + template<typename T, typename Translator> + typename HashSet<Value, HashFunctions, Traits>::const_iterator + inline HashSet<Value, HashFunctions, Traits>::find(const T& value) const + { + typedef HashSetTranslatorAdapter<ValueType, ValueTraits, T, Translator> Adapter; + return m_impl.template find<T, Adapter>(value); + } + + template<typename Value, typename HashFunctions, typename Traits> + template<typename T,
typename Translator> + inline bool HashSet<Value, HashFunctions, Traits>::contains(const T& value) const + { + typedef HashSetTranslatorAdapter<ValueType, ValueTraits, T, Translator> Adapter; + return m_impl.template contains<T, Adapter>(value); + } + + template<typename T, typename U, typename V> + pair<typename HashSet<T, U, V>::iterator, bool> HashSet<T, U, V>::add(const ValueType& value) + { + return m_impl.add(value); + } + + template<typename Value, typename HashFunctions, typename Traits> + template<typename T, typename Translator> + pair<typename HashSet<Value, HashFunctions, Traits>::iterator, bool> + HashSet<Value, HashFunctions, Traits>::add(const T& value) + { + typedef HashSetTranslatorAdapter<ValueType, ValueTraits, T, Translator> Adapter; + return m_impl.template addPassingHashCode<T, T, Adapter>(value, value); + } + + template<typename T, typename U, typename V> + inline void HashSet<T, U, V>::remove(iterator it) + { + if (it.m_impl == m_impl.end()) + return; + m_impl.checkTableConsistency(); + m_impl.removeWithoutEntryConsistencyCheck(it.m_impl); + } + + template<typename T, typename U, typename V> + inline void HashSet<T, U, V>::remove(const ValueType& value) + { + remove(find(value)); + } + + template<typename T, typename U, typename V> + inline void HashSet<T, U, V>::clear() + { + m_impl.clear(); + } + + template<typename ValueType, typename HashTableType> + void deleteAllValues(HashTableType& collection) + { + typedef typename HashTableType::const_iterator iterator; + iterator end = collection.end(); + for (iterator it = collection.begin(); it != end; ++it) + delete *it; + } + + template<typename T, typename U, typename V> + inline void deleteAllValues(const HashSet<T, U, V>& collection) + { + deleteAllValues<typename HashSet<T, U, V>::ValueType>(collection.m_impl); + } + + template<typename T, typename U, typename V, typename W> + inline void copyToVector(const HashSet<T, U, V>& collection, W& vector) + { + typedef typename HashSet<T, U, V>::const_iterator iterator; + + vector.resize(collection.size()); + + iterator it = collection.begin(); + iterator end = collection.end(); + for (unsigned i = 0; it != end; ++it, ++i) + vector[i] = *it; + } + +} // namespace WTF + +using WTF::HashSet; + +#endif /* WTF_HashSet_h */ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/HashTable.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/HashTable.cpp new file mode 100644 index 0000000..71d3f86 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/HashTable.cpp @@ -0,0 +1,69 @@ +/* + Copyright (C) 2005 Apple Inc. All rights reserved. + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public License + along with this library; see the file COPYING.LIB. If not, write to + the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. 
+*/ + +#include "config.h" +#include "HashTable.h" + +namespace WTF { + +#if DUMP_HASHTABLE_STATS + +int HashTableStats::numAccesses; +int HashTableStats::numCollisions; +int HashTableStats::collisionGraph[4096]; +int HashTableStats::maxCollisions; +int HashTableStats::numRehashes; +int HashTableStats::numRemoves; +int HashTableStats::numReinserts; + +static HashTableStats logger; + +static Mutex& hashTableStatsMutex() +{ + AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); + return mutex; +} + +HashTableStats::~HashTableStats() +{ + // Don't lock hashTableStatsMutex here because it can cause deadlocks at shutdown + // if any thread was killed while holding the mutex. + printf("\nWTF::HashTable statistics\n\n"); + printf("%d accesses\n", numAccesses); + printf("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses); + printf("longest collision chain: %d\n", maxCollisions); + for (int i = 1; i <= maxCollisions; i++) { + printf(" %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses); + } + printf("%d rehashes\n", numRehashes); + printf("%d reinserts\n", numReinserts); +} + +void HashTableStats::recordCollisionAtCount(int count) +{ + MutexLocker lock(hashTableStatsMutex()); + if (count > maxCollisions) + maxCollisions = count; + numCollisions++; + collisionGraph[count]++; +} + +#endif + +} // namespace WTF diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/HashTable.h b/src/3rdparty/webkit/JavaScriptCore/wtf/HashTable.h new file mode 100644 index 0000000..3b283f8 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/HashTable.h @@ -0,0 +1,1158 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008 David Levin <levin@chromium.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_HashTable_h +#define WTF_HashTable_h + +#include "FastMalloc.h" +#include "HashTraits.h" +#include <wtf/Assertions.h> +#include <wtf/Threading.h> + +namespace WTF { + +#define DUMP_HASHTABLE_STATS 0 +#define CHECK_HASHTABLE_CONSISTENCY 0 + +#ifdef NDEBUG +#define CHECK_HASHTABLE_ITERATORS 0 +#define CHECK_HASHTABLE_USE_AFTER_DESTRUCTION 0 +#else +#define CHECK_HASHTABLE_ITERATORS 1 +#define CHECK_HASHTABLE_USE_AFTER_DESTRUCTION 1 +#endif + +#if DUMP_HASHTABLE_STATS + + struct HashTableStats { + ~HashTableStats(); + // All of the variables are accessed in ~HashTableStats when the static struct is destroyed. + + // The following variables are all atomically incremented when modified. 
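+ // (For instance, the probing loops below do atomicIncrement(&HashTableStats::numAccesses); + // a plain ++ would under-count once several threads hash concurrently.)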
+ static int numAccesses; + static int numRehashes; + static int numRemoves; + static int numReinserts; + + // The following variables are only modified in the recordCollisionAtCount method within a mutex. + static int maxCollisions; + static int numCollisions; + static int collisionGraph[4096]; + + static void recordCollisionAtCount(int count); + }; + +#endif + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + class HashTable; + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + class HashTableIterator; + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + class HashTableConstIterator; + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void addIterator(const HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*, + HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*); + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void removeIterator(HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*); + +#if !CHECK_HASHTABLE_ITERATORS + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + inline void addIterator(const HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*, + HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*) { } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + inline void removeIterator(HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*) { } + +#endif + + typedef enum { HashItemKnownGood } HashItemKnownGoodTag; + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + class HashTableConstIterator { + private: + typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType; + typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator; + typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator; + typedef Value ValueType; + typedef const ValueType& ReferenceType; + typedef const ValueType* PointerType; + + friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>; + friend class HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>; + + void skipEmptyBuckets() + { + while (m_position != m_endPosition && HashTableType::isEmptyOrDeletedBucket(*m_position)) + ++m_position; + } + + HashTableConstIterator(const HashTableType* table, PointerType position, PointerType endPosition) + : m_position(position), m_endPosition(endPosition) + { + addIterator(table, this); + skipEmptyBuckets(); + } + + HashTableConstIterator(const HashTableType* table, PointerType position, PointerType endPosition, HashItemKnownGoodTag) + : m_position(position), m_endPosition(endPosition) + { + addIterator(table, this); + } + + public: + HashTableConstIterator() + { + addIterator(0, this); + } + + // default copy, assignment and destructor are OK if CHECK_HASHTABLE_ITERATORS is 0 + +#if CHECK_HASHTABLE_ITERATORS + ~HashTableConstIterator() + { + 
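+ // Unregister this iterator from its table's list of live iterators + // (see removeIterator() below), so that later table mutations cannot + // touch a destroyed iterator.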
removeIterator(this); + } + + HashTableConstIterator(const const_iterator& other) + : m_position(other.m_position), m_endPosition(other.m_endPosition) + { + addIterator(other.m_table, this); + } + + const_iterator& operator=(const const_iterator& other) + { + m_position = other.m_position; + m_endPosition = other.m_endPosition; + + removeIterator(this); + addIterator(other.m_table, this); + + return *this; + } +#endif + + PointerType get() const + { + checkValidity(); + return m_position; + } + ReferenceType operator*() const { return *get(); } + PointerType operator->() const { return get(); } + + const_iterator& operator++() + { + checkValidity(); + ASSERT(m_position != m_endPosition); + ++m_position; + skipEmptyBuckets(); + return *this; + } + + // postfix ++ intentionally omitted + + // Comparison. + bool operator==(const const_iterator& other) const + { + checkValidity(other); + return m_position == other.m_position; + } + bool operator!=(const const_iterator& other) const + { + checkValidity(other); + return m_position != other.m_position; + } + + private: + void checkValidity() const + { +#if CHECK_HASHTABLE_ITERATORS + ASSERT(m_table); +#endif + } + + +#if CHECK_HASHTABLE_ITERATORS + void checkValidity(const const_iterator& other) const + { + ASSERT(m_table); + ASSERT(other.m_table); + ASSERT(m_table == other.m_table); + } +#else + void checkValidity(const const_iterator&) const { } +#endif + + PointerType m_position; + PointerType m_endPosition; + +#if CHECK_HASHTABLE_ITERATORS + public: + // Any modification of the m_next or m_previous of an iterator that is in a HashTable's m_iterators linked list + // must be guarded by m_table->m_mutex. + mutable const HashTableType* m_table; + mutable const_iterator* m_next; + mutable const_iterator* m_previous; +#endif + }; + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + class HashTableIterator { + private: + typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType; + typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator; + typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator; + typedef Value ValueType; + typedef ValueType& ReferenceType; + typedef ValueType* PointerType; + + friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>; + + HashTableIterator(HashTableType* table, PointerType pos, PointerType end) : m_iterator(table, pos, end) { } + HashTableIterator(HashTableType* table, PointerType pos, PointerType end, HashItemKnownGoodTag tag) : m_iterator(table, pos, end, tag) { } + + public: + HashTableIterator() { } + + // default copy, assignment and destructor are OK + + PointerType get() const { return const_cast<PointerType>(m_iterator.get()); } + ReferenceType operator*() const { return *get(); } + PointerType operator->() const { return get(); } + + iterator& operator++() { ++m_iterator; return *this; } + + // postfix ++ intentionally omitted + + // Comparison. + bool operator==(const iterator& other) const { return m_iterator == other.m_iterator; } + bool operator!=(const iterator& other) const { return m_iterator != other.m_iterator; } + + operator const_iterator() const { return m_iterator; } + + private: + const_iterator m_iterator; + }; + + using std::swap; + +#if !COMPILER(MSVC) + // Visual C++ has a swap for pairs defined.
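+ // A sketch of the motivation, assuming pair members with cheap specialized swaps: + // std::swap's generic temp-copy dance would copy a pair<RefPtr<T>, int> three times, + // ref-churning the RefPtr, while swapping member-wise below dispatches to the + // members' own swap overloads.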
+ + // swap pairs by component, in case of pair members that specialize swap + template<typename T, typename U> inline void swap(pair<T, U>& a, pair<T, U>& b) + { + swap(a.first, b.first); + swap(a.second, b.second); + } +#endif + + template<typename T, bool useSwap> struct Mover; + template<typename T> struct Mover<T, true> { static void move(T& from, T& to) { swap(from, to); } }; + template<typename T> struct Mover<T, false> { static void move(T& from, T& to) { to = from; } }; + + template<typename Key, typename Value, typename HashFunctions> class IdentityHashTranslator { + public: + static unsigned hash(const Key& key) { return HashFunctions::hash(key); } + static bool equal(const Key& a, const Key& b) { return HashFunctions::equal(a, b); } + static void translate(Value& location, const Key&, const Value& value) { location = value; } + }; + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + class HashTable { + public: + typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator; + typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator; + typedef Traits ValueTraits; + typedef Key KeyType; + typedef Value ValueType; + typedef IdentityHashTranslator<Key, Value, HashFunctions> IdentityTranslatorType; + + HashTable(); + ~HashTable() + { + invalidateIterators(); + deallocateTable(m_table, m_tableSize); +#if CHECK_HASHTABLE_USE_AFTER_DESTRUCTION + m_table = (ValueType*)(uintptr_t)0xbbadbeef; +#endif + } + + HashTable(const HashTable&); + void swap(HashTable&); + HashTable& operator=(const HashTable&); + + iterator begin() { return makeIterator(m_table); } + iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); } + const_iterator begin() const { return makeConstIterator(m_table); } + const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tableSize); } + + int size() const { return m_keyCount; } + int capacity() const { return m_tableSize; } + bool isEmpty() const { return !m_keyCount; } + + pair<iterator, bool> add(const ValueType& value) { return add<KeyType, ValueType, IdentityTranslatorType>(Extractor::extract(value), value); } + + // A special version of add() that finds the object by hashing and comparing + // with some other type, to avoid the cost of type conversion if the object is already + // in the table. 
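+ // Judging from the call sites below (HashTranslator::translate(*entry, key, extra) in add(), + // and HashTranslator::translate(*entry, key, extra, h) in addPassingHashCode()), + // HashTranslator must provide: + // static unsigned hash(const T&); + // static bool equal(const ValueType&, const T&); + // static void translate(ValueType&, const T&, const Extra&); // for add() + // static void translate(ValueType&, const T&, const Extra&, unsigned); // for addPassingHashCode()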
+ template<typename T, typename Extra, typename HashTranslator> pair<iterator, bool> add(const T& key, const Extra&); + template<typename T, typename Extra, typename HashTranslator> pair<iterator, bool> addPassingHashCode(const T& key, const Extra&); + + iterator find(const KeyType& key) { return find<KeyType, IdentityTranslatorType>(key); } + const_iterator find(const KeyType& key) const { return find<KeyType, IdentityTranslatorType>(key); } + bool contains(const KeyType& key) const { return contains<KeyType, IdentityTranslatorType>(key); } + + template <typename T, typename HashTranslator> iterator find(const T&); + template <typename T, typename HashTranslator> const_iterator find(const T&) const; + template <typename T, typename HashTranslator> bool contains(const T&) const; + + void remove(const KeyType&); + void remove(iterator); + void removeWithoutEntryConsistencyCheck(iterator); + void clear(); + + static bool isEmptyBucket(const ValueType& value) { return Extractor::extract(value) == KeyTraits::emptyValue(); } + static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); } + static bool isEmptyOrDeletedBucket(const ValueType& value) { return isEmptyBucket(value) || isDeletedBucket(value); } + + ValueType* lookup(const Key& key) { return lookup<Key, IdentityTranslatorType>(key); } + template<typename T, typename HashTranslator> ValueType* lookup(const T&); + +#if CHECK_HASHTABLE_CONSISTENCY + void checkTableConsistency() const; +#else + static void checkTableConsistency() { } +#endif + + private: + static ValueType* allocateTable(int size); + static void deallocateTable(ValueType* table, int size); + + typedef pair<ValueType*, bool> LookupType; + typedef pair<LookupType, unsigned> FullLookupType; + + LookupType lookupForWriting(const Key& key) { return lookupForWriting<Key, IdentityTranslatorType>(key); }; + template<typename T, typename HashTranslator> FullLookupType fullLookupForWriting(const T&); + template<typename T, typename HashTranslator> LookupType lookupForWriting(const T&); + + template<typename T, typename HashTranslator> void checkKey(const T&); + + void removeAndInvalidateWithoutEntryConsistencyCheck(ValueType*); + void removeAndInvalidate(ValueType*); + void remove(ValueType*); + + bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; } + bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; } + bool shouldShrink() const { return m_keyCount * m_minLoad < m_tableSize && m_tableSize > m_minTableSize; } + void expand(); + void shrink() { rehash(m_tableSize / 2); } + + void rehash(int newTableSize); + void reinsert(ValueType&); + + static void initializeBucket(ValueType& bucket) { new (&bucket) ValueType(Traits::emptyValue()); } + static void deleteBucket(ValueType& bucket) { bucket.~ValueType(); Traits::constructDeletedValue(bucket); } + + FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash) + { return FullLookupType(LookupType(position, found), hash); } + + iterator makeIterator(ValueType* pos) { return iterator(this, pos, m_table + m_tableSize); } + const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(this, pos, m_table + m_tableSize); } + iterator makeKnownGoodIterator(ValueType* pos) { return iterator(this, pos, m_table + m_tableSize, HashItemKnownGood); } + const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(this, pos, m_table + m_tableSize, 
HashItemKnownGood); } + +#if CHECK_HASHTABLE_CONSISTENCY + void checkTableConsistencyExceptSize() const; +#else + static void checkTableConsistencyExceptSize() { } +#endif + +#if CHECK_HASHTABLE_ITERATORS + void invalidateIterators(); +#else + static void invalidateIterators() { } +#endif + + static const int m_minTableSize = 64; + static const int m_maxLoad = 2; + static const int m_minLoad = 6; + + ValueType* m_table; + int m_tableSize; + int m_tableSizeMask; + int m_keyCount; + int m_deletedCount; + +#if CHECK_HASHTABLE_ITERATORS + public: + // All access to m_iterators should be guarded with m_mutex. + mutable const_iterator* m_iterators; + mutable Mutex m_mutex; +#endif + }; + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::HashTable() + : m_table(0) + , m_tableSize(0) + , m_tableSizeMask(0) + , m_keyCount(0) + , m_deletedCount(0) +#if CHECK_HASHTABLE_ITERATORS + , m_iterators(0) +#endif + { + } + + static inline unsigned doubleHash(unsigned key) + { + key = ~key + (key >> 23); + key ^= (key << 12); + key ^= (key >> 7); + key ^= (key << 2); + key ^= (key >> 20); + return key; + } + +#if ASSERT_DISABLED + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + template<typename T, typename HashTranslator> + inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkKey(const T&) + { + } + +#else + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + template<typename T, typename HashTranslator> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkKey(const T& key) + { + if (!HashFunctions::safeToCompareToEmptyOrDeleted) + return; + ASSERT(!HashTranslator::equal(KeyTraits::emptyValue(), key)); + ValueType deletedValue = Traits::emptyValue(); + deletedValue.~ValueType(); + Traits::constructDeletedValue(deletedValue); + ASSERT(!HashTranslator::equal(Extractor::extract(deletedValue), key)); + new (&deletedValue) ValueType(Traits::emptyValue()); + } + +#endif + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + template<typename T, typename HashTranslator> + inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::lookup(const T& key) + { + checkKey<T, HashTranslator>(key); + + int k = 0; + int sizeMask = m_tableSizeMask; + ValueType* table = m_table; + unsigned h = HashTranslator::hash(key); + int i = h & sizeMask; + + if (!table) + return 0; + +#if DUMP_HASHTABLE_STATS + atomicIncrement(&HashTableStats::numAccesses); + int probeCount = 0; +#endif + + while (1) { + ValueType* entry = table + i; + + // we count on the compiler to optimize out this branch + if (HashFunctions::safeToCompareToEmptyOrDeleted) { + if (HashTranslator::equal(Extractor::extract(*entry), key)) + return entry; + + if (isEmptyBucket(*entry)) + return 0; + } else { + if (isEmptyBucket(*entry)) + return 0; + + if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(*entry), key)) + return entry; + } +#if DUMP_HASHTABLE_STATS + ++probeCount; + HashTableStats::recordCollisionAtCount(probeCount); +#endif + if (k == 0) + k = 1 | doubleHash(h); + i = (i + k) & sizeMask; + } + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename 
Traits, typename KeyTraits> + template<typename T, typename HashTranslator> + inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::lookupForWriting(const T& key) + { + ASSERT(m_table); + checkKey<T, HashTranslator>(key); + + int k = 0; + ValueType* table = m_table; + int sizeMask = m_tableSizeMask; + unsigned h = HashTranslator::hash(key); + int i = h & sizeMask; + +#if DUMP_HASHTABLE_STATS + atomicIncrement(&HashTableStats::numAccesses); + int probeCount = 0; +#endif + + ValueType* deletedEntry = 0; + + while (1) { + ValueType* entry = table + i; + + // we count on the compiler to optimize out this branch + if (HashFunctions::safeToCompareToEmptyOrDeleted) { + if (isEmptyBucket(*entry)) + return LookupType(deletedEntry ? deletedEntry : entry, false); + + if (HashTranslator::equal(Extractor::extract(*entry), key)) + return LookupType(entry, true); + + if (isDeletedBucket(*entry)) + deletedEntry = entry; + } else { + if (isEmptyBucket(*entry)) + return LookupType(deletedEntry ? deletedEntry : entry, false); + + if (isDeletedBucket(*entry)) + deletedEntry = entry; + else if (HashTranslator::equal(Extractor::extract(*entry), key)) + return LookupType(entry, true); + } +#if DUMP_HASHTABLE_STATS + ++probeCount; + HashTableStats::recordCollisionAtCount(probeCount); +#endif + if (k == 0) + k = 1 | doubleHash(h); + i = (i + k) & sizeMask; + } + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + template<typename T, typename HashTranslator> + inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::fullLookupForWriting(const T& key) + { + ASSERT(m_table); + checkKey<T, HashTranslator>(key); + + int k = 0; + ValueType* table = m_table; + int sizeMask = m_tableSizeMask; + unsigned h = HashTranslator::hash(key); + int i = h & sizeMask; + +#if DUMP_HASHTABLE_STATS + atomicIncrement(&HashTableStats::numAccesses); + int probeCount = 0; +#endif + + ValueType* deletedEntry = 0; + + while (1) { + ValueType* entry = table + i; + + // we count on the compiler to optimize out this branch + if (HashFunctions::safeToCompareToEmptyOrDeleted) { + if (isEmptyBucket(*entry)) + return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h); + + if (HashTranslator::equal(Extractor::extract(*entry), key)) + return makeLookupResult(entry, true, h); + + if (isDeletedBucket(*entry)) + deletedEntry = entry; + } else { + if (isEmptyBucket(*entry)) + return makeLookupResult(deletedEntry ? 
deletedEntry : entry, false, h); + + if (isDeletedBucket(*entry)) + deletedEntry = entry; + else if (HashTranslator::equal(Extractor::extract(*entry), key)) + return makeLookupResult(entry, true, h); + } +#if DUMP_HASHTABLE_STATS + ++probeCount; + HashTableStats::recordCollisionAtCount(probeCount); +#endif + if (k == 0) + k = 1 | doubleHash(h); + i = (i + k) & sizeMask; + } + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + template<typename T, typename Extra, typename HashTranslator> + inline pair<typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::iterator, bool> HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::add(const T& key, const Extra& extra) + { + checkKey<T, HashTranslator>(key); + + invalidateIterators(); + + if (!m_table) + expand(); + + checkTableConsistency(); + + ASSERT(m_table); + + int k = 0; + ValueType* table = m_table; + int sizeMask = m_tableSizeMask; + unsigned h = HashTranslator::hash(key); + int i = h & sizeMask; + +#if DUMP_HASHTABLE_STATS + atomicIncrement(&HashTableStats::numAccesses); + int probeCount = 0; +#endif + + ValueType* deletedEntry = 0; + ValueType* entry; + while (1) { + entry = table + i; + + // we count on the compiler to optimize out this branch + if (HashFunctions::safeToCompareToEmptyOrDeleted) { + if (isEmptyBucket(*entry)) + break; + + if (HashTranslator::equal(Extractor::extract(*entry), key)) + return std::make_pair(makeKnownGoodIterator(entry), false); + + if (isDeletedBucket(*entry)) + deletedEntry = entry; + } else { + if (isEmptyBucket(*entry)) + break; + + if (isDeletedBucket(*entry)) + deletedEntry = entry; + else if (HashTranslator::equal(Extractor::extract(*entry), key)) + return std::make_pair(makeKnownGoodIterator(entry), false); + } +#if DUMP_HASHTABLE_STATS + ++probeCount; + HashTableStats::recordCollisionAtCount(probeCount); +#endif + if (k == 0) + k = 1 | doubleHash(h); + i = (i + k) & sizeMask; + } + + if (deletedEntry) { + initializeBucket(*deletedEntry); + entry = deletedEntry; + --m_deletedCount; + } + + HashTranslator::translate(*entry, key, extra); + + ++m_keyCount; + + if (shouldExpand()) { + // FIXME: This makes an extra copy on expand. Probably not that bad since + // expand is rare, but would be better to have a version of expand that can + // follow a pivot entry and return the new position. 
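+ // Copy the key out before expanding: expand() rehashes into a freshly + // allocated table, so 'entry' would dangle after the call.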
+ KeyType enteredKey = Extractor::extract(*entry); + expand(); + pair<iterator, bool> p = std::make_pair(find(enteredKey), true); + ASSERT(p.first != end()); + return p; + } + + checkTableConsistency(); + + return std::make_pair(makeKnownGoodIterator(entry), true); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + template<typename T, typename Extra, typename HashTranslator> + inline pair<typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::iterator, bool> HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::addPassingHashCode(const T& key, const Extra& extra) + { + checkKey<T, HashTranslator>(key); + + invalidateIterators(); + + if (!m_table) + expand(); + + checkTableConsistency(); + + FullLookupType lookupResult = fullLookupForWriting<T, HashTranslator>(key); + + ValueType* entry = lookupResult.first.first; + bool found = lookupResult.first.second; + unsigned h = lookupResult.second; + + if (found) + return std::make_pair(makeKnownGoodIterator(entry), false); + + if (isDeletedBucket(*entry)) { + initializeBucket(*entry); + --m_deletedCount; + } + + HashTranslator::translate(*entry, key, extra, h); + ++m_keyCount; + if (shouldExpand()) { + // FIXME: This makes an extra copy on expand. Probably not that bad since + // expand is rare, but would be better to have a version of expand that can + // follow a pivot entry and return the new position. + KeyType enteredKey = Extractor::extract(*entry); + expand(); + pair<iterator, bool> p = std::make_pair(find(enteredKey), true); + ASSERT(p.first != end()); + return p; + } + + checkTableConsistency(); + + return std::make_pair(makeKnownGoodIterator(entry), true); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::reinsert(ValueType& entry) + { + ASSERT(m_table); + ASSERT(!lookupForWriting(Extractor::extract(entry)).second); + ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first))); +#if DUMP_HASHTABLE_STATS + atomicIncrement(&HashTableStats::numReinserts); +#endif + + Mover<ValueType, Traits::needsDestruction>::move(entry, *lookupForWriting(Extractor::extract(entry)).first); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + template <typename T, typename HashTranslator> + typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::find(const T& key) + { + if (!m_table) + return end(); + + ValueType* entry = lookup<T, HashTranslator>(key); + if (!entry) + return end(); + + return makeKnownGoodIterator(entry); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + template <typename T, typename HashTranslator> + typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::const_iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::find(const T& key) const + { + if (!m_table) + return end(); + + ValueType* entry = const_cast<HashTable*>(this)->lookup<T, HashTranslator>(key); + if (!entry) + return end(); + + return makeKnownGoodConstIterator(entry); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename 
KeyTraits> + template <typename T, typename HashTranslator> + bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::contains(const T& key) const + { + if (!m_table) + return false; + + return const_cast<HashTable*>(this)->lookup<T, HashTranslator>(key); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeAndInvalidateWithoutEntryConsistencyCheck(ValueType* pos) + { + invalidateIterators(); + remove(pos); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeAndInvalidate(ValueType* pos) + { + invalidateIterators(); + checkTableConsistency(); + remove(pos); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::remove(ValueType* pos) + { +#if DUMP_HASHTABLE_STATS + atomicIncrement(&HashTableStats::numRemoves); +#endif + + deleteBucket(*pos); + ++m_deletedCount; + --m_keyCount; + + if (shouldShrink()) + shrink(); + + checkTableConsistency(); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::remove(iterator it) + { + if (it == end()) + return; + + removeAndInvalidate(const_cast<ValueType*>(it.m_iterator.m_position)); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeWithoutEntryConsistencyCheck(iterator it) + { + if (it == end()) + return; + + removeAndInvalidateWithoutEntryConsistencyCheck(const_cast<ValueType*>(it.m_iterator.m_position)); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::remove(const KeyType& key) + { + remove(find(key)); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::allocateTable(int size) + { + // would use a template member function with explicit specializations here, but + // gcc doesn't appear to support that + if (Traits::emptyValueIsZero) + return static_cast<ValueType*>(fastZeroedMalloc(size * sizeof(ValueType))); + ValueType* result = static_cast<ValueType*>(fastMalloc(size * sizeof(ValueType))); + for (int i = 0; i < size; i++) + initializeBucket(result[i]); + return result; + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::deallocateTable(ValueType* table, int size) + { + if (Traits::needsDestruction) { + for (int i = 0; i < size; ++i) { + if (!isDeletedBucket(table[i])) + table[i].~ValueType(); + } + } + fastFree(table); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, 
KeyTraits>::expand() + { + int newSize; + if (m_tableSize == 0) + newSize = m_minTableSize; + else if (mustRehashInPlace()) + newSize = m_tableSize; + else + newSize = m_tableSize * 2; + + rehash(newSize); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::rehash(int newTableSize) + { + checkTableConsistencyExceptSize(); + + int oldTableSize = m_tableSize; + ValueType* oldTable = m_table; + +#if DUMP_HASHTABLE_STATS + if (oldTableSize != 0) + atomicIncrement(&HashTableStats::numRehashes); +#endif + + m_tableSize = newTableSize; + m_tableSizeMask = newTableSize - 1; + m_table = allocateTable(newTableSize); + + for (int i = 0; i != oldTableSize; ++i) + if (!isEmptyOrDeletedBucket(oldTable[i])) + reinsert(oldTable[i]); + + m_deletedCount = 0; + + deallocateTable(oldTable, oldTableSize); + + checkTableConsistency(); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::clear() + { + invalidateIterators(); + deallocateTable(m_table, m_tableSize); + m_table = 0; + m_tableSize = 0; + m_tableSizeMask = 0; + m_keyCount = 0; + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::HashTable(const HashTable& other) + : m_table(0) + , m_tableSize(0) + , m_tableSizeMask(0) + , m_keyCount(0) + , m_deletedCount(0) +#if CHECK_HASHTABLE_ITERATORS + , m_iterators(0) +#endif + { + // Copy the hash table the dumb way, by adding each element to the new table. + // It might be more efficient to copy the table slots, but it's not clear that efficiency is needed. 
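+ // Re-adding each element also compacts the copy: deleted buckets in + // 'other' are skipped rather than carried over.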
+ const_iterator end = other.end(); + for (const_iterator it = other.begin(); it != end; ++it) + add(*it); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::swap(HashTable& other) + { + invalidateIterators(); + other.invalidateIterators(); + + ValueType* tmp_table = m_table; + m_table = other.m_table; + other.m_table = tmp_table; + + int tmp_tableSize = m_tableSize; + m_tableSize = other.m_tableSize; + other.m_tableSize = tmp_tableSize; + + int tmp_tableSizeMask = m_tableSizeMask; + m_tableSizeMask = other.m_tableSizeMask; + other.m_tableSizeMask = tmp_tableSizeMask; + + int tmp_keyCount = m_keyCount; + m_keyCount = other.m_keyCount; + other.m_keyCount = tmp_keyCount; + + int tmp_deletedCount = m_deletedCount; + m_deletedCount = other.m_deletedCount; + other.m_deletedCount = tmp_deletedCount; + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>& HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::operator=(const HashTable& other) + { + HashTable tmp(other); + swap(tmp); + return *this; + } + +#if CHECK_HASHTABLE_CONSISTENCY + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkTableConsistency() const + { + checkTableConsistencyExceptSize(); + ASSERT(!shouldExpand()); + ASSERT(!shouldShrink()); + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkTableConsistencyExceptSize() const + { + if (!m_table) + return; + + int count = 0; + int deletedCount = 0; + for (int j = 0; j < m_tableSize; ++j) { + ValueType* entry = m_table + j; + if (isEmptyBucket(*entry)) + continue; + + if (isDeletedBucket(*entry)) { + ++deletedCount; + continue; + } + + const_iterator it = find(Extractor::extract(*entry)); + ASSERT(entry == it.m_position); + ++count; + } + + ASSERT(count == m_keyCount); + ASSERT(deletedCount == m_deletedCount); + ASSERT(m_tableSize >= m_minTableSize); + ASSERT(m_tableSizeMask); + ASSERT(m_tableSize == m_tableSizeMask + 1); + } + +#endif // CHECK_HASHTABLE_CONSISTENCY + +#if CHECK_HASHTABLE_ITERATORS + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::invalidateIterators() + { + MutexLocker lock(m_mutex); + const_iterator* next; + for (const_iterator* p = m_iterators; p; p = next) { + next = p->m_next; + p->m_table = 0; + p->m_next = 0; + p->m_previous = 0; + } + m_iterators = 0; + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void addIterator(const HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>* table, + HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>* it) + { + it->m_table = table; + it->m_previous = 0; + + // Insert iterator at head of doubly-linked list of iterators. 
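+ // A null table means a default-constructed (or already invalidated) + // iterator; it joins no list, so only its own links need setting up.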
+ if (!table) { + it->m_next = 0; + } else { + MutexLocker lock(table->m_mutex); + ASSERT(table->m_iterators != it); + it->m_next = table->m_iterators; + table->m_iterators = it; + if (it->m_next) { + ASSERT(!it->m_next->m_previous); + it->m_next->m_previous = it; + } + } + } + + template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> + void removeIterator(HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>* it) + { + typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType; + typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator; + + // Delete iterator from doubly-linked list of iterators. + if (!it->m_table) { + ASSERT(!it->m_next); + ASSERT(!it->m_previous); + } else { + MutexLocker lock(it->m_table->m_mutex); + if (it->m_next) { + ASSERT(it->m_next->m_previous == it); + it->m_next->m_previous = it->m_previous; + } + if (it->m_previous) { + ASSERT(it->m_table->m_iterators != it); + ASSERT(it->m_previous->m_next == it); + it->m_previous->m_next = it->m_next; + } else { + ASSERT(it->m_table->m_iterators == it); + it->m_table->m_iterators = it->m_next; + } + } + + it->m_table = 0; + it->m_next = 0; + it->m_previous = 0; + } + +#endif // CHECK_HASHTABLE_ITERATORS + + // iterator adapters + + template<typename HashTableType, typename ValueType> struct HashTableConstIteratorAdapter { + HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {} + + const ValueType* get() const { return (const ValueType*)m_impl.get(); } + const ValueType& operator*() const { return *get(); } + const ValueType* operator->() const { return get(); } + + HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; } + // postfix ++ intentionally omitted + + typename HashTableType::const_iterator m_impl; + }; + + template<typename HashTableType, typename ValueType> struct HashTableIteratorAdapter { + HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {} + + ValueType* get() const { return (ValueType*)m_impl.get(); } + ValueType& operator*() const { return *get(); } + ValueType* operator->() const { return get(); } + + HashTableIteratorAdapter& operator++() { ++m_impl; return *this; } + // postfix ++ intentionally omitted + + operator HashTableConstIteratorAdapter<HashTableType, ValueType>() { + typename HashTableType::const_iterator i = m_impl; + return i; + } + + typename HashTableType::iterator m_impl; + }; + + template<typename T, typename U> + inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b) + { + return a.m_impl == b.m_impl; + } + + template<typename T, typename U> + inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b) + { + return a.m_impl != b.m_impl; + } + + template<typename T, typename U> + inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b) + { + return a.m_impl == b.m_impl; + } + + template<typename T, typename U> + inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b) + { + return a.m_impl != b.m_impl; + } + +} // namespace WTF + +#include "HashIterators.h" + +#endif // WTF_HashTable_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/HashTraits.h b/src/3rdparty/webkit/JavaScriptCore/wtf/HashTraits.h new file mode 100644 
index 0000000..b3c0b7a --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/HashTraits.h @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_HashTraits_h +#define WTF_HashTraits_h + +#include "Assertions.h" +#include "HashFunctions.h" +#include <utility> +#include <limits> + +namespace WTF { + + using std::pair; + using std::make_pair; + + template<typename T> struct IsInteger { static const bool value = false; }; + template<> struct IsInteger<bool> { static const bool value = true; }; + template<> struct IsInteger<char> { static const bool value = true; }; + template<> struct IsInteger<signed char> { static const bool value = true; }; + template<> struct IsInteger<unsigned char> { static const bool value = true; }; + template<> struct IsInteger<short> { static const bool value = true; }; + template<> struct IsInteger<unsigned short> { static const bool value = true; }; + template<> struct IsInteger<int> { static const bool value = true; }; + template<> struct IsInteger<unsigned int> { static const bool value = true; }; + template<> struct IsInteger<long> { static const bool value = true; }; + template<> struct IsInteger<unsigned long> { static const bool value = true; }; + template<> struct IsInteger<long long> { static const bool value = true; }; + template<> struct IsInteger<unsigned long long> { static const bool value = true; }; + +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + template<> struct IsInteger<wchar_t> { static const bool value = true; }; +#endif + + COMPILE_ASSERT(IsInteger<bool>::value, WTF_IsInteger_bool_true); + COMPILE_ASSERT(IsInteger<char>::value, WTF_IsInteger_char_true); + COMPILE_ASSERT(IsInteger<signed char>::value, WTF_IsInteger_signed_char_true); + COMPILE_ASSERT(IsInteger<unsigned char>::value, WTF_IsInteger_unsigned_char_true); + COMPILE_ASSERT(IsInteger<short>::value, WTF_IsInteger_short_true); + COMPILE_ASSERT(IsInteger<unsigned short>::value, WTF_IsInteger_unsigned_short_true); + COMPILE_ASSERT(IsInteger<int>::value, WTF_IsInteger_int_true); + COMPILE_ASSERT(IsInteger<unsigned int>::value, WTF_IsInteger_unsigned_int_true); + COMPILE_ASSERT(IsInteger<long>::value, WTF_IsInteger_long_true); + COMPILE_ASSERT(IsInteger<unsigned long>::value, WTF_IsInteger_unsigned_long_true); + COMPILE_ASSERT(IsInteger<long long>::value, WTF_IsInteger_long_long_true); + COMPILE_ASSERT(IsInteger<unsigned long long>::value, WTF_IsInteger_unsigned_long_long_true); + +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + COMPILE_ASSERT(IsInteger<wchar_t>::value, WTF_IsInteger_wchar_t_true); +#endif + + COMPILE_ASSERT(!IsInteger<char*>::value, WTF_IsInteger_char_pointer_false); + COMPILE_ASSERT(!IsInteger<const char* 
>::value, WTF_IsInteger_const_char_pointer_false); + COMPILE_ASSERT(!IsInteger<volatile char* >::value, WTF_IsInteger_volatile_char_pointer_false); + COMPILE_ASSERT(!IsInteger<double>::value, WTF_IsInteger_double_false); + COMPILE_ASSERT(!IsInteger<float>::value, WTF_IsInteger_float_false); + + template<typename T> struct HashTraits; + + template<bool isInteger, typename T> struct GenericHashTraitsBase; + + template<typename T> struct GenericHashTraitsBase<false, T> { + static const bool emptyValueIsZero = false; + static const bool needsDestruction = true; + }; + + // Default integer traits disallow both 0 and -1 as keys (max value instead of -1 for unsigned). + template<typename T> struct GenericHashTraitsBase<true, T> { + static const bool emptyValueIsZero = true; + static const bool needsDestruction = false; + static void constructDeletedValue(T& slot) { slot = static_cast<T>(-1); } + static bool isDeletedValue(T value) { return value == static_cast<T>(-1); } + }; + + template<typename T> struct GenericHashTraits : GenericHashTraitsBase<IsInteger<T>::value, T> { + typedef T TraitType; + static T emptyValue() { return T(); } + }; + + template<typename T> struct HashTraits : GenericHashTraits<T> { }; + + template<typename T> struct FloatHashTraits : GenericHashTraits<T> { + static const bool needsDestruction = false; + static T emptyValue() { return std::numeric_limits<T>::infinity(); } + static void constructDeletedValue(T& slot) { slot = -std::numeric_limits<T>::infinity(); } + static bool isDeletedValue(T value) { return value == -std::numeric_limits<T>::infinity(); } + }; + + template<> struct HashTraits<float> : FloatHashTraits<float> { }; + template<> struct HashTraits<double> : FloatHashTraits<double> { }; + + // Default unsigned traits disallow both 0 and max as keys -- use these traits to allow zero as a key instead, + // with max as the empty value and max - 1 as the deleted value (so max and max - 1 become the disallowed keys).
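+ // A hypothetical use: HashMap<unsigned, int, IntHash<unsigned>, + // UnsignedWithZeroKeyHashTraits<unsigned> > can key on 0, which the default + // integer traits reserve as the empty bucket (IntHash comes from + // HashFunctions.h, included above).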
+ template<typename T> struct UnsignedWithZeroKeyHashTraits : GenericHashTraits<T> { + static const bool emptyValueIsZero = false; + static const bool needsDestruction = false; + static T emptyValue() { return std::numeric_limits<T>::max(); } + static void constructDeletedValue(T& slot) { slot = std::numeric_limits<T>::max() - 1; } + static bool isDeletedValue(T value) { return value == std::numeric_limits<T>::max() - 1; } + }; + + template<typename P> struct HashTraits<P*> : GenericHashTraits<P*> { + static const bool emptyValueIsZero = true; + static const bool needsDestruction = false; + static void constructDeletedValue(P*& slot) { slot = reinterpret_cast<P*>(-1); } + static bool isDeletedValue(P* value) { return value == reinterpret_cast<P*>(-1); } + }; + + template<typename P> struct HashTraits<RefPtr<P> > : GenericHashTraits<RefPtr<P> > { + static const bool emptyValueIsZero = true; + static void constructDeletedValue(RefPtr<P>& slot) { new (&slot) RefPtr<P>(HashTableDeletedValue); } + static bool isDeletedValue(const RefPtr<P>& value) { return value.isHashTableDeletedValue(); } + }; + + // special traits for pairs, helpful for their use in HashMap implementation + + template<typename FirstTraitsArg, typename SecondTraitsArg> + struct PairHashTraits : GenericHashTraits<pair<typename FirstTraitsArg::TraitType, typename SecondTraitsArg::TraitType> > { + typedef FirstTraitsArg FirstTraits; + typedef SecondTraitsArg SecondTraits; + typedef pair<typename FirstTraits::TraitType, typename SecondTraits::TraitType> TraitType; + + static const bool emptyValueIsZero = FirstTraits::emptyValueIsZero && SecondTraits::emptyValueIsZero; + static TraitType emptyValue() { return make_pair(FirstTraits::emptyValue(), SecondTraits::emptyValue()); } + + static const bool needsDestruction = FirstTraits::needsDestruction || SecondTraits::needsDestruction; + + static void constructDeletedValue(TraitType& slot) { FirstTraits::constructDeletedValue(slot.first); } + static bool isDeletedValue(const TraitType& value) { return FirstTraits::isDeletedValue(value.first); } + }; + + template<typename First, typename Second> + struct HashTraits<pair<First, Second> > : public PairHashTraits<HashTraits<First>, HashTraits<Second> > { }; + +} // namespace WTF + +using WTF::HashTraits; +using WTF::PairHashTraits; + +#endif // WTF_HashTraits_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/ListHashSet.h b/src/3rdparty/webkit/JavaScriptCore/wtf/ListHashSet.h new file mode 100644 index 0000000..38cc998 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/ListHashSet.h @@ -0,0 +1,616 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ *
+ */
+
+#ifndef WTF_ListHashSet_h
+#define WTF_ListHashSet_h
+
+#include "Assertions.h"
+#include "HashSet.h"
+#include "OwnPtr.h"
+
+namespace WTF {
+
+    // ListHashSet: Just like HashSet, this class provides a Set
+    // interface - a collection of unique objects with O(1) insertion,
+    // removal and test for membership. However, it also has an
+    // order - iterating it will always give back values in the order
+    // in which they are added.
+
+    // In theory it would be possible to add prepend, insertAfter
+    // and an append that moves the element to the end even if already present,
+    // but it is not yet clear whether these are needed.
+
+    template<typename Value, typename HashFunctions> class ListHashSet;
+
+    template<typename T> struct IdentityExtractor;
+
+    template<typename Value, typename HashFunctions>
+    void deleteAllValues(const ListHashSet<Value, HashFunctions>&);
+
+    template<typename ValueArg, typename HashArg> class ListHashSetIterator;
+    template<typename ValueArg, typename HashArg> class ListHashSetConstIterator;
+
+    template<typename ValueArg> struct ListHashSetNode;
+    template<typename ValueArg> struct ListHashSetNodeAllocator;
+    template<typename ValueArg, typename HashArg> struct ListHashSetNodeHashFunctions;
+
+    template<typename ValueArg, typename HashArg = typename DefaultHash<ValueArg>::Hash> class ListHashSet {
+    private:
+        typedef ListHashSetNode<ValueArg> Node;
+        typedef ListHashSetNodeAllocator<ValueArg> NodeAllocator;
+
+        typedef HashTraits<Node*> NodeTraits;
+        typedef ListHashSetNodeHashFunctions<ValueArg, HashArg> NodeHash;
+
+        typedef HashTable<Node*, Node*, IdentityExtractor<Node*>, NodeHash, NodeTraits, NodeTraits> ImplType;
+        typedef HashTableIterator<Node*, Node*, IdentityExtractor<Node*>, NodeHash, NodeTraits, NodeTraits> ImplTypeIterator;
+        typedef HashTableConstIterator<Node*, Node*, IdentityExtractor<Node*>, NodeHash, NodeTraits, NodeTraits> ImplTypeConstIterator;
+
+        typedef HashArg HashFunctions;
+
+    public:
+        typedef ValueArg ValueType;
+        typedef ListHashSetIterator<ValueType, HashArg> iterator;
+        typedef ListHashSetConstIterator<ValueType, HashArg> const_iterator;
+
+        friend class ListHashSetConstIterator<ValueType, HashArg>;
+
+        ListHashSet();
+        ListHashSet(const ListHashSet&);
+        ListHashSet& operator=(const ListHashSet&);
+        ~ListHashSet();
+
+        void swap(ListHashSet&);
+
+        int size() const;
+        int capacity() const;
+        bool isEmpty() const;
+
+        iterator begin();
+        iterator end();
+        const_iterator begin() const;
+        const_iterator end() const;
+
+        iterator find(const ValueType&);
+        const_iterator find(const ValueType&) const;
+        bool contains(const ValueType&) const;
+
+        // the return value is a pair of an iterator to the new value's location,
+        // and a bool that is true if a new entry was added
+        pair<iterator, bool> add(const ValueType&);
+
+        pair<iterator, bool> insertBefore(const ValueType& beforeValue, const ValueType& newValue);
+        pair<iterator, bool> insertBefore(iterator it, const ValueType&);
+
+        void remove(const ValueType&);
+        void remove(iterator);
+        void clear();
+
+    private:
+        void unlinkAndDelete(Node*);
+        void appendNode(Node*);
+        void insertNodeBefore(Node* beforeNode, Node* newNode);
+        void deleteAllNodes();
+        iterator makeIterator(Node*);
+        const_iterator makeConstIterator(Node*) const;
+
+        friend void deleteAllValues<>(const ListHashSet&);
+
+        ImplType m_impl;
+        Node* m_head;
+        Node* m_tail;
+        OwnPtr<NodeAllocator> m_allocator;
+    };
+
+    template<typename ValueArg> struct ListHashSetNodeAllocator {
+        typedef ListHashSetNode<ValueArg> Node;
+ typedef ListHashSetNodeAllocator<ValueArg> NodeAllocator; + + ListHashSetNodeAllocator() + : m_freeList(pool()) + , m_isDoneWithInitialFreeList(false) + { + memset(m_pool.pool, 0, sizeof(m_pool.pool)); + } + + Node* allocate() + { + Node* result = m_freeList; + + if (!result) + return static_cast<Node*>(fastMalloc(sizeof(Node))); + + ASSERT(!result->m_isAllocated); + + Node* next = result->m_next; + ASSERT(!next || !next->m_isAllocated); + if (!next && !m_isDoneWithInitialFreeList) { + next = result + 1; + if (next == pastPool()) { + m_isDoneWithInitialFreeList = true; + next = 0; + } else { + ASSERT(inPool(next)); + ASSERT(!next->m_isAllocated); + } + } + m_freeList = next; + + return result; + } + + void deallocate(Node* node) + { + if (inPool(node)) { +#ifndef NDEBUG + node->m_isAllocated = false; +#endif + node->m_next = m_freeList; + m_freeList = node; + return; + } + + fastFree(node); + } + + private: + Node* pool() { return reinterpret_cast<Node*>(m_pool.pool); } + Node* pastPool() { return pool() + m_poolSize; } + + bool inPool(Node* node) + { + return node >= pool() && node < pastPool(); + } + + Node* m_freeList; + bool m_isDoneWithInitialFreeList; + static const size_t m_poolSize = 256; + union { + char pool[sizeof(Node) * m_poolSize]; + double forAlignment; + } m_pool; + }; + + template<typename ValueArg> struct ListHashSetNode { + typedef ListHashSetNodeAllocator<ValueArg> NodeAllocator; + + ListHashSetNode(ValueArg value) + : m_value(value) + , m_prev(0) + , m_next(0) +#ifndef NDEBUG + , m_isAllocated(true) +#endif + { + } + + void* operator new(size_t, NodeAllocator* allocator) + { + return allocator->allocate(); + } + void destroy(NodeAllocator* allocator) + { + this->~ListHashSetNode(); + allocator->deallocate(this); + } + + ValueArg m_value; + ListHashSetNode* m_prev; + ListHashSetNode* m_next; + +#ifndef NDEBUG + bool m_isAllocated; +#endif + }; + + template<typename ValueArg, typename HashArg> struct ListHashSetNodeHashFunctions { + typedef ListHashSetNode<ValueArg> Node; + + static unsigned hash(Node* const& key) { return HashArg::hash(key->m_value); } + static bool equal(Node* const& a, Node* const& b) { return HashArg::equal(a->m_value, b->m_value); } + static const bool safeToCompareToEmptyOrDeleted = false; + }; + + template<typename ValueArg, typename HashArg> class ListHashSetIterator { + private: + typedef ListHashSet<ValueArg, HashArg> ListHashSetType; + typedef ListHashSetIterator<ValueArg, HashArg> iterator; + typedef ListHashSetConstIterator<ValueArg, HashArg> const_iterator; + typedef ListHashSetNode<ValueArg> Node; + typedef ValueArg ValueType; + typedef ValueType& ReferenceType; + typedef ValueType* PointerType; + + friend class ListHashSet<ValueArg, HashArg>; + + ListHashSetIterator(const ListHashSetType* set, Node* position) : m_iterator(set, position) { } + + public: + ListHashSetIterator() { } + + // default copy, assignment and destructor are OK + + PointerType get() const { return const_cast<PointerType>(m_iterator.get()); } + ReferenceType operator*() const { return *get(); } + PointerType operator->() const { return get(); } + + iterator& operator++() { ++m_iterator; return *this; } + + // postfix ++ intentionally omitted + + iterator& operator--() { --m_iterator; return *this; } + + // postfix -- intentionally omitted + + // Comparison. 
+ bool operator==(const iterator& other) const { return m_iterator == other.m_iterator; } + bool operator!=(const iterator& other) const { return m_iterator != other.m_iterator; } + + operator const_iterator() const { return m_iterator; } + + private: + Node* node() { return m_iterator.node(); } + + const_iterator m_iterator; + }; + + template<typename ValueArg, typename HashArg> class ListHashSetConstIterator { + private: + typedef ListHashSet<ValueArg, HashArg> ListHashSetType; + typedef ListHashSetIterator<ValueArg, HashArg> iterator; + typedef ListHashSetConstIterator<ValueArg, HashArg> const_iterator; + typedef ListHashSetNode<ValueArg> Node; + typedef ValueArg ValueType; + typedef const ValueType& ReferenceType; + typedef const ValueType* PointerType; + + friend class ListHashSet<ValueArg, HashArg>; + friend class ListHashSetIterator<ValueArg, HashArg>; + + ListHashSetConstIterator(const ListHashSetType* set, Node* position) + : m_set(set) + , m_position(position) + { + } + + public: + ListHashSetConstIterator() + { + } + + PointerType get() const + { + return &m_position->m_value; + } + ReferenceType operator*() const { return *get(); } + PointerType operator->() const { return get(); } + + const_iterator& operator++() + { + ASSERT(m_position != 0); + m_position = m_position->m_next; + return *this; + } + + // postfix ++ intentionally omitted + + const_iterator& operator--() + { + ASSERT(m_position != m_set->m_head); + if (!m_position) + m_position = m_set->m_tail; + else + m_position = m_position->m_prev; + return *this; + } + + // postfix -- intentionally omitted + + // Comparison. + bool operator==(const const_iterator& other) const + { + return m_position == other.m_position; + } + bool operator!=(const const_iterator& other) const + { + return m_position != other.m_position; + } + + private: + Node* node() { return m_position; } + + const ListHashSetType* m_set; + Node* m_position; + }; + + + template<typename ValueType, typename HashFunctions> + struct ListHashSetTranslator { + private: + typedef ListHashSetNode<ValueType> Node; + typedef ListHashSetNodeAllocator<ValueType> NodeAllocator; + public: + static unsigned hash(const ValueType& key) { return HashFunctions::hash(key); } + static bool equal(Node* const& a, const ValueType& b) { return HashFunctions::equal(a->m_value, b); } + static void translate(Node*& location, const ValueType& key, NodeAllocator* allocator) + { + location = new (allocator) Node(key); + } + }; + + template<typename T, typename U> + inline ListHashSet<T, U>::ListHashSet() + : m_head(0) + , m_tail(0) + , m_allocator(new NodeAllocator) + { + } + + template<typename T, typename U> + inline ListHashSet<T, U>::ListHashSet(const ListHashSet& other) + : m_head(0) + , m_tail(0) + , m_allocator(new NodeAllocator) + { + const_iterator end = other.end(); + for (const_iterator it = other.begin(); it != end; ++it) + add(*it); + } + + template<typename T, typename U> + inline ListHashSet<T, U>& ListHashSet<T, U>::operator=(const ListHashSet& other) + { + ListHashSet tmp(other); + swap(tmp); + return *this; + } + + template<typename T, typename U> + inline void ListHashSet<T, U>::swap(ListHashSet& other) + { + m_impl.swap(other.m_impl); + std::swap(m_head, other.m_head); + std::swap(m_tail, other.m_tail); + m_allocator.swap(other.m_allocator); + } + + template<typename T, typename U> + inline ListHashSet<T, U>::~ListHashSet() + { + deleteAllNodes(); + } + + template<typename T, typename U> + inline int ListHashSet<T, U>::size() const + { + return m_impl.size(); + } 
+ + template<typename T, typename U> + inline int ListHashSet<T, U>::capacity() const + { + return m_impl.capacity(); + } + + template<typename T, typename U> + inline bool ListHashSet<T, U>::isEmpty() const + { + return m_impl.isEmpty(); + } + + template<typename T, typename U> + inline typename ListHashSet<T, U>::iterator ListHashSet<T, U>::begin() + { + return makeIterator(m_head); + } + + template<typename T, typename U> + inline typename ListHashSet<T, U>::iterator ListHashSet<T, U>::end() + { + return makeIterator(0); + } + + template<typename T, typename U> + inline typename ListHashSet<T, U>::const_iterator ListHashSet<T, U>::begin() const + { + return makeConstIterator(m_head); + } + + template<typename T, typename U> + inline typename ListHashSet<T, U>::const_iterator ListHashSet<T, U>::end() const + { + return makeConstIterator(0); + } + + template<typename T, typename U> + inline typename ListHashSet<T, U>::iterator ListHashSet<T, U>::find(const ValueType& value) + { + typedef ListHashSetTranslator<ValueType, HashFunctions> Translator; + ImplTypeIterator it = m_impl.template find<ValueType, Translator>(value); + if (it == m_impl.end()) + return end(); + return makeIterator(*it); + } + + template<typename T, typename U> + inline typename ListHashSet<T, U>::const_iterator ListHashSet<T, U>::find(const ValueType& value) const + { + typedef ListHashSetTranslator<ValueType, HashFunctions> Translator; + ImplTypeConstIterator it = m_impl.template find<ValueType, Translator>(value); + if (it == m_impl.end()) + return end(); + return makeConstIterator(*it); + } + + template<typename T, typename U> + inline bool ListHashSet<T, U>::contains(const ValueType& value) const + { + typedef ListHashSetTranslator<ValueType, HashFunctions> Translator; + return m_impl.template contains<ValueType, Translator>(value); + } + + template<typename T, typename U> + pair<typename ListHashSet<T, U>::iterator, bool> ListHashSet<T, U>::add(const ValueType &value) + { + typedef ListHashSetTranslator<ValueType, HashFunctions> Translator; + pair<typename ImplType::iterator, bool> result = m_impl.template add<ValueType, NodeAllocator*, Translator>(value, m_allocator.get()); + if (result.second) + appendNode(*result.first); + return std::make_pair(makeIterator(*result.first), result.second); + } + + template<typename T, typename U> + pair<typename ListHashSet<T, U>::iterator, bool> ListHashSet<T, U>::insertBefore(iterator it, const ValueType& newValue) + { + typedef ListHashSetTranslator<ValueType, HashFunctions> Translator; + pair<typename ImplType::iterator, bool> result = m_impl.template add<ValueType, NodeAllocator*, Translator>(newValue, m_allocator.get()); + if (result.second) + insertNodeBefore(it.node(), *result.first); + return std::make_pair(makeIterator(*result.first), result.second); + + } + + template<typename T, typename U> + pair<typename ListHashSet<T, U>::iterator, bool> ListHashSet<T, U>::insertBefore(const ValueType& beforeValue, const ValueType& newValue) + { + return insertBefore(find(beforeValue), newValue); + } + + template<typename T, typename U> + inline void ListHashSet<T, U>::remove(iterator it) + { + if (it == end()) + return; + m_impl.remove(it.node()); + unlinkAndDelete(it.node()); + } + + template<typename T, typename U> + inline void ListHashSet<T, U>::remove(const ValueType& value) + { + remove(find(value)); + } + + template<typename T, typename U> + inline void ListHashSet<T, U>::clear() + { + deleteAllNodes(); + m_impl.clear(); + m_head = 0; + m_tail = 0; + } + + 
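+    // Editor's note (not part of the original file): a minimal usage sketch.
+    // Unlike a plain HashSet, iteration yields values in insertion order, and
+    // re-adding an existing value neither moves nor duplicates it:
+    //
+    //     ListHashSet<int> set;
+    //     set.add(3);
+    //     set.add(1);
+    //     set.add(3);             // already present: order stays 3, 1
+    //     set.insertBefore(1, 2); // order is now 3, 2, 1
+    //     for (ListHashSet<int>::iterator it = set.begin(); it != set.end(); ++it)
+    //         printf("%d ", *it); // prints "3 2 1"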
template<typename T, typename U> + void ListHashSet<T, U>::unlinkAndDelete(Node* node) + { + if (!node->m_prev) { + ASSERT(node == m_head); + m_head = node->m_next; + } else { + ASSERT(node != m_head); + node->m_prev->m_next = node->m_next; + } + + if (!node->m_next) { + ASSERT(node == m_tail); + m_tail = node->m_prev; + } else { + ASSERT(node != m_tail); + node->m_next->m_prev = node->m_prev; + } + + node->destroy(m_allocator.get()); + } + + template<typename T, typename U> + void ListHashSet<T, U>::appendNode(Node* node) + { + node->m_prev = m_tail; + node->m_next = 0; + + if (m_tail) { + ASSERT(m_head); + m_tail->m_next = node; + } else { + ASSERT(!m_head); + m_head = node; + } + + m_tail = node; + } + + template<typename T, typename U> + void ListHashSet<T, U>::insertNodeBefore(Node* beforeNode, Node* newNode) + { + if (!beforeNode) + return appendNode(newNode); + + newNode->m_next = beforeNode; + newNode->m_prev = beforeNode->m_prev; + if (beforeNode->m_prev) + beforeNode->m_prev->m_next = newNode; + beforeNode->m_prev = newNode; + + if (!newNode->m_prev) + m_head = newNode; + } + + template<typename T, typename U> + void ListHashSet<T, U>::deleteAllNodes() + { + if (!m_head) + return; + + for (Node* node = m_head, *next = m_head->m_next; node; node = next, next = node ? node->m_next : 0) + node->destroy(m_allocator.get()); + } + + template<typename T, typename U> + inline ListHashSetIterator<T, U> ListHashSet<T, U>::makeIterator(Node* position) + { + return ListHashSetIterator<T, U>(this, position); + } + + template<typename T, typename U> + inline ListHashSetConstIterator<T, U> ListHashSet<T, U>::makeConstIterator(Node* position) const + { + return ListHashSetConstIterator<T, U>(this, position); + } + + template<bool, typename ValueType, typename HashTableType> + void deleteAllValues(HashTableType& collection) + { + typedef typename HashTableType::const_iterator iterator; + iterator end = collection.end(); + for (iterator it = collection.begin(); it != end; ++it) + delete (*it)->m_value; + } + + template<typename T, typename U> + inline void deleteAllValues(const ListHashSet<T, U>& collection) + { + deleteAllValues<true, typename ListHashSet<T, U>::ValueType>(collection.m_impl); + } + +} // namespace WTF + +using WTF::ListHashSet; + +#endif /* WTF_ListHashSet_h */ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/ListRefPtr.h b/src/3rdparty/webkit/JavaScriptCore/wtf/ListRefPtr.h new file mode 100644 index 0000000..9f9a354 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/ListRefPtr.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2005, 2006, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ *
+ */
+
+#ifndef WTF_ListRefPtr_h
+#define WTF_ListRefPtr_h
+
+#include <wtf/RefPtr.h>
+
+namespace WTF {
+
+    // Specialized version of RefPtr designed for use in singly-linked lists.
+    // Derefs the list iteratively to avoid recursive derefing that can overflow the stack.
+    template <typename T> class ListRefPtr : public RefPtr<T> {
+    public:
+        ListRefPtr() : RefPtr<T>() {}
+        ListRefPtr(T* ptr) : RefPtr<T>(ptr) {}
+        ListRefPtr(const RefPtr<T>& o) : RefPtr<T>(o) {}
+        // see comment in PassRefPtr.h for why this takes const reference
+        template <typename U> ListRefPtr(const PassRefPtr<U>& o) : RefPtr<T>(o) {}
+
+        ~ListRefPtr()
+        {
+            RefPtr<T> reaper = this->release();
+            while (reaper && reaper->hasOneRef())
+                reaper = reaper->releaseNext(); // implicitly protects reaper->next, then derefs reaper
+        }
+
+        ListRefPtr& operator=(T* optr) { RefPtr<T>::operator=(optr); return *this; }
+        ListRefPtr& operator=(const RefPtr<T>& o) { RefPtr<T>::operator=(o); return *this; }
+        ListRefPtr& operator=(const PassRefPtr<T>& o) { RefPtr<T>::operator=(o); return *this; }
+        template <typename U> ListRefPtr& operator=(const RefPtr<U>& o) { RefPtr<T>::operator=(o); return *this; }
+        template <typename U> ListRefPtr& operator=(const PassRefPtr<U>& o) { RefPtr<T>::operator=(o); return *this; }
+    };
+
+    template <typename T> inline T* getPtr(const ListRefPtr<T>& p)
+    {
+        return p.get();
+    }
+
+} // namespace WTF
+
+using WTF::ListRefPtr;
+
+#endif // WTF_ListRefPtr_h
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/Locker.h b/src/3rdparty/webkit/JavaScriptCore/wtf/Locker.h
new file mode 100644
index 0000000..9feec1f
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/wtf/Locker.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *    its contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ +#ifndef Locker_h +#define Locker_h + +#include <wtf/Noncopyable.h> + +namespace WTF { + +template <typename T> class Locker : Noncopyable { +public: + Locker(T& lockable) : m_lockable(lockable) { m_lockable.lock(); } + ~Locker() { m_lockable.unlock(); } +private: + T& m_lockable; +}; + +} + +using WTF::Locker; + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/MainThread.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/MainThread.cpp new file mode 100644 index 0000000..c7a6caa --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/MainThread.cpp @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2007, 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "MainThread.h" + +#include "StdLibExtras.h" +#include "Threading.h" +#include "Vector.h" + +namespace WTF { + +struct FunctionWithContext { + MainThreadFunction* function; + void* context; + ThreadCondition* syncFlag; + + FunctionWithContext(MainThreadFunction* function = 0, void* context = 0, ThreadCondition* syncFlag = 0) + : function(function) + , context(context) + , syncFlag(syncFlag) + { + } +}; + +typedef Vector<FunctionWithContext> FunctionQueue; + +static bool callbacksPaused; // This global variable is only accessed from main thread. 
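+// Editor's note (not part of the original file): a minimal sketch of the
+// intended call pattern; "CacheEntry" and "entry" are hypothetical names.
+//
+//     static void updateCache(void* context)
+//     {
+//         static_cast<CacheEntry*>(context)->update(); // runs on the main thread
+//     }
+//
+//     callOnMainThread(updateCache, entry); // may be called from any thread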
+ +Mutex& mainThreadFunctionQueueMutex() +{ + DEFINE_STATIC_LOCAL(Mutex, staticMutex, ()); + return staticMutex; +} + +static FunctionQueue& functionQueue() +{ + DEFINE_STATIC_LOCAL(FunctionQueue, staticFunctionQueue, ()); + return staticFunctionQueue; +} + +#if !PLATFORM(WIN) +void initializeMainThread() +{ + mainThreadFunctionQueueMutex(); +} +#endif + +void dispatchFunctionsFromMainThread() +{ + ASSERT(isMainThread()); + + if (callbacksPaused) + return; + + FunctionQueue queueCopy; + { + MutexLocker locker(mainThreadFunctionQueueMutex()); + queueCopy.swap(functionQueue()); + } + + for (unsigned i = 0; i < queueCopy.size(); ++i) { + FunctionWithContext& invocation = queueCopy[i]; + invocation.function(invocation.context); + if (invocation.syncFlag) + invocation.syncFlag->signal(); + } +} + +void callOnMainThread(MainThreadFunction* function, void* context) +{ + ASSERT(function); + + { + MutexLocker locker(mainThreadFunctionQueueMutex()); + functionQueue().append(FunctionWithContext(function, context)); + } + + scheduleDispatchFunctionsOnMainThread(); +} + +void callOnMainThreadAndWait(MainThreadFunction* function, void* context) +{ + ASSERT(function); + + if (isMainThread()) { + function(context); + return; + } + + ThreadCondition syncFlag; + Mutex conditionMutex; + + { + MutexLocker locker(mainThreadFunctionQueueMutex()); + functionQueue().append(FunctionWithContext(function, context, &syncFlag)); + conditionMutex.lock(); + } + + scheduleDispatchFunctionsOnMainThread(); + syncFlag.wait(conditionMutex); +} + +void setMainThreadCallbacksPaused(bool paused) +{ + ASSERT(isMainThread()); + + if (callbacksPaused == paused) + return; + + callbacksPaused = paused; + + if (!callbacksPaused) + scheduleDispatchFunctionsOnMainThread(); +} + +} // namespace WTF diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/MainThread.h b/src/3rdparty/webkit/JavaScriptCore/wtf/MainThread.h new file mode 100644 index 0000000..953b986 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/MainThread.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2007, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MainThread_h +#define MainThread_h + +namespace WTF { + +class Mutex; + +typedef void MainThreadFunction(void*); + +void callOnMainThread(MainThreadFunction*, void* context); +void callOnMainThreadAndWait(MainThreadFunction*, void* context); + +void setMainThreadCallbacksPaused(bool paused); + +// Must be called from the main thread (Darwin is an exception to this rule). +void initializeMainThread(); + +// These functions are internal to the callOnMainThread implementation. +void dispatchFunctionsFromMainThread(); +void scheduleDispatchFunctionsOnMainThread(); +Mutex& mainThreadFunctionQueueMutex(); + +} // namespace WTF + +using WTF::callOnMainThread; +using WTF::setMainThreadCallbacksPaused; + +#endif // MainThread_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/MallocZoneSupport.h b/src/3rdparty/webkit/JavaScriptCore/wtf/MallocZoneSupport.h new file mode 100644 index 0000000..62df145 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/MallocZoneSupport.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef MallocZoneSupport_h
+#define MallocZoneSupport_h
+
+#include <malloc/malloc.h>
+
+namespace WTF {
+
+class RemoteMemoryReader {
+    task_t m_task;
+    memory_reader_t* m_reader;
+
+public:
+    RemoteMemoryReader(task_t task, memory_reader_t* reader)
+        : m_task(task)
+        , m_reader(reader)
+    { }
+
+    void* operator()(vm_address_t address, size_t size) const
+    {
+        void* output;
+        kern_return_t err = (*m_reader)(m_task, address, size, static_cast<void**>(&output));
+        ASSERT(!err);
+        if (err)
+            output = 0;
+        return output;
+    }
+
+    template <typename T>
+    T* operator()(T* address, size_t size=sizeof(T)) const
+    {
+        return static_cast<T*>((*this)(reinterpret_cast<vm_address_t>(address), size));
+    }
+};
+
+} // namespace WTF
+
+#endif // MallocZoneSupport_h
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/MathExtras.h b/src/3rdparty/webkit/JavaScriptCore/wtf/MathExtras.h
new file mode 100644
index 0000000..76488b4
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/wtf/MathExtras.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_MathExtras_h
+#define WTF_MathExtras_h
+
+#include <math.h>
+#include <stdlib.h>
+
+#if PLATFORM(SOLARIS)
+#include <ieeefp.h>
+#endif
+
+#if PLATFORM(OPENBSD)
+#include <sys/types.h>
+#include <machine/ieee.h>
+#endif
+
+#if COMPILER(MSVC)
+#if PLATFORM(WIN_CE)
+#include <stdlib.h>
+#else
+#include <xmath.h>
+#endif
+#include <limits>
+
+#if HAVE(FLOAT_H)
+#include <float.h>
+#endif
+
+#endif
+
+#ifndef M_PI
+const double piDouble = 3.14159265358979323846;
+const float piFloat = 3.14159265358979323846f;
+#else
+const double piDouble = M_PI;
+const float piFloat = static_cast<float>(M_PI);
+#endif
+
+#ifndef M_PI_4
+const double piOverFourDouble = 0.785398163397448309616;
+const float piOverFourFloat = 0.785398163397448309616f;
+#else
+const double piOverFourDouble = M_PI_4;
+const float piOverFourFloat = static_cast<float>(M_PI_4);
+#endif
+
+#if PLATFORM(DARWIN)
+
+// Work around a bug in the Mac OS X libc where ceil(-0.1) returns +0.
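+// Editor's note: ceil(-0.1) should yield -0.0, not +0.0; copying the
+// argument's sign onto the result restores the negative zero. For example:
+//
+//     wtf_ceil(-0.1); // copysign(+0.0, -0.1) == -0.0
+//     wtf_ceil(2.5);  // copysign(3.0, 2.5) == 3.0, unaffected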
+inline double wtf_ceil(double x) { return copysign(ceil(x), x); } + +#define ceil(x) wtf_ceil(x) + +#endif + +#if PLATFORM(SOLARIS) + +#ifndef isfinite +inline bool isfinite(double x) { return finite(x) && !isnand(x); } +#endif +#ifndef isinf +inline bool isinf(double x) { return !finite(x) && !isnand(x); } +#endif +#ifndef signbit +inline bool signbit(double x) { return x < 0.0; } // FIXME: Wrong for negative 0. +#endif + +#endif + +#if PLATFORM(OPENBSD) + +#ifndef isfinite +inline bool isfinite(double x) { return finite(x); } +#endif +#ifndef signbit +inline bool signbit(double x) { struct ieee_double *p = (struct ieee_double *)&x; return p->dbl_sign; } +#endif + +#endif + +#if COMPILER(MSVC) || COMPILER(RVCT) + +inline long lround(double num) { return static_cast<long>(num > 0 ? num + 0.5 : ceil(num - 0.5)); } +inline long lroundf(float num) { return static_cast<long>(num > 0 ? num + 0.5f : ceilf(num - 0.5f)); } +inline double round(double num) { return num > 0 ? floor(num + 0.5) : ceil(num - 0.5); } +inline float roundf(float num) { return num > 0 ? floorf(num + 0.5f) : ceilf(num - 0.5f); } +inline double trunc(double num) { return num > 0 ? floor(num) : ceil(num); } + +#endif + +#if COMPILER(MSVC) + +inline bool isinf(double num) { return !_finite(num) && !_isnan(num); } +inline bool isnan(double num) { return !!_isnan(num); } +inline bool signbit(double num) { return _copysign(1.0, num) < 0; } + +inline double nextafter(double x, double y) { return _nextafter(x, y); } +inline float nextafterf(float x, float y) { return x > y ? x - FLT_EPSILON : x + FLT_EPSILON; } + +inline double copysign(double x, double y) { return _copysign(x, y); } +inline int isfinite(double x) { return _finite(x); } + +// Work around a bug in Win, where atan2(+-infinity, +-infinity) yields NaN instead of specific values. +inline double wtf_atan2(double x, double y) +{ + double posInf = std::numeric_limits<double>::infinity(); + double negInf = -std::numeric_limits<double>::infinity(); + double nan = std::numeric_limits<double>::quiet_NaN(); + + double result = nan; + + if (x == posInf && y == posInf) + result = piOverFourDouble; + else if (x == posInf && y == negInf) + result = 3 * piOverFourDouble; + else if (x == negInf && y == posInf) + result = -piOverFourDouble; + else if (x == negInf && y == negInf) + result = -3 * piOverFourDouble; + else + result = ::atan2(x, y); + + return result; +} + +// Work around a bug in the Microsoft CRT, where fmod(x, +-infinity) yields NaN instead of x. +inline double wtf_fmod(double x, double y) { return (!isinf(x) && isinf(y)) ? x : fmod(x, y); } + +// Work around a bug in the Microsoft CRT, where pow(NaN, 0) yields NaN instead of 1. +inline double wtf_pow(double x, double y) { return y == 0 ? 
1 : pow(x, y); } + +#define atan2(x, y) wtf_atan2(x, y) +#define fmod(x, y) wtf_fmod(x, y) +#define pow(x, y) wtf_pow(x, y) + +#endif // COMPILER(MSVC) + +inline double deg2rad(double d) { return d * piDouble / 180.0; } +inline double rad2deg(double r) { return r * 180.0 / piDouble; } +inline double deg2grad(double d) { return d * 400.0 / 360.0; } +inline double grad2deg(double g) { return g * 360.0 / 400.0; } +inline double turn2deg(double t) { return t * 360.0; } +inline double deg2turn(double d) { return d / 360.0; } +inline double rad2grad(double r) { return r * 200.0 / piDouble; } +inline double grad2rad(double g) { return g * piDouble / 200.0; } + +inline float deg2rad(float d) { return d * piFloat / 180.0f; } +inline float rad2deg(float r) { return r * 180.0f / piFloat; } +inline float deg2grad(float d) { return d * 400.0f / 360.0f; } +inline float grad2deg(float g) { return g * 360.0f / 400.0f; } +inline float turn2deg(float t) { return t * 360.0f; } +inline float deg2turn(float d) { return d / 360.0f; } +inline float rad2grad(float r) { return r * 200.0f / piFloat; } +inline float grad2rad(float g) { return g * piFloat / 200.0f; } + +#endif // #ifndef WTF_MathExtras_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/MessageQueue.h b/src/3rdparty/webkit/JavaScriptCore/wtf/MessageQueue.h new file mode 100644 index 0000000..481211d --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/MessageQueue.h @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef MessageQueue_h +#define MessageQueue_h + +#include <wtf/Assertions.h> +#include <wtf/Deque.h> +#include <wtf/Noncopyable.h> +#include <wtf/Threading.h> + +namespace WTF { + + template<typename DataType> + class MessageQueue : Noncopyable { + public: + MessageQueue() : m_killed(false) {} + + void append(const DataType&); + void prepend(const DataType&); + bool waitForMessage(DataType&); + void kill(); + + bool tryGetMessage(DataType&); + bool killed() const; + + // The result of isEmpty() is only valid if no other thread is manipulating the queue at the same time. + bool isEmpty(); + + private: + mutable Mutex m_mutex; + ThreadCondition m_condition; + Deque<DataType> m_queue; + bool m_killed; + }; + + template<typename DataType> + inline void MessageQueue<DataType>::append(const DataType& message) + { + MutexLocker lock(m_mutex); + m_queue.append(message); + m_condition.signal(); + } + + template<typename DataType> + inline void MessageQueue<DataType>::prepend(const DataType& message) + { + MutexLocker lock(m_mutex); + m_queue.prepend(message); + m_condition.signal(); + } + + template<typename DataType> + inline bool MessageQueue<DataType>::waitForMessage(DataType& result) + { + MutexLocker lock(m_mutex); + + while (!m_killed && m_queue.isEmpty()) + m_condition.wait(m_mutex); + + if (m_killed) + return false; + + ASSERT(!m_queue.isEmpty()); + result = m_queue.first(); + m_queue.removeFirst(); + return true; + } + + template<typename DataType> + inline bool MessageQueue<DataType>::tryGetMessage(DataType& result) + { + MutexLocker lock(m_mutex); + if (m_killed) + return false; + if (m_queue.isEmpty()) + return false; + + result = m_queue.first(); + m_queue.removeFirst(); + return true; + } + + template<typename DataType> + inline bool MessageQueue<DataType>::isEmpty() + { + MutexLocker lock(m_mutex); + if (m_killed) + return true; + return m_queue.isEmpty(); + } + + template<typename DataType> + inline void MessageQueue<DataType>::kill() + { + MutexLocker lock(m_mutex); + m_killed = true; + m_condition.broadcast(); + } + + template<typename DataType> + inline bool MessageQueue<DataType>::killed() const + { + MutexLocker lock(m_mutex); + return m_killed; + } +} + +using WTF::MessageQueue; + +#endif // MessageQueue_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/Noncopyable.h b/src/3rdparty/webkit/JavaScriptCore/wtf/Noncopyable.h new file mode 100644 index 0000000..f241c7c --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/Noncopyable.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2006 Apple Computer, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef WTF_Noncopyable_h +#define WTF_Noncopyable_h + +// We don't want argument-dependent lookup to pull in everything from the WTF +// namespace when you use Noncopyable, so put it in its own namespace. + +namespace WTFNoncopyable { + + class Noncopyable { + Noncopyable(const Noncopyable&); + Noncopyable& operator=(const Noncopyable&); + protected: + Noncopyable() { } + ~Noncopyable() { } + }; + +} // namespace WTFNoncopyable + +using WTFNoncopyable::Noncopyable; + +#endif // WTF_Noncopyable_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/NotFound.h b/src/3rdparty/webkit/JavaScriptCore/wtf/NotFound.h new file mode 100644 index 0000000..f0bb866 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/NotFound.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2008 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef NotFound_h +#define NotFound_h + +namespace WTF { + + const size_t notFound = static_cast<size_t>(-1); + +} // namespace WTF + +#endif // NotFound_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/OwnArrayPtr.h b/src/3rdparty/webkit/JavaScriptCore/wtf/OwnArrayPtr.h new file mode 100644 index 0000000..344f813 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/OwnArrayPtr.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2006 Apple Computer, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef WTF_OwnArrayPtr_h +#define WTF_OwnArrayPtr_h + +#include <algorithm> +#include <wtf/Assertions.h> +#include <wtf/Noncopyable.h> + +namespace WTF { + + template <typename T> class OwnArrayPtr : Noncopyable { + public: + explicit OwnArrayPtr(T* ptr = 0) : m_ptr(ptr) { } + ~OwnArrayPtr() { safeDelete(); } + + T* get() const { return m_ptr; } + T* release() { T* ptr = m_ptr; m_ptr = 0; return ptr; } + + void set(T* ptr) { ASSERT(m_ptr != ptr); safeDelete(); m_ptr = ptr; } + void clear() { safeDelete(); m_ptr = 0; } + + T& operator*() const { ASSERT(m_ptr); return *m_ptr; } + T* operator->() const { ASSERT(m_ptr); return m_ptr; } + + T& operator[](std::ptrdiff_t i) const { ASSERT(m_ptr); ASSERT(i >= 0); return m_ptr[i]; } + + bool operator!() const { return !m_ptr; } + + // This conversion operator allows implicit conversion to bool but not to other integer types. + typedef T* OwnArrayPtr::*UnspecifiedBoolType; + operator UnspecifiedBoolType() const { return m_ptr ? &OwnArrayPtr::m_ptr : 0; } + + void swap(OwnArrayPtr& o) { std::swap(m_ptr, o.m_ptr); } + + private: + void safeDelete() { typedef char known[sizeof(T) ? 1 : -1]; if (sizeof(known)) delete [] m_ptr; } + + T* m_ptr; + }; + + template <typename T> inline void swap(OwnArrayPtr<T>& a, OwnArrayPtr<T>& b) { a.swap(b); } + + template <typename T> inline T* getPtr(const OwnArrayPtr<T>& p) + { + return p.get(); + } + +} // namespace WTF + +using WTF::OwnArrayPtr; + +#endif // WTF_OwnArrayPtr_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/OwnPtr.h b/src/3rdparty/webkit/JavaScriptCore/wtf/OwnPtr.h new file mode 100644 index 0000000..256b55c --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/OwnPtr.h @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2006, 2007 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_OwnPtr_h +#define WTF_OwnPtr_h + +#include <algorithm> +#include <memory> +#include <wtf/Assertions.h> +#include <wtf/Noncopyable.h> + +#if PLATFORM(WIN) + +typedef struct HBITMAP__* HBITMAP; +typedef struct HBRUSH__* HBRUSH; +typedef struct HFONT__* HFONT; +typedef struct HPALETTE__* HPALETTE; +typedef struct HPEN__* HPEN; +typedef struct HRGN__* HRGN; + +#endif + +namespace WTF { + + // Unlike most of our smart pointers, OwnPtr can take either the pointer type or the pointed-to type. + + // FIXME: Share a single RemovePointer class template with RetainPtr. + template <typename T> struct OwnPtrRemovePointer { typedef T type; }; + template <typename T> struct OwnPtrRemovePointer<T*> { typedef T type; }; + + template <typename T> inline void deleteOwnedPtr(T* ptr) + { + typedef char known[sizeof(T) ? 
1 : -1]; + if (sizeof(known)) + delete ptr; + } + +#if PLATFORM(WIN) + void deleteOwnedPtr(HBITMAP); + void deleteOwnedPtr(HBRUSH); + void deleteOwnedPtr(HFONT); + void deleteOwnedPtr(HPALETTE); + void deleteOwnedPtr(HPEN); + void deleteOwnedPtr(HRGN); +#endif + + template <typename T> class OwnPtr : Noncopyable { + public: + typedef typename OwnPtrRemovePointer<T>::type ValueType; + typedef ValueType* PtrType; + + explicit OwnPtr(PtrType ptr = 0) : m_ptr(ptr) { } + OwnPtr(std::auto_ptr<ValueType> autoPtr) : m_ptr(autoPtr.release()) { } + ~OwnPtr() { deleteOwnedPtr(m_ptr); } + + PtrType get() const { return m_ptr; } + PtrType release() { PtrType ptr = m_ptr; m_ptr = 0; return ptr; } + + // FIXME: This should be renamed to adopt. + void set(PtrType ptr) { ASSERT(!ptr || m_ptr != ptr); deleteOwnedPtr(m_ptr); m_ptr = ptr; } + + void adopt(std::auto_ptr<ValueType> autoPtr) { ASSERT(!autoPtr.get() || m_ptr != autoPtr.get()); deleteOwnedPtr(m_ptr); m_ptr = autoPtr.release(); } + + void clear() { deleteOwnedPtr(m_ptr); m_ptr = 0; } + + ValueType& operator*() const { ASSERT(m_ptr); return *m_ptr; } + PtrType operator->() const { ASSERT(m_ptr); return m_ptr; } + + bool operator!() const { return !m_ptr; } + + // This conversion operator allows implicit conversion to bool but not to other integer types. + typedef PtrType OwnPtr::*UnspecifiedBoolType; + operator UnspecifiedBoolType() const { return m_ptr ? &OwnPtr::m_ptr : 0; } + + void swap(OwnPtr& o) { std::swap(m_ptr, o.m_ptr); } + + private: + PtrType m_ptr; + }; + + template <typename T> inline void swap(OwnPtr<T>& a, OwnPtr<T>& b) { a.swap(b); } + + template <typename T, typename U> inline bool operator==(const OwnPtr<T>& a, U* b) + { + return a.get() == b; + } + + template <typename T, typename U> inline bool operator==(T* a, const OwnPtr<U>& b) + { + return a == b.get(); + } + + template <typename T, typename U> inline bool operator!=(const OwnPtr<T>& a, U* b) + { + return a.get() != b; + } + + template <typename T, typename U> inline bool operator!=(T* a, const OwnPtr<U>& b) + { + return a != b.get(); + } + + template <typename T> inline typename OwnPtr<T>::PtrType getPtr(const OwnPtr<T>& p) + { + return p.get(); + } + +} // namespace WTF + +using WTF::OwnPtr; + +#endif // WTF_OwnPtr_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/OwnPtrWin.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/OwnPtrWin.cpp new file mode 100644 index 0000000..b08d7dc --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/OwnPtrWin.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2007 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "OwnPtr.h" + +#include <windows.h> + +namespace WTF { + +void deleteOwnedPtr(HBITMAP ptr) +{ + if (ptr) + DeleteObject(ptr); +} + +void deleteOwnedPtr(HBRUSH ptr) +{ + if (ptr) + DeleteObject(ptr); +} + +void deleteOwnedPtr(HFONT ptr) +{ + if (ptr) + DeleteObject(ptr); +} + +void deleteOwnedPtr(HPALETTE ptr) +{ + if (ptr) + DeleteObject(ptr); +} + +void deleteOwnedPtr(HPEN ptr) +{ + if (ptr) + DeleteObject(ptr); +} + +void deleteOwnedPtr(HRGN ptr) +{ + if (ptr) + DeleteObject(ptr); +} + +} diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/PassRefPtr.h b/src/3rdparty/webkit/JavaScriptCore/wtf/PassRefPtr.h new file mode 100644 index 0000000..c1dc309 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/PassRefPtr.h @@ -0,0 +1,195 @@ +/* + * Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_PassRefPtr_h +#define WTF_PassRefPtr_h + +#include "AlwaysInline.h" + +namespace WTF { + + template<typename T> class RefPtr; + template<typename T> class PassRefPtr; + template <typename T> PassRefPtr<T> adoptRef(T*); + + template<typename T> class PassRefPtr { + public: + PassRefPtr() : m_ptr(0) {} + PassRefPtr(T* ptr) : m_ptr(ptr) { if (ptr) ptr->ref(); } + // It somewhat breaks the type system to allow transfer of ownership out of + // a const PassRefPtr. However, it makes it much easier to work with PassRefPtr + // temporaries, and we don't really have a need to use real const PassRefPtrs + // anyway. + PassRefPtr(const PassRefPtr& o) : m_ptr(o.releaseRef()) {} + template <typename U> PassRefPtr(const PassRefPtr<U>& o) : m_ptr(o.releaseRef()) { } + + ALWAYS_INLINE ~PassRefPtr() { if (T* ptr = m_ptr) ptr->deref(); } + + template <class U> + PassRefPtr(const RefPtr<U>& o) : m_ptr(o.get()) { if (T* ptr = m_ptr) ptr->ref(); } + + T* get() const { return m_ptr; } + + void clear() { if (T* ptr = m_ptr) ptr->deref(); m_ptr = 0; } + T* releaseRef() const { T* tmp = m_ptr; m_ptr = 0; return tmp; } + + T& operator*() const { return *m_ptr; } + T* operator->() const { return m_ptr; } + + bool operator!() const { return !m_ptr; } + + // This conversion operator allows implicit conversion to bool but not to other integer types. 
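+        // Editor's illustration (not part of the original header), with a
+        // hypothetical ref-counted type Node:
+        //
+        //     PassRefPtr<Node> p = adoptRef(new Node);
+        //     if (p)     // OK: converts through UnspecifiedBoolType
+        //         use(p);
+        //     int n = p; // does not compile: no conversion to int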
+#if COMPILER(WINSCW) + operator bool() const { return m_ptr; } +#else + typedef T* PassRefPtr::*UnspecifiedBoolType; + operator UnspecifiedBoolType() const { return m_ptr ? &PassRefPtr::m_ptr : 0; } +#endif + PassRefPtr& operator=(T*); + PassRefPtr& operator=(const PassRefPtr&); + template <typename U> PassRefPtr& operator=(const PassRefPtr<U>&); + template <typename U> PassRefPtr& operator=(const RefPtr<U>&); + + friend PassRefPtr adoptRef<T>(T*); + private: + // adopting constructor + PassRefPtr(T* ptr, bool) : m_ptr(ptr) {} + mutable T* m_ptr; + }; + + template <typename T> template <typename U> inline PassRefPtr<T>& PassRefPtr<T>::operator=(const RefPtr<U>& o) + { + T* optr = o.get(); + if (optr) + optr->ref(); + T* ptr = m_ptr; + m_ptr = optr; + if (ptr) + ptr->deref(); + return *this; + } + + template <typename T> inline PassRefPtr<T>& PassRefPtr<T>::operator=(T* optr) + { + if (optr) + optr->ref(); + T* ptr = m_ptr; + m_ptr = optr; + if (ptr) + ptr->deref(); + return *this; + } + + template <typename T> inline PassRefPtr<T>& PassRefPtr<T>::operator=(const PassRefPtr<T>& ref) + { + T* ptr = m_ptr; + m_ptr = ref.releaseRef(); + if (ptr) + ptr->deref(); + return *this; + } + + template <typename T> template <typename U> inline PassRefPtr<T>& PassRefPtr<T>::operator=(const PassRefPtr<U>& ref) + { + T* ptr = m_ptr; + m_ptr = ref.releaseRef(); + if (ptr) + ptr->deref(); + return *this; + } + + template <typename T, typename U> inline bool operator==(const PassRefPtr<T>& a, const PassRefPtr<U>& b) + { + return a.get() == b.get(); + } + + template <typename T, typename U> inline bool operator==(const PassRefPtr<T>& a, const RefPtr<U>& b) + { + return a.get() == b.get(); + } + + template <typename T, typename U> inline bool operator==(const RefPtr<T>& a, const PassRefPtr<U>& b) + { + return a.get() == b.get(); + } + + template <typename T, typename U> inline bool operator==(const PassRefPtr<T>& a, U* b) + { + return a.get() == b; + } + + template <typename T, typename U> inline bool operator==(T* a, const PassRefPtr<U>& b) + { + return a == b.get(); + } + + template <typename T, typename U> inline bool operator!=(const PassRefPtr<T>& a, const PassRefPtr<U>& b) + { + return a.get() != b.get(); + } + + template <typename T, typename U> inline bool operator!=(const PassRefPtr<T>& a, const RefPtr<U>& b) + { + return a.get() != b.get(); + } + + template <typename T, typename U> inline bool operator!=(const RefPtr<T>& a, const PassRefPtr<U>& b) + { + return a.get() != b.get(); + } + + template <typename T, typename U> inline bool operator!=(const PassRefPtr<T>& a, U* b) + { + return a.get() != b; + } + + template <typename T, typename U> inline bool operator!=(T* a, const PassRefPtr<U>& b) + { + return a != b.get(); + } + + template <typename T> inline PassRefPtr<T> adoptRef(T* p) + { + return PassRefPtr<T>(p, true); + } + + template <typename T, typename U> inline PassRefPtr<T> static_pointer_cast(const PassRefPtr<U>& p) + { + return adoptRef(static_cast<T*>(p.releaseRef())); + } + + template <typename T, typename U> inline PassRefPtr<T> const_pointer_cast(const PassRefPtr<U>& p) + { + return adoptRef(const_cast<T*>(p.releaseRef())); + } + + template <typename T> inline T* getPtr(const PassRefPtr<T>& p) + { + return p.get(); + } + +} // namespace WTF + +using WTF::PassRefPtr; +using WTF::adoptRef; +using WTF::static_pointer_cast; +using WTF::const_pointer_cast; + +#endif // WTF_PassRefPtr_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/Platform.h 
b/src/3rdparty/webkit/JavaScriptCore/wtf/Platform.h new file mode 100644 index 0000000..6c27123 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/Platform.h @@ -0,0 +1,505 @@ +/* + * Copyright (C) 2006, 2007, 2008, 2009 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WTF_Platform_h +#define WTF_Platform_h + +/* PLATFORM handles OS, operating environment, graphics API, and CPU */ +#define PLATFORM(WTF_FEATURE) (defined( WTF_PLATFORM_##WTF_FEATURE ) && WTF_PLATFORM_##WTF_FEATURE) +#define COMPILER(WTF_FEATURE) (defined( WTF_COMPILER_##WTF_FEATURE ) && WTF_COMPILER_##WTF_FEATURE) +#define HAVE(WTF_FEATURE) (defined( HAVE_##WTF_FEATURE ) && HAVE_##WTF_FEATURE) +#define USE(WTF_FEATURE) (defined( WTF_USE_##WTF_FEATURE ) && WTF_USE_##WTF_FEATURE) +#define ENABLE(WTF_FEATURE) (defined( ENABLE_##WTF_FEATURE ) && ENABLE_##WTF_FEATURE) + +/* Operating systems - low-level dependencies */ + +/* PLATFORM(DARWIN) */ +/* Operating system level dependencies for Mac OS X / Darwin that should */ +/* be used regardless of operating environment */ +#ifdef __APPLE__ +#define WTF_PLATFORM_DARWIN 1 +#endif + +/* PLATFORM(WIN_OS) */ +/* Operating system level dependencies for Windows that should be used */ +/* regardless of operating environment */ +#if defined(WIN32) || defined(_WIN32) +#define WTF_PLATFORM_WIN_OS 1 +#endif + +/* PLATFORM(WIN_CE) */ +/* Operating system level dependencies for Windows CE that should be used */ +/* regardless of operating environment */ +/* Note that for this platform PLATFORM(WIN_OS) is also defined. 
*/ +#if defined(_WIN32_WCE) +#define WTF_PLATFORM_WIN_CE 1 +#include <ce_time.h> +#endif + +/* PLATFORM(FREEBSD) */ +/* Operating system level dependencies for FreeBSD-like systems that */ +/* should be used regardless of operating environment */ +#ifdef __FreeBSD__ +#define WTF_PLATFORM_FREEBSD 1 +#endif + +/* PLATFORM(OPENBSD) */ +/* Operating system level dependencies for OpenBSD systems that */ +/* should be used regardless of operating environment */ +#ifdef __OpenBSD__ +#define WTF_PLATFORM_OPENBSD 1 +#endif + +/* PLATFORM(SOLARIS) */ +/* Operating system level dependencies for Solaris that should be used */ +/* regardless of operating environment */ +#if defined(sun) || defined(__sun) +#define WTF_PLATFORM_SOLARIS 1 +#endif + +#if defined (__S60__) || defined (__SYMBIAN32__) +/* we are cross-compiling, it is not really windows */ +#undef WTF_PLATFORM_WIN_OS +#undef WTF_PLATFORM_WIN +#undef WTF_PLATFORM_CAIRO +#define WTF_PLATFORM_S60 1 +#define WTF_PLATFORM_SYMBIAN 1 +#endif + + +/* PLATFORM(NETBSD) */ +/* Operating system level dependencies for NetBSD that should be used */ +/* regardless of operating environment */ +#if defined(__NetBSD__) +#define WTF_PLATFORM_NETBSD 1 +#endif + +/* PLATFORM(UNIX) */ +/* Operating system level dependencies for Unix-like systems that */ +/* should be used regardless of operating environment */ +#if PLATFORM(DARWIN) \ + || PLATFORM(FREEBSD) \ + || PLATFORM(S60) \ + || PLATFORM(NETBSD) \ + || defined(unix) \ + || defined(__unix) \ + || defined(__unix__) \ + || defined(_AIX) +#define WTF_PLATFORM_UNIX 1 +#endif + +/* Operating environments */ + +/* PLATFORM(CHROMIUM) */ +/* PLATFORM(QT) */ +/* PLATFORM(GTK) */ +/* PLATFORM(MAC) */ +/* PLATFORM(WIN) */ +#if defined(BUILDING_CHROMIUM__) +#define WTF_PLATFORM_CHROMIUM 1 +#elif defined(BUILDING_QT__) +#define WTF_PLATFORM_QT 1 + +/* PLATFORM(KDE) */ +#if defined(BUILDING_KDE__) +#define WTF_PLATFORM_KDE 1 +#endif + +#elif defined(BUILDING_WX__) +#define WTF_PLATFORM_WX 1 +#elif defined(BUILDING_GTK__) +#define WTF_PLATFORM_GTK 1 +#elif PLATFORM(DARWIN) +#define WTF_PLATFORM_MAC 1 +#elif PLATFORM(WIN_OS) +#define WTF_PLATFORM_WIN 1 +#endif + +/* Graphics engines */ + +/* PLATFORM(CG) and PLATFORM(CI) */ +#if PLATFORM(MAC) +#define WTF_PLATFORM_CG 1 +#define WTF_PLATFORM_CI 1 +#endif + +/* PLATFORM(SKIA) for Win/Linux, CG/CI for Mac */ +#if PLATFORM(CHROMIUM) +#if PLATFORM(DARWIN) +#define WTF_PLATFORM_CG 1 +#define WTF_PLATFORM_CI 1 +#define WTF_USE_ATSUI 1 +#else +#define WTF_PLATFORM_SKIA 1 +#endif +#endif + +/* Makes PLATFORM(WIN) default to PLATFORM(CAIRO) */ +/* FIXME: This should be changed from a blacklist to a whitelist */ +#if !PLATFORM(MAC) && !PLATFORM(QT) && !PLATFORM(WX) && !PLATFORM(CHROMIUM) +#define WTF_PLATFORM_CAIRO 1 +#endif + +/* CPU */ + +/* PLATFORM(PPC) */ +#if defined(__ppc__) \ + || defined(__PPC__) \ + || defined(__powerpc__) \ + || defined(__powerpc) \ + || defined(__POWERPC__) \ + || defined(_M_PPC) \ + || defined(__PPC) +#define WTF_PLATFORM_PPC 1 +#define WTF_PLATFORM_BIG_ENDIAN 1 +#endif + +/* PLATFORM(PPC64) */ +#if defined(__ppc64__) \ + || defined(__PPC64__) +#define WTF_PLATFORM_PPC64 1 +#define WTF_PLATFORM_BIG_ENDIAN 1 +#endif + +/* PLATFORM(ARM) */ +#if defined(arm) \ + || defined(__arm__) +#define WTF_PLATFORM_ARM 1 +#if defined(__ARMEB__) +#define WTF_PLATFORM_BIG_ENDIAN 1 +#elif !defined(__ARM_EABI__) && !defined(__ARMEB__) && !defined(__VFP_FP__) +#define WTF_PLATFORM_MIDDLE_ENDIAN 1 +#endif +#if !defined(__ARM_EABI__) +#define WTF_PLATFORM_FORCE_PACK 1 +#endif 
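+/* A sketch of how these detection macros are meant to be consumed (the
+   fragment below is illustrative, not a definition from this file). Client
+   code never tests WTF_PLATFORM_ARM directly; it goes through the
+   function-style wrappers defined at the top of this file:
+
+       #if PLATFORM(ARM) && !PLATFORM(BIG_ENDIAN)
+       // little-endian ARM code path
+       #endif
+
+   PLATFORM(ARM) expands to (defined(WTF_PLATFORM_ARM) && WTF_PLATFORM_ARM),
+   so a macro that was never defined safely evaluates to 0 rather than
+   producing a compile error or an accidental match. */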
+#endif + +/* PLATFORM(X86) */ +#if defined(__i386__) \ + || defined(i386) \ + || defined(_M_IX86) \ + || defined(_X86_) \ + || defined(__THW_INTEL) +#define WTF_PLATFORM_X86 1 +#endif + +/* PLATFORM(X86_64) */ +#if defined(__x86_64__) \ + || defined(__ia64__) \ + || defined(_M_X64) +#define WTF_PLATFORM_X86_64 1 +#endif + +/* PLATFORM(SPARC64) */ +#if defined(__sparc64__) +#define WTF_PLATFORM_SPARC64 1 +#define WTF_PLATFORM_BIG_ENDIAN 1 +#endif + +/* PLATFORM(WIN_CE) && PLATFORM(QT) + We cannot determine the endianness at compile time. For + Qt for Windows CE the endianness is specified in the + device-specific mkspec +*/ +#if PLATFORM(WIN_CE) && PLATFORM(QT) +# include <QtGlobal> +# undef WTF_PLATFORM_BIG_ENDIAN +# undef WTF_PLATFORM_MIDDLE_ENDIAN +# if Q_BYTE_ORDER == Q_BIG_ENDIAN +# define WTF_PLATFORM_BIG_ENDIAN 1 +# endif +#endif + +/* Compiler */ + +/* COMPILER(MSVC) */ +#if defined(_MSC_VER) +#define WTF_COMPILER_MSVC 1 +#if _MSC_VER < 1400 +#define WTF_COMPILER_MSVC7 1 +#endif +#endif + +/* COMPILER(RVCT) */ +#if defined(__CC_ARM) || defined(__ARMCC__) +#define WTF_COMPILER_RVCT 1 +#endif + +/* COMPILER(GCC) */ +/* --gnu option of the RVCT compiler also defines __GNUC__ */ +#if defined(__GNUC__) && !COMPILER(RVCT) +#define WTF_COMPILER_GCC 1 +#endif + +/* COMPILER(MINGW) */ +#if defined(MINGW) || defined(__MINGW32__) +#define WTF_COMPILER_MINGW 1 +#endif + +/* COMPILER(BORLAND) */ +/* not really fully supported - is this relevant any more? */ +#if defined(__BORLANDC__) +#define WTF_COMPILER_BORLAND 1 +#endif + +/* COMPILER(CYGWIN) */ +/* not really fully supported - is this relevant any more? */ +#if defined(__CYGWIN__) +#define WTF_COMPILER_CYGWIN 1 +#endif + +/* COMPILER(WINSCW) */ +#if defined(__WINSCW__) +#define WTF_COMPILER_WINSCW 1 +#endif + +#if (PLATFORM(MAC) || PLATFORM(WIN)) && !defined(ENABLE_JSC_MULTIPLE_THREADS) +#define ENABLE_JSC_MULTIPLE_THREADS 1 +#endif + +/* for Unicode, KDE uses Qt */ +#if PLATFORM(KDE) || PLATFORM(QT) +#define WTF_USE_QT4_UNICODE 1 +#elif PLATFORM(SYMBIAN) +#define WTF_USE_SYMBIAN_UNICODE 1 +#elif PLATFORM(GTK) +/* The GTK+ Unicode backend is configurable */ +#else +#define WTF_USE_ICU_UNICODE 1 +#endif + +#if PLATFORM(MAC) +#define WTF_PLATFORM_CF 1 +#define WTF_USE_PTHREADS 1 +#if !defined(ENABLE_MAC_JAVA_BRIDGE) +#define ENABLE_MAC_JAVA_BRIDGE 1 +#endif +#if !defined(ENABLE_DASHBOARD_SUPPORT) +#define ENABLE_DASHBOARD_SUPPORT 1 +#endif +#define HAVE_READLINE 1 +#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) +#define HAVE_DTRACE 1 +#endif +#endif + +#if PLATFORM(CHROMIUM) && PLATFORM(DARWIN) +#define WTF_PLATFORM_CF 1 +#define WTF_USE_PTHREADS 1 +#endif + +#if PLATFORM(WIN) +#define WTF_USE_WININET 1 +#endif + +#if PLATFORM(WX) +#define WTF_USE_CURL 1 +#define WTF_USE_PTHREADS 1 +#endif + +#if PLATFORM(GTK) +#if HAVE(PTHREAD_H) +#define WTF_USE_PTHREADS 1 +#endif +#endif + +#if !defined(HAVE_ACCESSIBILITY) +#if PLATFORM(MAC) || PLATFORM(WIN) || PLATFORM(GTK) || PLATFORM(CHROMIUM) +#define HAVE_ACCESSIBILITY 1 +#endif +#endif /* !defined(HAVE_ACCESSIBILITY) */ + +#if COMPILER(GCC) +#define HAVE_COMPUTED_GOTO 1 +#endif + +#if PLATFORM(DARWIN) + +#define HAVE_ERRNO_H 1 +#define HAVE_MMAP 1 +#define HAVE_MERGESORT 1 +#define HAVE_SBRK 1 +#define HAVE_STRINGS_H 1 +#define HAVE_SYS_PARAM_H 1 +#define HAVE_SYS_TIME_H 1 +#define HAVE_SYS_TIMEB_H 1 + +#elif PLATFORM(WIN_OS) + +#define HAVE_FLOAT_H 1 +#if PLATFORM(WIN_CE) +#define HAVE_ERRNO_H 0 +#else +#define HAVE_SYS_TIMEB_H 1 +#endif +#define HAVE_VIRTUALALLOC 1 + +#elif
PLATFORM(SYMBIAN) + +#define HAVE_ERRNO_H 1 +#define HAVE_MMAP 0 +#define HAVE_SBRK 1 + +#define HAVE_SYS_TIME_H 1 +#define HAVE_STRINGS_H 1 + +#if !COMPILER(RVCT) +#define HAVE_SYS_PARAM_H 1 +#endif + +#else + +/* FIXME: is this actually used or do other platforms generate their own config.h? */ + +#define HAVE_ERRNO_H 1 +#define HAVE_MMAP 1 +#define HAVE_SBRK 1 +#define HAVE_STRINGS_H 1 +#define HAVE_SYS_PARAM_H 1 +#define HAVE_SYS_TIME_H 1 + +#endif + +/* ENABLE macro defaults */ + +#if !defined(ENABLE_ICONDATABASE) +#define ENABLE_ICONDATABASE 1 +#endif + +#if !defined(ENABLE_DATABASE) +#define ENABLE_DATABASE 1 +#endif + +#if !defined(ENABLE_JAVASCRIPT_DEBUGGER) +#define ENABLE_JAVASCRIPT_DEBUGGER 1 +#endif + +#if !defined(ENABLE_FTPDIR) +#define ENABLE_FTPDIR 1 +#endif + +#if !defined(ENABLE_DASHBOARD_SUPPORT) +#define ENABLE_DASHBOARD_SUPPORT 0 +#endif + +#if !defined(ENABLE_MAC_JAVA_BRIDGE) +#define ENABLE_MAC_JAVA_BRIDGE 0 +#endif + +#if !defined(ENABLE_NETSCAPE_PLUGIN_API) +#define ENABLE_NETSCAPE_PLUGIN_API 1 +#endif + +#if !defined(ENABLE_OPCODE_STATS) +#define ENABLE_OPCODE_STATS 0 +#endif + +#if !defined(ENABLE_CODEBLOCK_SAMPLING) +#define ENABLE_CODEBLOCK_SAMPLING 0 +#endif + +#if ENABLE(CODEBLOCK_SAMPLING) && !defined(ENABLE_OPCODE_SAMPLING) +#define ENABLE_OPCODE_SAMPLING 1 +#endif + +#if !defined(ENABLE_OPCODE_SAMPLING) +#define ENABLE_OPCODE_SAMPLING 0 +#endif + +#if !defined(ENABLE_GEOLOCATION) +#define ENABLE_GEOLOCATION 0 +#endif + +#if !defined(ENABLE_TEXT_CARET) +#define ENABLE_TEXT_CARET 1 +#endif + +#if !defined(ENABLE_JIT) +/* x86-64 support is under development. */ +#if PLATFORM(X86_64) && PLATFORM(MAC) + #define ENABLE_JIT 0 + #define WTF_USE_JIT_STUB_ARGUMENT_REGISTER 1 + #define ENABLE_JIT_OPTIMIZE_CALL 1 + #define ENABLE_JIT_OPTIMIZE_PROPERTY_ACCESS 1 + #define WTF_USE_ALTERNATE_JSIMMEDIATE 1 +/* The JIT is tested & working on x86 Mac */ +#elif PLATFORM(X86) && PLATFORM(MAC) + #define ENABLE_JIT 1 + #define WTF_USE_JIT_STUB_ARGUMENT_VA_LIST 1 + #define ENABLE_JIT_OPTIMIZE_CALL 1 + #define ENABLE_JIT_OPTIMIZE_PROPERTY_ACCESS 1 + #define ENABLE_JIT_OPTIMIZE_ARITHMETIC 1 +/* The JIT is tested & working on x86 Windows */ +#elif PLATFORM(X86) && PLATFORM(WIN) + #define ENABLE_JIT 1 + #define WTF_USE_JIT_STUB_ARGUMENT_REGISTER 1 + #define ENABLE_JIT_OPTIMIZE_CALL 1 + #define ENABLE_JIT_OPTIMIZE_PROPERTY_ACCESS 1 + #define ENABLE_JIT_OPTIMIZE_ARITHMETIC 1 +#endif +#endif + +#if ENABLE(JIT) +#if !(USE(JIT_STUB_ARGUMENT_VA_LIST) || USE(JIT_STUB_ARGUMENT_REGISTER) || USE(JIT_STUB_ARGUMENT_STACK)) +#error Please define one of the JIT_STUB_ARGUMENT settings. +#elif (USE(JIT_STUB_ARGUMENT_VA_LIST) && USE(JIT_STUB_ARGUMENT_REGISTER)) \ + || (USE(JIT_STUB_ARGUMENT_VA_LIST) && USE(JIT_STUB_ARGUMENT_STACK)) \ + || (USE(JIT_STUB_ARGUMENT_REGISTER) && USE(JIT_STUB_ARGUMENT_STACK)) +#error Please do not define more than one of the JIT_STUB_ARGUMENT settings. +#endif +#endif + +/* WREC supports x86 & x86-64, and has been tested on Mac and Windows ('cept on 64-bit on Mac). 
*/ +#if (!defined(ENABLE_WREC) && PLATFORM(X86) && PLATFORM(MAC)) \ + || (!defined(ENABLE_WREC) && PLATFORM(X86_64) && PLATFORM(MAC)) \ + || (!defined(ENABLE_WREC) && PLATFORM(X86) && PLATFORM(WIN)) +#define ENABLE_WREC 1 +#endif + +#if ENABLE(JIT) || ENABLE(WREC) +#define ENABLE_ASSEMBLER 1 +#endif + +#if !defined(ENABLE_PAN_SCROLLING) && PLATFORM(WIN_OS) +#define ENABLE_PAN_SCROLLING 1 +#endif + +#if !defined(ENABLE_ACTIVEX_TYPE_CONVERSION_WMPLAYER) +#define ENABLE_ACTIVEX_TYPE_CONVERSION_WMPLAYER 1 +#endif + +/* Use the QtXmlStreamReader implementation for XMLTokenizer */ +#if PLATFORM(QT) +#if !ENABLE(XSLT) +#define WTF_USE_QXMLSTREAM 1 +#endif +#endif + +#if !PLATFORM(QT) +#define WTF_USE_FONT_FAST_PATH 1 +#endif + +#endif /* WTF_Platform_h */ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/RandomNumber.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/RandomNumber.cpp new file mode 100644 index 0000000..e569227 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/RandomNumber.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. + * (C) 2008 Torch Mobile Inc. All rights reserved. (http://www.torchmobile.com/) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "RandomNumber.h" + +#include "RandomNumberSeed.h" + +#include <limits> +#include <limits.h> +#include <stdint.h> +#include <stdlib.h> + +namespace WTF { + +double randomNumber() +{ +#if !ENABLE(JSC_MULTIPLE_THREADS) + static bool s_initialized = false; + if (!s_initialized) { + initializeRandomNumberGenerator(); + s_initialized = true; + } +#endif + + uint32_t part1; + uint32_t part2; + uint64_t fullRandom; +#if COMPILER(MSVC) && defined(_CRT_RAND_S) + rand_s(&part1); + rand_s(&part2); + fullRandom = part1; + fullRandom <<= 32; + fullRandom |= part2; +#elif PLATFORM(DARWIN) + part1 = arc4random(); + part2 = arc4random(); + fullRandom = part1; + fullRandom <<= 32; + fullRandom |= part2; +#elif PLATFORM(UNIX) + part1 = random() & (RAND_MAX - 1); + part2 = random() & (RAND_MAX - 1); + // random only provides 31 bits + fullRandom = part1; + fullRandom <<= 31; + fullRandom |= part2; +#else + part1 = rand() & (RAND_MAX - 1); + part2 = rand() & (RAND_MAX - 1); + // rand only provides 31 bits, and the low order bits of that aren't very random + // so we take the high 26 bits of part 1, and the high 27 bits of part2. + part1 >>= 5; // drop the low 5 bits + part2 >>= 4; // drop the low 4 bits + fullRandom = part1; + fullRandom <<= 27; + fullRandom |= part2; +#endif + // Mask off the low 53bits + fullRandom &= (1LL << 53) - 1; + return static_cast<double>(fullRandom)/static_cast<double>(1LL << 53); +} + +} diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/RandomNumber.h b/src/3rdparty/webkit/JavaScriptCore/wtf/RandomNumber.h new file mode 100644 index 0000000..3540344 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/RandomNumber.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2008 Torch Mobile Inc. All rights reserved. (http://www.torchmobile.com/) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef WTF_RandomNumber_h +#define WTF_RandomNumber_h + +namespace WTF { + + // Returns a pseudo-random number in the range [0, 1) + double randomNumber(); + +} + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/RandomNumberSeed.h b/src/3rdparty/webkit/JavaScriptCore/wtf/RandomNumberSeed.h new file mode 100644 index 0000000..c7af3d6 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/RandomNumberSeed.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2008 Torch Mobile Inc. All rights reserved. (http://www.torchmobile.com/) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WTF_RandomNumberSeed_h +#define WTF_RandomNumberSeed_h + +#include <stdlib.h> +#include <time.h> + +#if HAVE(SYS_TIME_H) +#include <sys/time.h> +#endif + +#if PLATFORM(UNIX) +#include <sys/types.h> +#include <unistd.h> +#endif + +#if PLATFORM(WIN_CE) +#include <ce_time.h> +#endif + +// Internal JavaScriptCore usage only +namespace WTF { + +inline void initializeRandomNumberGenerator() +{ +#if PLATFORM(DARWIN) + // On Darwin we use arc4random which initialises itself. +#elif COMPILER(MSVC) && defined(_CRT_RAND_S) + // On Windows we use rand_s which initialises itself. +#elif PLATFORM(UNIX) + // srandomdev is not guaranteed to exist on Linux, so we use this poor seed; this should be improved + timeval time; + gettimeofday(&time, 0); + srandom(static_cast<unsigned>(time.tv_usec * getpid())); +#else + srand(static_cast<unsigned>(time(0))); +#endif +} + +} + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/RefCounted.h b/src/3rdparty/webkit/JavaScriptCore/wtf/RefCounted.h new file mode 100644 index 0000000..ac8e167 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/RefCounted.h @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef RefCounted_h +#define RefCounted_h + +#include <wtf/Assertions.h> +#include <wtf/Noncopyable.h> + +namespace WTF { + +// This base class holds the non-template methods and attributes. +// The RefCounted class inherits from it reducing the template bloat +// generated by the compiler (technique called template hoisting). +class RefCountedBase : Noncopyable { +public: + void ref() + { + ASSERT(!m_deletionHasBegun); + ++m_refCount; + } + + bool hasOneRef() const + { + ASSERT(!m_deletionHasBegun); + return m_refCount == 1; + } + + int refCount() const + { + return m_refCount; + } + +protected: + RefCountedBase(int initialRefCount) + : m_refCount(initialRefCount) +#ifndef NDEBUG + , m_deletionHasBegun(false) +#endif + { + } + + ~RefCountedBase() {} + + // Returns whether the pointer should be freed or not. + bool derefBase() + { + ASSERT(!m_deletionHasBegun); + ASSERT(m_refCount > 0); + if (m_refCount == 1) { +#ifndef NDEBUG + m_deletionHasBegun = true; +#endif + return true; + } + + --m_refCount; + return false; + } + +protected: + int m_refCount; +#ifndef NDEBUG + bool m_deletionHasBegun; +#endif +}; + + +template<class T> class RefCounted : public RefCountedBase { +public: + RefCounted(int initialRefCount = 1) + : RefCountedBase(initialRefCount) + { + } + + void deref() + { + if (derefBase()) + delete static_cast<T*>(this); + } + +protected: + ~RefCounted() {} +}; + +} // namespace WTF + +using WTF::RefCounted; + +#endif // RefCounted_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/RefCountedLeakCounter.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/RefCountedLeakCounter.cpp new file mode 100644 index 0000000..80922d3 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/RefCountedLeakCounter.cpp @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + +#include "config.h" +#include "RefCountedLeakCounter.h" + +#include <wtf/HashCountedSet.h> + +namespace WTF { + +#ifdef NDEBUG + +void RefCountedLeakCounter::suppressMessages(const char*) { } +void RefCountedLeakCounter::cancelMessageSuppression(const char*) { } + +RefCountedLeakCounter::RefCountedLeakCounter(const char*) { } +RefCountedLeakCounter::~RefCountedLeakCounter() { } + +void RefCountedLeakCounter::increment() { } +void RefCountedLeakCounter::decrement() { } + +#else + +#define LOG_CHANNEL_PREFIX Log +static WTFLogChannel LogRefCountedLeaks = { 0x00000000, "", WTFLogChannelOn }; + +typedef HashCountedSet<const char*, PtrHash<const char*> > ReasonSet; +static ReasonSet* leakMessageSuppressionReasons; + +void RefCountedLeakCounter::suppressMessages(const char* reason) +{ + if (!leakMessageSuppressionReasons) + leakMessageSuppressionReasons = new ReasonSet; + leakMessageSuppressionReasons->add(reason); +} + +void RefCountedLeakCounter::cancelMessageSuppression(const char* reason) +{ + ASSERT(leakMessageSuppressionReasons); + ASSERT(leakMessageSuppressionReasons->contains(reason)); + leakMessageSuppressionReasons->remove(reason); +} + +RefCountedLeakCounter::RefCountedLeakCounter(const char* description) + : m_description(description) +{ +} + +RefCountedLeakCounter::~RefCountedLeakCounter() +{ + static bool loggedSuppressionReason; + if (m_count) { + if (!leakMessageSuppressionReasons || leakMessageSuppressionReasons->isEmpty()) + LOG(RefCountedLeaks, "LEAK: %u %s", m_count, m_description); + else if (!loggedSuppressionReason) { + // This logs only one reason. Later we could change it so we log all the reasons. + LOG(RefCountedLeaks, "No leak checking done: %s", leakMessageSuppressionReasons->begin()->first); + loggedSuppressionReason = true; + } + } +} + +void RefCountedLeakCounter::increment() +{ +#if ENABLE(JSC_MULTIPLE_THREADS) + atomicIncrement(&m_count); +#else + ++m_count; +#endif +} + +void RefCountedLeakCounter::decrement() +{ +#if ENABLE(JSC_MULTIPLE_THREADS) + atomicDecrement(&m_count); +#else + --m_count; +#endif +} + +#endif + +} // namespace WTF diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/RefCountedLeakCounter.h b/src/3rdparty/webkit/JavaScriptCore/wtf/RefCountedLeakCounter.h new file mode 100644 index 0000000..57cc283 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/RefCountedLeakCounter.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef RefCountedLeakCounter_h +#define RefCountedLeakCounter_h + +#include "Assertions.h" +#include "Threading.h" + +namespace WTF { + + struct RefCountedLeakCounter { + static void suppressMessages(const char*); + static void cancelMessageSuppression(const char*); + + explicit RefCountedLeakCounter(const char* description); + ~RefCountedLeakCounter(); + + void increment(); + void decrement(); + +#ifndef NDEBUG + private: + volatile int m_count; + const char* m_description; +#endif + }; + +} // namespace WTF + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/RefPtr.h b/src/3rdparty/webkit/JavaScriptCore/wtf/RefPtr.h new file mode 100644 index 0000000..929e745 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/RefPtr.h @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_RefPtr_h +#define WTF_RefPtr_h + +#include <algorithm> +#include "AlwaysInline.h" + +namespace WTF { + + enum PlacementNewAdoptType { PlacementNewAdopt }; + + template <typename T> class PassRefPtr; + + enum HashTableDeletedValueType { HashTableDeletedValue }; + + template <typename T> class RefPtr { + public: + RefPtr() : m_ptr(0) { } + RefPtr(T* ptr) : m_ptr(ptr) { if (ptr) ptr->ref(); } + RefPtr(const RefPtr& o) : m_ptr(o.m_ptr) { if (T* ptr = m_ptr) ptr->ref(); } + // see comment in PassRefPtr.h for why this takes const reference + template <typename U> RefPtr(const PassRefPtr<U>&); + + // Special constructor for cases where we overwrite an object in place. + RefPtr(PlacementNewAdoptType) { } + + // Hash table deleted values, which are only constructed and never copied or destroyed. + RefPtr(HashTableDeletedValueType) : m_ptr(hashTableDeletedValue()) { } + bool isHashTableDeletedValue() const { return m_ptr == hashTableDeletedValue(); } + + ~RefPtr() { if (T* ptr = m_ptr) ptr->deref(); } + + template <typename U> RefPtr(const RefPtr<U>& o) : m_ptr(o.get()) { if (T* ptr = m_ptr) ptr->ref(); } + + T* get() const { return m_ptr; } + + void clear() { if (T* ptr = m_ptr) ptr->deref(); m_ptr = 0; } + PassRefPtr<T> release() { PassRefPtr<T> tmp = adoptRef(m_ptr); m_ptr = 0; return tmp; } + + T& operator*() const { return *m_ptr; } + ALWAYS_INLINE T* operator->() const { return m_ptr; } + + bool operator!() const { return !m_ptr; } + + // This conversion operator allows implicit conversion to bool but not to other integer types. +#if COMPILER(WINSCW) + operator bool() const { return m_ptr; } +#else + typedef T* RefPtr::*UnspecifiedBoolType; + operator UnspecifiedBoolType() const { return m_ptr ? 
&RefPtr::m_ptr : 0; } +#endif + + RefPtr& operator=(const RefPtr&); + RefPtr& operator=(T*); + RefPtr& operator=(const PassRefPtr<T>&); + template <typename U> RefPtr& operator=(const RefPtr<U>&); + template <typename U> RefPtr& operator=(const PassRefPtr<U>&); + + void swap(RefPtr&); + + private: + static T* hashTableDeletedValue() { return reinterpret_cast<T*>(-1); } + + T* m_ptr; + }; + + template <typename T> template <typename U> inline RefPtr<T>::RefPtr(const PassRefPtr<U>& o) + : m_ptr(o.releaseRef()) + { + } + + template <typename T> inline RefPtr<T>& RefPtr<T>::operator=(const RefPtr<T>& o) + { + T* optr = o.get(); + if (optr) + optr->ref(); + T* ptr = m_ptr; + m_ptr = optr; + if (ptr) + ptr->deref(); + return *this; + } + + template <typename T> template <typename U> inline RefPtr<T>& RefPtr<T>::operator=(const RefPtr<U>& o) + { + T* optr = o.get(); + if (optr) + optr->ref(); + T* ptr = m_ptr; + m_ptr = optr; + if (ptr) + ptr->deref(); + return *this; + } + + template <typename T> inline RefPtr<T>& RefPtr<T>::operator=(T* optr) + { + if (optr) + optr->ref(); + T* ptr = m_ptr; + m_ptr = optr; + if (ptr) + ptr->deref(); + return *this; + } + + template <typename T> inline RefPtr<T>& RefPtr<T>::operator=(const PassRefPtr<T>& o) + { + T* ptr = m_ptr; + m_ptr = o.releaseRef(); + if (ptr) + ptr->deref(); + return *this; + } + + template <typename T> template <typename U> inline RefPtr<T>& RefPtr<T>::operator=(const PassRefPtr<U>& o) + { + T* ptr = m_ptr; + m_ptr = o.releaseRef(); + if (ptr) + ptr->deref(); + return *this; + } + + template <class T> inline void RefPtr<T>::swap(RefPtr<T>& o) + { + std::swap(m_ptr, o.m_ptr); + } + + template <class T> inline void swap(RefPtr<T>& a, RefPtr<T>& b) + { + a.swap(b); + } + + template <typename T, typename U> inline bool operator==(const RefPtr<T>& a, const RefPtr<U>& b) + { + return a.get() == b.get(); + } + + template <typename T, typename U> inline bool operator==(const RefPtr<T>& a, U* b) + { + return a.get() == b; + } + + template <typename T, typename U> inline bool operator==(T* a, const RefPtr<U>& b) + { + return a == b.get(); + } + + template <typename T, typename U> inline bool operator!=(const RefPtr<T>& a, const RefPtr<U>& b) + { + return a.get() != b.get(); + } + + template <typename T, typename U> inline bool operator!=(const RefPtr<T>& a, U* b) + { + return a.get() != b; + } + + template <typename T, typename U> inline bool operator!=(T* a, const RefPtr<U>& b) + { + return a != b.get(); + } + + template <typename T, typename U> inline RefPtr<T> static_pointer_cast(const RefPtr<U>& p) + { + return RefPtr<T>(static_cast<T*>(p.get())); + } + + template <typename T, typename U> inline RefPtr<T> const_pointer_cast(const RefPtr<U>& p) + { + return RefPtr<T>(const_cast<T*>(p.get())); + } + + template <typename T> inline T* getPtr(const RefPtr<T>& p) + { + return p.get(); + } + +} // namespace WTF + +using WTF::RefPtr; +using WTF::static_pointer_cast; +using WTF::const_pointer_cast; + +#endif // WTF_RefPtr_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/RefPtrHashMap.h b/src/3rdparty/webkit/JavaScriptCore/wtf/RefPtrHashMap.h new file mode 100644 index 0000000..1cbebb4 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/RefPtrHashMap.h @@ -0,0 +1,336 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +namespace WTF { + + // This specialization is a direct copy of HashMap, with overloaded functions + // to allow for lookup by pointer instead of RefPtr, avoiding ref-count churn. + + // FIXME: Find a better way that doesn't require an entire copy of the HashMap template. + + template<typename RawKeyType, typename ValueType, typename ValueTraits, typename HashFunctions> + struct RefPtrHashMapRawKeyTranslator { + typedef typename ValueType::first_type KeyType; + typedef typename ValueType::second_type MappedType; + typedef typename ValueTraits::FirstTraits KeyTraits; + typedef typename ValueTraits::SecondTraits MappedTraits; + + static unsigned hash(RawKeyType key) { return HashFunctions::hash(key); } + static bool equal(const KeyType& a, RawKeyType b) { return HashFunctions::equal(a, b); } + static void translate(ValueType& location, RawKeyType key, const MappedType& mapped) + { + location.first = key; + location.second = mapped; + } + }; + + template<typename T, typename MappedArg, typename HashArg, typename KeyTraitsArg, typename MappedTraitsArg> + class HashMap<RefPtr<T>, MappedArg, HashArg, KeyTraitsArg, MappedTraitsArg> { + private: + typedef KeyTraitsArg KeyTraits; + typedef MappedTraitsArg MappedTraits; + typedef PairHashTraits<KeyTraits, MappedTraits> ValueTraits; + + public: + typedef typename KeyTraits::TraitType KeyType; + typedef T* RawKeyType; + typedef typename MappedTraits::TraitType MappedType; + typedef typename ValueTraits::TraitType ValueType; + + private: + typedef HashArg HashFunctions; + + typedef HashTable<KeyType, ValueType, PairFirstExtractor<ValueType>, + HashFunctions, ValueTraits, KeyTraits> HashTableType; + + typedef RefPtrHashMapRawKeyTranslator<RawKeyType, ValueType, ValueTraits, HashFunctions> + RawKeyTranslator; + + public: + typedef HashTableIteratorAdapter<HashTableType, ValueType> iterator; + typedef HashTableConstIteratorAdapter<HashTableType, ValueType> const_iterator; + + void swap(HashMap&); + + int size() const; + int capacity() const; + bool isEmpty() const; + + // iterators iterate over pairs of keys and values + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + iterator find(const KeyType&); + iterator find(RawKeyType); + const_iterator find(const KeyType&) const; + const_iterator find(RawKeyType) const; + bool contains(const KeyType&) const; + bool contains(RawKeyType) const; + MappedType get(const KeyType&) const; + MappedType get(RawKeyType) const; + MappedType inlineGet(RawKeyType) const; + + // replaces value but not key if key is already present + // return value is a pair of the iterator to the key location, + // and a boolean that's true if a new value was actually added + pair<iterator, bool> set(const 
KeyType&, const MappedType&); + pair<iterator, bool> set(RawKeyType, const MappedType&); + + // does nothing if key is already present + // return value is a pair of the iterator to the key location, + // and a boolean that's true if a new value was actually added + pair<iterator, bool> add(const KeyType&, const MappedType&); + pair<iterator, bool> add(RawKeyType, const MappedType&); + + void remove(const KeyType&); + void remove(RawKeyType); + void remove(iterator); + void clear(); + + MappedType take(const KeyType&); // efficient combination of get with remove + MappedType take(RawKeyType); // efficient combination of get with remove + + private: + pair<iterator, bool> inlineAdd(const KeyType&, const MappedType&); + pair<iterator, bool> inlineAdd(RawKeyType, const MappedType&); + + HashTableType m_impl; + }; + + template<typename T, typename U, typename V, typename W, typename X> + inline void HashMap<RefPtr<T>, U, V, W, X>::swap(HashMap& other) + { + m_impl.swap(other.m_impl); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline int HashMap<RefPtr<T>, U, V, W, X>::size() const + { + return m_impl.size(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline int HashMap<RefPtr<T>, U, V, W, X>::capacity() const + { + return m_impl.capacity(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline bool HashMap<RefPtr<T>, U, V, W, X>::isEmpty() const + { + return m_impl.isEmpty(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<RefPtr<T>, U, V, W, X>::iterator HashMap<RefPtr<T>, U, V, W, X>::begin() + { + return m_impl.begin(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<RefPtr<T>, U, V, W, X>::iterator HashMap<RefPtr<T>, U, V, W, X>::end() + { + return m_impl.end(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<RefPtr<T>, U, V, W, X>::const_iterator HashMap<RefPtr<T>, U, V, W, X>::begin() const + { + return m_impl.begin(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<RefPtr<T>, U, V, W, X>::const_iterator HashMap<RefPtr<T>, U, V, W, X>::end() const + { + return m_impl.end(); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<RefPtr<T>, U, V, W, X>::iterator HashMap<RefPtr<T>, U, V, W, X>::find(const KeyType& key) + { + return m_impl.find(key); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<RefPtr<T>, U, V, W, X>::iterator HashMap<RefPtr<T>, U, V, W, X>::find(RawKeyType key) + { + return m_impl.template find<RawKeyType, RawKeyTranslator>(key); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<RefPtr<T>, U, V, W, X>::const_iterator HashMap<RefPtr<T>, U, V, W, X>::find(const KeyType& key) const + { + return m_impl.find(key); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline typename HashMap<RefPtr<T>, U, V, W, X>::const_iterator HashMap<RefPtr<T>, U, V, W, X>::find(RawKeyType key) const + { + return m_impl.template find<RawKeyType, RawKeyTranslator>(key); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline bool HashMap<RefPtr<T>, U, V, W, X>::contains(const KeyType& key) const + { + return m_impl.contains(key); + } + + template<typename T, 
typename U, typename V, typename W, typename X> + inline bool HashMap<RefPtr<T>, U, V, W, X>::contains(RawKeyType key) const + { + return m_impl.template contains<RawKeyType, RawKeyTranslator>(key); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline pair<typename HashMap<RefPtr<T>, U, V, W, X>::iterator, bool> + HashMap<RefPtr<T>, U, V, W, X>::inlineAdd(const KeyType& key, const MappedType& mapped) + { + typedef HashMapTranslator<ValueType, ValueTraits, HashFunctions> TranslatorType; + return m_impl.template add<KeyType, MappedType, TranslatorType>(key, mapped); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline pair<typename HashMap<RefPtr<T>, U, V, W, X>::iterator, bool> + HashMap<RefPtr<T>, U, V, W, X>::inlineAdd(RawKeyType key, const MappedType& mapped) + { + return m_impl.template add<RawKeyType, MappedType, RawKeyTranslator>(key, mapped); + } + + template<typename T, typename U, typename V, typename W, typename X> + pair<typename HashMap<RefPtr<T>, U, V, W, X>::iterator, bool> + HashMap<RefPtr<T>, U, V, W, X>::set(const KeyType& key, const MappedType& mapped) + { + pair<iterator, bool> result = inlineAdd(key, mapped); + if (!result.second) { + // add call above didn't change anything, so set the mapped value + result.first->second = mapped; + } + return result; + } + + template<typename T, typename U, typename V, typename W, typename X> + pair<typename HashMap<RefPtr<T>, U, V, W, X>::iterator, bool> + HashMap<RefPtr<T>, U, V, W, X>::set(RawKeyType key, const MappedType& mapped) + { + pair<iterator, bool> result = inlineAdd(key, mapped); + if (!result.second) { + // add call above didn't change anything, so set the mapped value + result.first->second = mapped; + } + return result; + } + + template<typename T, typename U, typename V, typename W, typename X> + pair<typename HashMap<RefPtr<T>, U, V, W, X>::iterator, bool> + HashMap<RefPtr<T>, U, V, W, X>::add(const KeyType& key, const MappedType& mapped) + { + return inlineAdd(key, mapped); + } + + template<typename T, typename U, typename V, typename W, typename X> + pair<typename HashMap<RefPtr<T>, U, V, W, X>::iterator, bool> + HashMap<RefPtr<T>, U, V, W, X>::add(RawKeyType key, const MappedType& mapped) + { + return inlineAdd(key, mapped); + } + + template<typename T, typename U, typename V, typename W, typename MappedTraits> + typename HashMap<RefPtr<T>, U, V, W, MappedTraits>::MappedType + HashMap<RefPtr<T>, U, V, W, MappedTraits>::get(const KeyType& key) const + { + ValueType* entry = const_cast<HashTableType&>(m_impl).lookup(key); + if (!entry) + return MappedTraits::emptyValue(); + return entry->second; + } + + template<typename T, typename U, typename V, typename W, typename MappedTraits> + typename HashMap<RefPtr<T>, U, V, W, MappedTraits>::MappedType + inline HashMap<RefPtr<T>, U, V, W, MappedTraits>::inlineGet(RawKeyType key) const + { + ValueType* entry = const_cast<HashTableType&>(m_impl).template lookup<RawKeyType, RawKeyTranslator>(key); + if (!entry) + return MappedTraits::emptyValue(); + return entry->second; + } + + template<typename T, typename U, typename V, typename W, typename MappedTraits> + typename HashMap<RefPtr<T>, U, V, W, MappedTraits>::MappedType + HashMap<RefPtr<T>, U, V, W, MappedTraits>::get(RawKeyType key) const + { + return inlineGet(key); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline void HashMap<RefPtr<T>, U, V, W, X>::remove(iterator it) + { + if (it.m_impl == m_impl.end()) + return; + 
m_impl.checkTableConsistency(); + m_impl.removeWithoutEntryConsistencyCheck(it.m_impl); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline void HashMap<RefPtr<T>, U, V, W, X>::remove(const KeyType& key) + { + remove(find(key)); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline void HashMap<RefPtr<T>, U, V, W, X>::remove(RawKeyType key) + { + remove(find(key)); + } + + template<typename T, typename U, typename V, typename W, typename X> + inline void HashMap<RefPtr<T>, U, V, W, X>::clear() + { + m_impl.clear(); + } + + template<typename T, typename U, typename V, typename W, typename MappedTraits> + typename HashMap<RefPtr<T>, U, V, W, MappedTraits>::MappedType + HashMap<RefPtr<T>, U, V, W, MappedTraits>::take(const KeyType& key) + { + // This can probably be made more efficient to avoid ref/deref churn. + iterator it = find(key); + if (it == end()) + return MappedTraits::emptyValue(); + typename HashMap<RefPtr<T>, U, V, W, MappedTraits>::MappedType result = it->second; + remove(it); + return result; + } + + template<typename T, typename U, typename V, typename W, typename MappedTraits> + typename HashMap<RefPtr<T>, U, V, W, MappedTraits>::MappedType + HashMap<RefPtr<T>, U, V, W, MappedTraits>::take(RawKeyType key) + { + // This can probably be made more efficient to avoid ref/deref churn. + iterator it = find(key); + if (it == end()) + return MappedTraits::emptyValue(); + typename HashMap<RefPtr<T>, U, V, W, MappedTraits>::MappedType result = it->second; + remove(it); + return result; + } + +} // namespace WTF diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/RetainPtr.h b/src/3rdparty/webkit/JavaScriptCore/wtf/RetainPtr.h new file mode 100644 index 0000000..a66a127 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/RetainPtr.h @@ -0,0 +1,210 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef RetainPtr_h +#define RetainPtr_h + +#include <algorithm> +#include <CoreFoundation/CoreFoundation.h> + +#ifdef __OBJC__ +#import <Foundation/Foundation.h> +#endif + +namespace WTF { + + template <typename T> struct RemovePointer { + typedef T type; + }; + + template <typename T> struct RemovePointer<T*> { + typedef T type; + }; + + // Unlike most of our smart pointers, RetainPtr can take either the pointer type or the pointed-to type, + // so both RetainPtr<NSDictionary> and RetainPtr<CFDictionaryRef> will work.
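+    // A minimal usage sketch (the string is arbitrary): the AdoptCF tag hands
+    // an existing +1 retain count straight to the smart pointer, which is the
+    // usual way to wrap the result of a CF "Create" function:
+    //
+    //     RetainPtr<CFStringRef> s(AdoptCF, CFStringCreateWithCString(
+    //         kCFAllocatorDefault, "hello", kCFStringEncodingUTF8));
+    //
+    // The CFStringRef is released automatically when s goes out of scope.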
+ + enum AdoptCFTag { AdoptCF }; + enum AdoptNSTag { AdoptNS }; + +#ifdef __OBJC__ + inline void adoptNSReference(id ptr) + { + if (ptr) { + CFRetain(ptr); + [ptr release]; + } + } +#endif + + template <typename T> class RetainPtr { + public: + typedef typename RemovePointer<T>::type ValueType; + typedef ValueType* PtrType; + + RetainPtr() : m_ptr(0) {} + RetainPtr(PtrType ptr) : m_ptr(ptr) { if (ptr) CFRetain(ptr); } + + RetainPtr(AdoptCFTag, PtrType ptr) : m_ptr(ptr) { } + RetainPtr(AdoptNSTag, PtrType ptr) : m_ptr(ptr) { adoptNSReference(ptr); } + + RetainPtr(const RetainPtr& o) : m_ptr(o.m_ptr) { if (PtrType ptr = m_ptr) CFRetain(ptr); } + + ~RetainPtr() { if (PtrType ptr = m_ptr) CFRelease(ptr); } + + template <typename U> RetainPtr(const RetainPtr<U>& o) : m_ptr(o.get()) { if (PtrType ptr = m_ptr) CFRetain(ptr); } + + PtrType get() const { return m_ptr; } + + PtrType releaseRef() { PtrType tmp = m_ptr; m_ptr = 0; return tmp; } + + PtrType operator->() const { return m_ptr; } + + bool operator!() const { return !m_ptr; } + + // This conversion operator allows implicit conversion to bool but not to other integer types. + typedef PtrType RetainPtr::*UnspecifiedBoolType; + operator UnspecifiedBoolType() const { return m_ptr ? &RetainPtr::m_ptr : 0; } + + RetainPtr& operator=(const RetainPtr&); + template <typename U> RetainPtr& operator=(const RetainPtr<U>&); + RetainPtr& operator=(PtrType); + template <typename U> RetainPtr& operator=(U*); + + void adoptCF(PtrType); + void adoptNS(PtrType); + + void swap(RetainPtr&); + + private: + PtrType m_ptr; + }; + + template <typename T> inline RetainPtr<T>& RetainPtr<T>::operator=(const RetainPtr<T>& o) + { + PtrType optr = o.get(); + if (optr) + CFRetain(optr); + PtrType ptr = m_ptr; + m_ptr = optr; + if (ptr) + CFRelease(ptr); + return *this; + } + + template <typename T> template <typename U> inline RetainPtr<T>& RetainPtr<T>::operator=(const RetainPtr<U>& o) + { + PtrType optr = o.get(); + if (optr) + CFRetain(optr); + PtrType ptr = m_ptr; + m_ptr = optr; + if (ptr) + CFRelease(ptr); + return *this; + } + + template <typename T> inline RetainPtr<T>& RetainPtr<T>::operator=(PtrType optr) + { + if (optr) + CFRetain(optr); + PtrType ptr = m_ptr; + m_ptr = optr; + if (ptr) + CFRelease(ptr); + return *this; + } + + template <typename T> inline void RetainPtr<T>::adoptCF(PtrType optr) + { + PtrType ptr = m_ptr; + m_ptr = optr; + if (ptr) + CFRelease(ptr); + } + + template <typename T> inline void RetainPtr<T>::adoptNS(PtrType optr) + { + adoptNSReference(optr); + + PtrType ptr = m_ptr; + m_ptr = optr; + if (ptr) + CFRelease(ptr); + } + + template <typename T> template <typename U> inline RetainPtr<T>& RetainPtr<T>::operator=(U* optr) + { + if (optr) + CFRetain(optr); + PtrType ptr = m_ptr; + m_ptr = optr; + if (ptr) + CFRelease(ptr); + return *this; + } + + template <class T> inline void RetainPtr<T>::swap(RetainPtr<T>& o) + { + std::swap(m_ptr, o.m_ptr); + } + + template <class T> inline void swap(RetainPtr<T>& a, RetainPtr<T>& b) + { + a.swap(b); + } + + template <typename T, typename U> inline bool operator==(const RetainPtr<T>& a, const RetainPtr<U>& b) + { + return a.get() == b.get(); + } + + template <typename T, typename U> inline bool operator==(const RetainPtr<T>& a, U* b) + { + return a.get() == b; + } + + template <typename T, typename U> inline bool operator==(T* a, const RetainPtr<U>& b) + { + return a == b.get(); + } + + template <typename T, typename U> inline bool operator!=(const RetainPtr<T>& a, const RetainPtr<U>& b) + { + return 
a.get() != b.get(); + } + + template <typename T, typename U> inline bool operator!=(const RetainPtr<T>& a, U* b) + { + return a.get() != b; + } + + template <typename T, typename U> inline bool operator!=(T* a, const RetainPtr<U>& b) + { + return a != b.get(); + } + +} // namespace WTF + +using WTF::AdoptCF; +using WTF::AdoptNS; +using WTF::RetainPtr; + +#endif // RetainPtr_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/StdLibExtras.h b/src/3rdparty/webkit/JavaScriptCore/wtf/StdLibExtras.h new file mode 100644 index 0000000..ddc5a58 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/StdLibExtras.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2008 Apple Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <wtf/Platform.h> + +// Use this macro to declare and define a static local variable (static T;) so that +// it is leaked and its destructor is not called at exit. Using this +// macro also works around a compiler bug present in Apple's version of GCC 4.0.1. +#if COMPILER(GCC) && defined(__APPLE_CC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 0 && __GNUC_PATCHLEVEL__ == 1 +#define DEFINE_STATIC_LOCAL(type, name, arguments) \ + static type* name##Ptr = new type arguments; \ + type& name = *name##Ptr +#else +#define DEFINE_STATIC_LOCAL(type, name, arguments) \ + static type& name = *new type arguments +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/StringExtras.h b/src/3rdparty/webkit/JavaScriptCore/wtf/StringExtras.h new file mode 100644 index 0000000..881b066 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/StringExtras.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2006 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC.
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WTF_StringExtras_h +#define WTF_StringExtras_h + +#include <stdarg.h> +#include <stdio.h> + +#if COMPILER(MSVC) + +inline int snprintf(char* buffer, size_t count, const char* format, ...) +{ + int result; + va_list args; + va_start(args, format); + result = _vsnprintf(buffer, count, format, args); + va_end(args); + return result; +} + +#if COMPILER(MSVC7) || PLATFORM(WIN_CE) + +inline int vsnprintf(char* buffer, size_t count, const char* format, va_list args) +{ + return _vsnprintf(buffer, count, format, args); +} + +#endif + +#if PLATFORM(WIN_CE) + +inline int strnicmp(const char* string1, const char* string2, size_t count) +{ + return _strnicmp(string1, string2, count); +} + +inline int stricmp(const char* string1, const char* string2) +{ + return _stricmp(string1, string2); +} + +inline char* strdup(const char* strSource) +{ + return _strdup(strSource); +} + +#endif + +inline int strncasecmp(const char* s1, const char* s2, size_t len) +{ + return strnicmp(s1, s2, len); +} + +inline int strcasecmp(const char* s1, const char* s2) +{ + return stricmp(s1, s2); +} + +#endif + +#endif // WTF_StringExtras_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/TCPackedCache.h b/src/3rdparty/webkit/JavaScriptCore/wtf/TCPackedCache.h new file mode 100644 index 0000000..0464f8f --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/TCPackedCache.h @@ -0,0 +1,234 @@ +// Copyright (c) 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Geoff Pike
+//
+// This file provides a minimal cache that can hold a <key, value> pair
+// with little if any wasted space. The types of the key and value
+// must be unsigned integral types or at least have unsigned semantics
+// for >>, casting, and similar operations.
+//
+// Synchronization is not provided. However, the cache is implemented
+// as an array of cache entries whose type is chosen at compile time.
+// If a[i] is atomic on your hardware for the chosen array type then
+// raciness will not necessarily lead to bugginess. The cache entries
+// must be large enough to hold a partial key and a value packed
+// together. The partial keys are bit strings of length
+// kKeybits - kHashbits, and the values are bit strings of length kValuebits.
+//
+// In an effort to use minimal space, every cache entry represents
+// some <key, value> pair; the class provides no way to mark a cache
+// entry as empty or uninitialized. In practice, you may want to have
+// reserved keys or values to get around this limitation. For example, in
+// tcmalloc's PageID-to-sizeclass cache, a value of 0 is used as
+// "unknown sizeclass."
+//
+// Usage Considerations
+// --------------------
+//
+// kHashbits controls the size of the cache. The best value for
+// kHashbits will of course depend on the application. Perhaps try
+// tuning the value of kHashbits by measuring different values on your
+// favorite benchmark. Also remember not to be a pig; other
+// programs that need resources may suffer if you are.
+//
+// The main uses for this class will be when performance is
+// critical and there's a convenient type to hold the cache's
+// entries. As described above, the number of bits required
+// for a cache entry is (kKeybits - kHashbits) + kValuebits. Suppose
+// kKeybits + kValuebits is 43. Then it probably makes sense to
+// choose kHashbits >= 11 so that cache entries fit in a uint32.
+//
+// On the other hand, suppose kKeybits = kValuebits = 64. Then
+// using this class may be less worthwhile. You'll probably
+// be using 128 bits for each entry anyway, so maybe just pick
+// a hash function, H, and use an array indexed by H(key):
+//   void Put(K key, V value) { a_[H(key)] = pair<K, V>(key, value); }
+//   V GetOrDefault(K key, V default) { const pair<K, V> &p = a_[H(key)]; ... }
+// etc.
+//
+// Further Details
+// ---------------
+//
+// For caches used only by one thread, the following is true:
+// 1. For a cache c,
+//      (c.Put(key, value), c.GetOrDefault(key, 0)) == value
+//    and
+//      (c.Put(key, value), <...>, c.GetOrDefault(key, 0)) == value
+//    if the elided code contains no c.Put calls.
+//
+// 2. Has(key) will return false if no <key, value> pair with that key
+//    has ever been Put. However, a newly initialized cache will have
+//    some <key, value> pairs already present. When you create a new
+//    cache, you must specify an "initial value."
The initialization +// procedure is equivalent to Clear(initial_value), which is +// equivalent to Put(k, initial_value) for all keys k from 0 to +// 2^kHashbits - 1. +// +// 3. If key and key' differ then the only way Put(key, value) may +// cause Has(key') to change is that Has(key') may change from true to +// false. Furthermore, a Put() call that doesn't change Has(key') +// doesn't change GetOrDefault(key', ...) either. +// +// Implementation details: +// +// This is a direct-mapped cache with 2^kHashbits entries; +// the hash function simply takes the low bits of the key. +// So, we don't have to store the low bits of the key in the entries. +// Instead, an entry is the high bits of a key and a value, packed +// together. E.g., a 20 bit key and a 7 bit value only require +// a uint16 for each entry if kHashbits >= 11. +// +// Alternatives to this scheme will be added as needed. + +#ifndef TCMALLOC_PACKED_CACHE_INL_H__ +#define TCMALLOC_PACKED_CACHE_INL_H__ + +#ifndef WTF_CHANGES +#include "base/basictypes.h" // for COMPILE_ASSERT +#include "base/logging.h" // for DCHECK +#endif + +#ifndef DCHECK_EQ +#define DCHECK_EQ(val1, val2) ASSERT((val1) == (val2)) +#endif + +// A safe way of doing "(1 << n) - 1" -- without worrying about overflow +// Note this will all be resolved to a constant expression at compile-time +#define N_ONES_(IntType, N) \ + ( (N) == 0 ? 0 : ((static_cast<IntType>(1) << ((N)-1))-1 + \ + (static_cast<IntType>(1) << ((N)-1))) ) + +// The types K and V provide upper bounds on the number of valid keys +// and values, but we explicitly require the keys to be less than +// 2^kKeybits and the values to be less than 2^kValuebits. The size of +// the table is controlled by kHashbits, and the type of each entry in +// the cache is T. See also the big comment at the top of the file. +template <int kKeybits, typename T> +class PackedCache { + public: + typedef uintptr_t K; + typedef size_t V; + static const size_t kHashbits = 12; + static const size_t kValuebits = 8; + + explicit PackedCache(V initial_value) { + COMPILE_ASSERT(kKeybits <= sizeof(K) * 8, key_size); + COMPILE_ASSERT(kValuebits <= sizeof(V) * 8, value_size); + COMPILE_ASSERT(kHashbits <= kKeybits, hash_function); + COMPILE_ASSERT(kKeybits - kHashbits + kValuebits <= kTbits, + entry_size_must_be_big_enough); + Clear(initial_value); + } + + void Put(K key, V value) { + DCHECK_EQ(key, key & kKeyMask); + DCHECK_EQ(value, value & kValueMask); + array_[Hash(key)] = static_cast<T>(KeyToUpper(key) | value); + } + + bool Has(K key) const { + DCHECK_EQ(key, key & kKeyMask); + return KeyMatch(array_[Hash(key)], key); + } + + V GetOrDefault(K key, V default_value) const { + // As with other code in this class, we touch array_ as few times + // as we can. Assuming entries are read atomically (e.g., their + // type is uintptr_t on most hardware) then certain races are + // harmless. + DCHECK_EQ(key, key & kKeyMask); + T entry = array_[Hash(key)]; + return KeyMatch(entry, key) ? EntryToValue(entry) : default_value; + } + + void Clear(V value) { + DCHECK_EQ(value, value & kValueMask); + for (int i = 0; i < 1 << kHashbits; i++) { + array_[i] = static_cast<T>(value); + } + } + + private: + // We are going to pack a value and the upper part of a key into + // an entry of type T. The UPPER type is for the upper part of a key, + // after the key has been masked and shifted for inclusion in an entry. 
+ typedef T UPPER; + + static V EntryToValue(T t) { return t & kValueMask; } + + static UPPER EntryToUpper(T t) { return t & kUpperMask; } + + // If v is a V and u is an UPPER then you can create an entry by + // doing u | v. kHashbits determines where in a K to find the upper + // part of the key, and kValuebits determines where in the entry to put + // it. + static UPPER KeyToUpper(K k) { + const int shift = kHashbits - kValuebits; + // Assume kHashbits >= kValuebits. It would be easy to lift this assumption. + return static_cast<T>(k >> shift) & kUpperMask; + } + + // This is roughly the inverse of KeyToUpper(). Some of the key has been + // thrown away, since KeyToUpper() masks off the low bits of the key. + static K UpperToPartialKey(UPPER u) { + DCHECK_EQ(u, u & kUpperMask); + const int shift = kHashbits - kValuebits; + // Assume kHashbits >= kValuebits. It would be easy to lift this assumption. + return static_cast<K>(u) << shift; + } + + static size_t Hash(K key) { + return static_cast<size_t>(key) & N_ONES_(size_t, kHashbits); + } + + // Does the entry's partial key match the relevant part of the given key? + static bool KeyMatch(T entry, K key) { + return ((KeyToUpper(key) ^ entry) & kUpperMask) == 0; + } + + static const size_t kTbits = 8 * sizeof(T); + static const int kUpperbits = kKeybits - kHashbits; + + // For masking a K. + static const K kKeyMask = N_ONES_(K, kKeybits); + + // For masking a T. + static const T kUpperMask = N_ONES_(T, kUpperbits) << kValuebits; + + // For masking a V or a T. + static const V kValueMask = N_ONES_(V, kValuebits); + + T array_[1 << kHashbits]; +}; + +#undef N_ONES_ + +#endif // TCMALLOC_PACKED_CACHE_INL_H__ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/TCPageMap.h b/src/3rdparty/webkit/JavaScriptCore/wtf/TCPageMap.h new file mode 100644 index 0000000..21a87e4 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/TCPageMap.h @@ -0,0 +1,289 @@ +// Copyright (c) 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
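A minimal sketch of the PackedCache defined above in use. The instantiation (20-bit keys, 16-bit entries) and the sample keys are illustrative assumptions; 0 is reserved as the "unknown" value, as the tcmalloc sizeclass example in the header comment suggests:

    void packedCacheExample()
    {
        // Entry width check: (kKeybits - kHashbits) + kValuebits = (20 - 12) + 8 = 16,
        // so unsigned short entries are exactly wide enough.
        PackedCache<20, unsigned short> cache(0);    // every slot starts as 0 ("unknown")

        cache.Put(0x12345, 42);                      // Hash(0x12345) = 0x12345 & 0xFFF = 0x345
        size_t v = cache.GetOrDefault(0x12345, 0);   // 42
        (void)v;

        // Keys that share the low kHashbits (12) bits collide: this Put evicts the
        // entry above, and GetOrDefault(0x12345, 0) falls back to the default again.
        cache.Put(0x02345, 7);
    }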
+
+// ---
+// Author: Sanjay Ghemawat <opensource@google.com>
+//
+// A data structure used by the caching malloc. It maps from page# to
+// a pointer that contains info about that page. We use two
+// representations: one for 32-bit addresses, and another for 64-bit
+// addresses. Both representations provide the same interface. The
+// first representation is implemented as a flat array, the second as
+// a three-level radix tree that strips away approximately 1/3rd of
+// the bits every time.
+//
+// The BITS parameter should be the number of bits required to hold
+// a page number. E.g., with 32-bit pointers and 4K pages (i.e.,
+// page offset fits in lower 12 bits), BITS == 20.
+
+#ifndef TCMALLOC_PAGEMAP_H__
+#define TCMALLOC_PAGEMAP_H__
+
+#if HAVE(STDINT_H)
+#include <stdint.h>
+#elif HAVE(INTTYPES_H)
+#include <inttypes.h>
+#else
+#include <sys/types.h>
+#endif
+
+#include <string.h>
+
+#include "Assertions.h"
+
+// Single-level array
+template <int BITS>
+class TCMalloc_PageMap1 {
+ private:
+  void** array_;
+
+ public:
+  typedef uintptr_t Number;
+
+  void init(void* (*allocator)(size_t)) {
+    array_ = reinterpret_cast<void**>((*allocator)(sizeof(void*) << BITS));
+    memset(array_, 0, sizeof(void*) << BITS);
+  }
+
+  // Ensure that the map contains initialized entries "x .. x+n-1".
+  // Returns true if successful, false if we could not allocate memory.
+  bool Ensure(Number x, size_t n) {
+    // Nothing to do since the flat array was allocated at start
+    return true;
+  }
+
+  void PreallocateMoreMemory() {}
+
+  // REQUIRES "k" is in range "[0,2^BITS-1]".
+  // REQUIRES "k" has been ensured before.
+  //
+  // Return the current value for KEY. Returns "Value()" if not
+  // yet set.
+  void* get(Number k) const {
+    return array_[k];
+  }
+
+  // REQUIRES "k" is in range "[0,2^BITS-1]".
+  // REQUIRES "k" has been ensured before.
+  //
+  // Sets the value for KEY.
+  void set(Number k, void* v) {
+    array_[k] = v;
+  }
+};
+
+// Two-level radix tree
+template <int BITS>
+class TCMalloc_PageMap2 {
+ private:
+  // Put 32 entries in the root and (2^BITS)/32 entries in each leaf.
+ static const int ROOT_BITS = 5; + static const int ROOT_LENGTH = 1 << ROOT_BITS; + + static const int LEAF_BITS = BITS - ROOT_BITS; + static const int LEAF_LENGTH = 1 << LEAF_BITS; + + // Leaf node + struct Leaf { + void* values[LEAF_LENGTH]; + }; + + Leaf* root_[ROOT_LENGTH]; // Pointers to 32 child nodes + void* (*allocator_)(size_t); // Memory allocator + + public: + typedef uintptr_t Number; + + void init(void* (*allocator)(size_t)) { + allocator_ = allocator; + memset(root_, 0, sizeof(root_)); + } + + void* get(Number k) const { + ASSERT(k >> BITS == 0); + const Number i1 = k >> LEAF_BITS; + const Number i2 = k & (LEAF_LENGTH-1); + return root_[i1]->values[i2]; + } + + void set(Number k, void* v) { + ASSERT(k >> BITS == 0); + const Number i1 = k >> LEAF_BITS; + const Number i2 = k & (LEAF_LENGTH-1); + root_[i1]->values[i2] = v; + } + + bool Ensure(Number start, size_t n) { + for (Number key = start; key <= start + n - 1; ) { + const Number i1 = key >> LEAF_BITS; + + // Make 2nd level node if necessary + if (root_[i1] == NULL) { + Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf))); + if (leaf == NULL) return false; + memset(leaf, 0, sizeof(*leaf)); + root_[i1] = leaf; + } + + // Advance key past whatever is covered by this leaf node + key = ((key >> LEAF_BITS) + 1) << LEAF_BITS; + } + return true; + } + + void PreallocateMoreMemory() { + // Allocate enough to keep track of all possible pages + Ensure(0, 1 << BITS); + } + +#ifdef WTF_CHANGES + template<class Visitor, class MemoryReader> + void visit(const Visitor& visitor, const MemoryReader& reader) + { + for (int i = 0; i < ROOT_LENGTH; i++) { + if (!root_[i]) + continue; + + Leaf* l = reader(reinterpret_cast<Leaf*>(root_[i])); + for (int j = 0; j < LEAF_LENGTH; j += visitor.visit(l->values[j])) + ; + } + } +#endif +}; + +// Three-level radix tree +template <int BITS> +class TCMalloc_PageMap3 { + private: + // How many bits should we consume at each interior level + static const int INTERIOR_BITS = (BITS + 2) / 3; // Round-up + static const int INTERIOR_LENGTH = 1 << INTERIOR_BITS; + + // How many bits should we consume at leaf level + static const int LEAF_BITS = BITS - 2*INTERIOR_BITS; + static const int LEAF_LENGTH = 1 << LEAF_BITS; + + // Interior node + struct Node { + Node* ptrs[INTERIOR_LENGTH]; + }; + + // Leaf node + struct Leaf { + void* values[LEAF_LENGTH]; + }; + + Node* root_; // Root of radix tree + void* (*allocator_)(size_t); // Memory allocator + + Node* NewNode() { + Node* result = reinterpret_cast<Node*>((*allocator_)(sizeof(Node))); + if (result != NULL) { + memset(result, 0, sizeof(*result)); + } + return result; + } + + public: + typedef uintptr_t Number; + + void init(void* (*allocator)(size_t)) { + allocator_ = allocator; + root_ = NewNode(); + } + + void* get(Number k) const { + ASSERT(k >> BITS == 0); + const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS); + const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1); + const Number i3 = k & (LEAF_LENGTH-1); + return reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2])->values[i3]; + } + + void set(Number k, void* v) { + ASSERT(k >> BITS == 0); + const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS); + const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1); + const Number i3 = k & (LEAF_LENGTH-1); + reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2])->values[i3] = v; + } + + bool Ensure(Number start, size_t n) { + for (Number key = start; key <= start + n - 1; ) { + const Number i1 = key >> (LEAF_BITS + INTERIOR_BITS); + const Number i2 = (key >> 
LEAF_BITS) & (INTERIOR_LENGTH-1); + + // Make 2nd level node if necessary + if (root_->ptrs[i1] == NULL) { + Node* n = NewNode(); + if (n == NULL) return false; + root_->ptrs[i1] = n; + } + + // Make leaf node if necessary + if (root_->ptrs[i1]->ptrs[i2] == NULL) { + Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf))); + if (leaf == NULL) return false; + memset(leaf, 0, sizeof(*leaf)); + root_->ptrs[i1]->ptrs[i2] = reinterpret_cast<Node*>(leaf); + } + + // Advance key past whatever is covered by this leaf node + key = ((key >> LEAF_BITS) + 1) << LEAF_BITS; + } + return true; + } + + void PreallocateMoreMemory() { + } + +#ifdef WTF_CHANGES + template<class Visitor, class MemoryReader> + void visit(const Visitor& visitor, const MemoryReader& reader) { + Node* root = reader(root_); + for (int i = 0; i < INTERIOR_LENGTH; i++) { + if (!root->ptrs[i]) + continue; + + Node* n = reader(root->ptrs[i]); + for (int j = 0; j < INTERIOR_LENGTH; j++) { + if (!n->ptrs[j]) + continue; + + Leaf* l = reader(reinterpret_cast<Leaf*>(n->ptrs[j])); + for (int k = 0; k < LEAF_LENGTH; k += visitor.visit(l->values[k])) + ; + } + } + } +#endif +}; + +#endif // TCMALLOC_PAGEMAP_H__ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/TCSpinLock.h b/src/3rdparty/webkit/JavaScriptCore/wtf/TCSpinLock.h new file mode 100644 index 0000000..74c02f3 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/TCSpinLock.h @@ -0,0 +1,239 @@ +// Copyright (c) 2005, 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
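To make the page maps above concrete, a sketch of driving them, assuming a malloc-backed allocator callback; the BITS values and the spanInfo parameter are illustrative:

    #include <cstdlib>
    #include <stdint.h>

    static void* rawAlloc(size_t bytes) { return std::malloc(bytes); }  // hypothetical backing allocator

    // Flat map for 32-bit pointers with 4K pages: BITS == 20 (per the file comment).
    static TCMalloc_PageMap1<20> flatMap;

    // Three-level map for a larger space. With BITS == 35, INTERIOR_BITS = (35 + 2) / 3 = 12
    // and LEAF_BITS = 35 - 2*12 = 11, so a page number k is sliced top-down as
    // i1 = k >> 23, i2 = (k >> 11) & 0xFFF, i3 = k & 0x7FF.
    static TCMalloc_PageMap3<35> radixMap;

    void setup()
    {
        flatMap.init(rawAlloc);   // eagerly allocates all 2^20 slots (4MB of pointers on 32-bit)
        radixMap.init(rawAlloc);  // allocates only the root node
    }

    void record(uintptr_t page, void* spanInfo)  // spanInfo: hypothetical per-page metadata
    {
        flatMap.set(page, spanInfo);             // REQUIRES page < 2^20
        if (radixMap.Ensure(page, 1))            // interior/leaf nodes are built on demand
            radixMap.set(page, spanInfo);
    }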
+ +// --- +// Author: Sanjay Ghemawat <opensource@google.com> + +#ifndef TCMALLOC_INTERNAL_SPINLOCK_H__ +#define TCMALLOC_INTERNAL_SPINLOCK_H__ + +#if (PLATFORM(X86) || PLATFORM(PPC)) && (COMPILER(GCC) || COMPILER(MSVC)) + +#include <time.h> /* For nanosleep() */ + +#if !PLATFORM(WIN_OS) +#include <sched.h> /* For sched_yield() */ +#endif + +#if HAVE(STDINT_H) +#include <stdint.h> +#elif HAVE(INTTYPES_H) +#include <inttypes.h> +#else +#include <sys/types.h> +#endif + +#if PLATFORM(WIN_OS) +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include <windows.h> +#endif + +static void TCMalloc_SlowLock(volatile unsigned int* lockword); + +// The following is a struct so that it can be initialized at compile time +struct TCMalloc_SpinLock { + + inline void Lock() { + int r; +#if COMPILER(GCC) +#if PLATFORM(X86) + __asm__ __volatile__ + ("xchgl %0, %1" + : "=r"(r), "=m"(lockword_) + : "0"(1), "m"(lockword_) + : "memory"); +#else + volatile unsigned int *lockword_ptr = &lockword_; + __asm__ __volatile__ + ("1: lwarx %0, 0, %1\n\t" + "stwcx. %2, 0, %1\n\t" + "bne- 1b\n\t" + "isync" + : "=&r" (r), "=r" (lockword_ptr) + : "r" (1), "1" (lockword_ptr) + : "memory"); +#endif +#elif COMPILER(MSVC) + __asm { + mov eax, this ; store &lockword_ (which is this+0) in eax + mov ebx, 1 ; store 1 in ebx + xchg [eax], ebx ; exchange lockword_ and 1 + mov r, ebx ; store old value of lockword_ in r + } +#endif + if (r) TCMalloc_SlowLock(&lockword_); + } + + inline void Unlock() { +#if COMPILER(GCC) +#if PLATFORM(X86) + __asm__ __volatile__ + ("movl $0, %0" + : "=m"(lockword_) + : "m" (lockword_) + : "memory"); +#else + __asm__ __volatile__ + ("isync\n\t" + "eieio\n\t" + "stw %1, %0" +#if PLATFORM(DARWIN) || PLATFORM(PPC) + : "=o" (lockword_) +#else + : "=m" (lockword_) +#endif + : "r" (0) + : "memory"); +#endif +#elif COMPILER(MSVC) + __asm { + mov eax, this ; store &lockword_ (which is this+0) in eax + mov [eax], 0 ; set lockword_ to 0 + } +#endif + } + // Report if we think the lock can be held by this thread. + // When the lock is truly held by the invoking thread + // we will always return true. + // Indended to be used as CHECK(lock.IsHeld()); + inline bool IsHeld() const { + return lockword_ != 0; + } + + inline void Init() { lockword_ = 0; } + + volatile unsigned int lockword_; +}; + +#define SPINLOCK_INITIALIZER { 0 } + +static void TCMalloc_SlowLock(volatile unsigned int* lockword) { +#if !PLATFORM(WIN_OS) + sched_yield(); // Yield immediately since fast path failed +#else + SwitchToThread(); +#endif + while (true) { + int r; +#if COMPILER(GCC) +#if PLATFORM(X86) + __asm__ __volatile__ + ("xchgl %0, %1" + : "=r"(r), "=m"(*lockword) + : "0"(1), "m"(*lockword) + : "memory"); + +#else + int tmp = 1; + __asm__ __volatile__ + ("1: lwarx %0, 0, %1\n\t" + "stwcx. %2, 0, %1\n\t" + "bne- 1b\n\t" + "isync" + : "=&r" (r), "=r" (lockword) + : "r" (tmp), "1" (lockword) + : "memory"); +#endif +#elif COMPILER(MSVC) + __asm { + mov eax, lockword ; assign lockword into eax + mov ebx, 1 ; assign 1 into ebx + xchg [eax], ebx ; exchange *lockword and 1 + mov r, ebx ; store old value of *lockword in r + } +#endif + if (!r) { + return; + } + + // This code was adapted from the ptmalloc2 implementation of + // spinlocks which would sched_yield() upto 50 times before + // sleeping once for a few milliseconds. Mike Burrows suggested + // just doing one sched_yield() outside the loop and always + // sleeping after that. This change helped a great deal on the + // performance of spinlocks under high contention. 
A test program + // with 10 threads on a dual Xeon (four virtual processors) went + // from taking 30 seconds to 16 seconds. + + // Sleep for a few milliseconds +#if PLATFORM(WIN_OS) + Sleep(2); +#else + struct timespec tm; + tm.tv_sec = 0; + tm.tv_nsec = 2000001; + nanosleep(&tm, NULL); +#endif + } +} + +#else + +#include <pthread.h> + +// Portable version +struct TCMalloc_SpinLock { + pthread_mutex_t private_lock_; + + inline void Init() { + if (pthread_mutex_init(&private_lock_, NULL) != 0) CRASH(); + } + inline void Finalize() { + if (pthread_mutex_destroy(&private_lock_) != 0) CRASH(); + } + inline void Lock() { + if (pthread_mutex_lock(&private_lock_) != 0) CRASH(); + } + inline void Unlock() { + if (pthread_mutex_unlock(&private_lock_) != 0) CRASH(); + } +}; + +#define SPINLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER } + +#endif + +// Corresponding locker object that arranges to acquire a spinlock for +// the duration of a C++ scope. +class TCMalloc_SpinLockHolder { + private: + TCMalloc_SpinLock* lock_; + public: + inline explicit TCMalloc_SpinLockHolder(TCMalloc_SpinLock* l) + : lock_(l) { l->Lock(); } + inline ~TCMalloc_SpinLockHolder() { lock_->Unlock(); } +}; + +// Short-hands for convenient use by tcmalloc.cc +typedef TCMalloc_SpinLock SpinLock; +typedef TCMalloc_SpinLockHolder SpinLockHolder; + +#endif // TCMALLOC_INTERNAL_SPINLOCK_H__ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/TCSystemAlloc.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/TCSystemAlloc.cpp new file mode 100644 index 0000000..9c147b2 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/TCSystemAlloc.cpp @@ -0,0 +1,437 @@ +// Copyright (c) 2005, 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
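The spinlock above is designed for compile-time initialization plus scoped holding; TCSystemAlloc.cpp below uses exactly this pattern. A condensed sketch with an illustrative guarded counter:

    static TCMalloc_SpinLock counterLock = SPINLOCK_INITIALIZER;  // no constructor needed
    static int sharedCounter = 0;                                 // illustrative shared state

    void safeIncrement()
    {
        TCMalloc_SpinLockHolder holder(&counterLock);  // Lock() now, Unlock() at scope exit
        ++sharedCounter;
    }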
+ +// --- +// Author: Sanjay Ghemawat + +#include "config.h" +#if HAVE(STDINT_H) +#include <stdint.h> +#elif HAVE(INTTYPES_H) +#include <inttypes.h> +#else +#include <sys/types.h> +#endif +#if PLATFORM(WIN_OS) +#include "windows.h" +#else +#include <errno.h> +#include <unistd.h> +#include <sys/mman.h> +#endif +#include <fcntl.h> +#include "Assertions.h" +#include "TCSystemAlloc.h" +#include "TCSpinLock.h" +#include "UnusedParam.h" + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +// Structure for discovering alignment +union MemoryAligner { + void* p; + double d; + size_t s; +}; + +static SpinLock spinlock = SPINLOCK_INITIALIZER; + +// Page size is initialized on demand +static size_t pagesize = 0; + +// Configuration parameters. +// +// if use_devmem is true, either use_sbrk or use_mmap must also be true. +// For 2.2 kernels, it looks like the sbrk address space (500MBish) and +// the mmap address space (1300MBish) are disjoint, so we need both allocators +// to get as much virtual memory as possible. +#ifndef WTF_CHANGES +static bool use_devmem = false; +#endif + +#if HAVE(SBRK) +static bool use_sbrk = false; +#endif + +#if HAVE(MMAP) +static bool use_mmap = true; +#endif + +#if HAVE(VIRTUALALLOC) +static bool use_VirtualAlloc = true; +#endif + +// Flags to keep us from retrying allocators that failed. +static bool devmem_failure = false; +static bool sbrk_failure = false; +static bool mmap_failure = false; +static bool VirtualAlloc_failure = false; + +#ifndef WTF_CHANGES +DEFINE_int32(malloc_devmem_start, 0, + "Physical memory starting location in MB for /dev/mem allocation." + " Setting this to 0 disables /dev/mem allocation"); +DEFINE_int32(malloc_devmem_limit, 0, + "Physical memory limit location in MB for /dev/mem allocation." + " Setting this to 0 means no limit."); +#else +static const int32_t FLAGS_malloc_devmem_start = 0; +static const int32_t FLAGS_malloc_devmem_limit = 0; +#endif + +#if HAVE(SBRK) + +static void* TrySbrk(size_t size, size_t *actual_size, size_t alignment) { + size = ((size + alignment - 1) / alignment) * alignment; + + // could theoretically return the "extra" bytes here, but this + // is simple and correct. + if (actual_size) + *actual_size = size; + + void* result = sbrk(size); + if (result == reinterpret_cast<void*>(-1)) { + sbrk_failure = true; + return NULL; + } + + // Is it aligned? + uintptr_t ptr = reinterpret_cast<uintptr_t>(result); + if ((ptr & (alignment-1)) == 0) return result; + + // Try to get more memory for alignment + size_t extra = alignment - (ptr & (alignment-1)); + void* r2 = sbrk(extra); + if (reinterpret_cast<uintptr_t>(r2) == (ptr + size)) { + // Contiguous with previous result + return reinterpret_cast<void*>(ptr + extra); + } + + // Give up and ask for "size + alignment - 1" bytes so + // that we can find an aligned region within it. 
+ result = sbrk(size + alignment - 1); + if (result == reinterpret_cast<void*>(-1)) { + sbrk_failure = true; + return NULL; + } + ptr = reinterpret_cast<uintptr_t>(result); + if ((ptr & (alignment-1)) != 0) { + ptr += alignment - (ptr & (alignment-1)); + } + return reinterpret_cast<void*>(ptr); +} + +#endif /* HAVE(SBRK) */ + +#if HAVE(MMAP) + +static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) { + // Enforce page alignment + if (pagesize == 0) pagesize = getpagesize(); + if (alignment < pagesize) alignment = pagesize; + size = ((size + alignment - 1) / alignment) * alignment; + + // could theoretically return the "extra" bytes here, but this + // is simple and correct. + if (actual_size) + *actual_size = size; + + // Ask for extra memory if alignment > pagesize + size_t extra = 0; + if (alignment > pagesize) { + extra = alignment - pagesize; + } + void* result = mmap(NULL, size + extra, + PROT_READ | PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS, + -1, 0); + if (result == reinterpret_cast<void*>(MAP_FAILED)) { + mmap_failure = true; + return NULL; + } + + // Adjust the return memory so it is aligned + uintptr_t ptr = reinterpret_cast<uintptr_t>(result); + size_t adjust = 0; + if ((ptr & (alignment - 1)) != 0) { + adjust = alignment - (ptr & (alignment - 1)); + } + + // Return the unused memory to the system + if (adjust > 0) { + munmap(reinterpret_cast<void*>(ptr), adjust); + } + if (adjust < extra) { + munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust); + } + + ptr += adjust; + return reinterpret_cast<void*>(ptr); +} + +#endif /* HAVE(MMAP) */ + +#if HAVE(VIRTUALALLOC) + +static void* TryVirtualAlloc(size_t size, size_t *actual_size, size_t alignment) { + // Enforce page alignment + if (pagesize == 0) { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + pagesize = system_info.dwPageSize; + } + + if (alignment < pagesize) alignment = pagesize; + size = ((size + alignment - 1) / alignment) * alignment; + + // could theoretically return the "extra" bytes here, but this + // is simple and correct. + if (actual_size) + *actual_size = size; + + // Ask for extra memory if alignment > pagesize + size_t extra = 0; + if (alignment > pagesize) { + extra = alignment - pagesize; + } + void* result = VirtualAlloc(NULL, size + extra, + MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, + PAGE_READWRITE); + + if (result == NULL) { + VirtualAlloc_failure = true; + return NULL; + } + + // Adjust the return memory so it is aligned + uintptr_t ptr = reinterpret_cast<uintptr_t>(result); + size_t adjust = 0; + if ((ptr & (alignment - 1)) != 0) { + adjust = alignment - (ptr & (alignment - 1)); + } + + // Return the unused memory to the system - we'd like to release but the best we can do + // is decommit, since Windows only lets you free the whole allocation. + if (adjust > 0) { + VirtualFree(reinterpret_cast<void*>(ptr), adjust, MEM_DECOMMIT); + } + if (adjust < extra) { + VirtualFree(reinterpret_cast<void*>(ptr + adjust + size), extra-adjust, MEM_DECOMMIT); + } + + ptr += adjust; + return reinterpret_cast<void*>(ptr); +} + +#endif /* HAVE(MMAP) */ + +#ifndef WTF_CHANGES +static void* TryDevMem(size_t size, size_t *actual_size, size_t alignment) { + static bool initialized = false; + static off_t physmem_base; // next physical memory address to allocate + static off_t physmem_limit; // maximum physical address allowed + static int physmem_fd; // file descriptor for /dev/mem + + // Check if we should use /dev/mem allocation. 
Note that it may take + // a while to get this flag initialized, so meanwhile we fall back to + // the next allocator. (It looks like 7MB gets allocated before + // this flag gets initialized -khr.) + if (FLAGS_malloc_devmem_start == 0) { + // NOTE: not a devmem_failure - we'd like TCMalloc_SystemAlloc to + // try us again next time. + return NULL; + } + + if (!initialized) { + physmem_fd = open("/dev/mem", O_RDWR); + if (physmem_fd < 0) { + devmem_failure = true; + return NULL; + } + physmem_base = FLAGS_malloc_devmem_start*1024LL*1024LL; + physmem_limit = FLAGS_malloc_devmem_limit*1024LL*1024LL; + initialized = true; + } + + // Enforce page alignment + if (pagesize == 0) pagesize = getpagesize(); + if (alignment < pagesize) alignment = pagesize; + size = ((size + alignment - 1) / alignment) * alignment; + + // could theoretically return the "extra" bytes here, but this + // is simple and correct. + if (actual_size) + *actual_size = size; + + // Ask for extra memory if alignment > pagesize + size_t extra = 0; + if (alignment > pagesize) { + extra = alignment - pagesize; + } + + // check to see if we have any memory left + if (physmem_limit != 0 && physmem_base + size + extra > physmem_limit) { + devmem_failure = true; + return NULL; + } + void *result = mmap(0, size + extra, PROT_READ | PROT_WRITE, + MAP_SHARED, physmem_fd, physmem_base); + if (result == reinterpret_cast<void*>(MAP_FAILED)) { + devmem_failure = true; + return NULL; + } + uintptr_t ptr = reinterpret_cast<uintptr_t>(result); + + // Adjust the return memory so it is aligned + size_t adjust = 0; + if ((ptr & (alignment - 1)) != 0) { + adjust = alignment - (ptr & (alignment - 1)); + } + + // Return the unused virtual memory to the system + if (adjust > 0) { + munmap(reinterpret_cast<void*>(ptr), adjust); + } + if (adjust < extra) { + munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust); + } + + ptr += adjust; + physmem_base += adjust + size; + + return reinterpret_cast<void*>(ptr); +} +#endif + +void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) { + // Discard requests that overflow + if (size + alignment < size) return NULL; + + SpinLockHolder lock_holder(&spinlock); + + // Enforce minimum alignment + if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner); + + // Try twice, once avoiding allocators that failed before, and once + // more trying all allocators even if they failed before. 
+ for (int i = 0; i < 2; i++) { + +#ifndef WTF_CHANGES + if (use_devmem && !devmem_failure) { + void* result = TryDevMem(size, actual_size, alignment); + if (result != NULL) return result; + } +#endif + +#if HAVE(SBRK) + if (use_sbrk && !sbrk_failure) { + void* result = TrySbrk(size, actual_size, alignment); + if (result != NULL) return result; + } +#endif + +#if HAVE(MMAP) + if (use_mmap && !mmap_failure) { + void* result = TryMmap(size, actual_size, alignment); + if (result != NULL) return result; + } +#endif + +#if HAVE(VIRTUALALLOC) + if (use_VirtualAlloc && !VirtualAlloc_failure) { + void* result = TryVirtualAlloc(size, actual_size, alignment); + if (result != NULL) return result; + } +#endif + + // nothing worked - reset failure flags and try again + devmem_failure = false; + sbrk_failure = false; + mmap_failure = false; + VirtualAlloc_failure = false; + } + return NULL; +} + +void TCMalloc_SystemRelease(void* start, size_t length) +{ + UNUSED_PARAM(start); + UNUSED_PARAM(length); +#if HAVE(MADV_DONTNEED) + if (FLAGS_malloc_devmem_start) { + // It's not safe to use MADV_DONTNEED if we've been mapping + // /dev/mem for heap memory + return; + } + if (pagesize == 0) pagesize = getpagesize(); + const size_t pagemask = pagesize - 1; + + size_t new_start = reinterpret_cast<size_t>(start); + size_t end = new_start + length; + size_t new_end = end; + + // Round up the starting address and round down the ending address + // to be page aligned: + new_start = (new_start + pagesize - 1) & ~pagemask; + new_end = new_end & ~pagemask; + + ASSERT((new_start & pagemask) == 0); + ASSERT((new_end & pagemask) == 0); + ASSERT(new_start >= reinterpret_cast<size_t>(start)); + ASSERT(new_end <= end); + + if (new_end > new_start) { + // Note -- ignoring most return codes, because if this fails it + // doesn't matter... + while (madvise(reinterpret_cast<char*>(new_start), new_end - new_start, + MADV_DONTNEED) == -1 && + errno == EAGAIN) { + // NOP + } + return; + } +#endif + +#if HAVE(MMAP) + void* newAddress = mmap(start, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + // If the mmap failed then that's ok, we just won't return the memory to the system. + ASSERT_UNUSED(newAddress, newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED)); + return; +#endif +} + +#if HAVE(VIRTUALALLOC) +void TCMalloc_SystemCommit(void* start, size_t length) +{ + UNUSED_PARAM(start); + UNUSED_PARAM(length); +} +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/TCSystemAlloc.h b/src/3rdparty/webkit/JavaScriptCore/wtf/TCSystemAlloc.h new file mode 100644 index 0000000..0caf718 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/TCSystemAlloc.h @@ -0,0 +1,71 @@ +// Copyright (c) 2005, 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
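A hedged sketch of a caller of the TCMalloc_SystemAlloc defined above; the sizes are illustrative, and the allocator may round the request up, reporting the true size through actual_size:

    void systemAllocExample()
    {
        size_t actual = 0;
        void* block = TCMalloc_SystemAlloc(1 << 20, &actual, 4096);  // 1MB, page-aligned
        if (!block)
            return;  // every allocator failed, twice over
        // actual >= 1MB: the winning allocator rounded the request up to a
        // multiple of the alignment.
        // ... use block ...
        TCMalloc_SystemRelease(block, actual);  // hint that the OS may reclaim these pages
    }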
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// --- +// Author: Sanjay Ghemawat +// +// Routine that uses sbrk/mmap to allocate memory from the system. +// Useful for implementing malloc. + +#ifndef TCMALLOC_SYSTEM_ALLOC_H__ +#define TCMALLOC_SYSTEM_ALLOC_H__ + +// REQUIRES: "alignment" is a power of two or "0" to indicate default alignment +// +// Allocate and return "N" bytes of zeroed memory. +// +// If actual_bytes is NULL then the returned memory is exactly the +// requested size. If actual bytes is non-NULL then the allocator +// may optionally return more bytes than asked for (i.e. return an +// entire "huge" page if a huge page allocator is in use). +// +// The returned pointer is a multiple of "alignment" if non-zero. +// +// Returns NULL when out of memory. +extern void* TCMalloc_SystemAlloc(size_t bytes, size_t *actual_bytes, + size_t alignment = 0); + +// This call is a hint to the operating system that the pages +// contained in the specified range of memory will not be used for a +// while, and can be released for use by other processes or the OS. +// Pages which are released in this way may be destroyed (zeroed) by +// the OS. The benefit of this function is that it frees memory for +// use by the system, the cost is that the pages are faulted back into +// the address space next time they are touched, which can impact +// performance. (Only pages fully covered by the memory region will +// be released, partial pages will not.) +extern void TCMalloc_SystemRelease(void* start, size_t length); + +#if HAVE(VIRTUALALLOC) +extern void TCMalloc_SystemCommit(void* start, size_t length); +#else +inline void TCMalloc_SystemCommit(void*, size_t) { } +#endif + +#endif /* TCMALLOC_SYSTEM_ALLOC_H__ */ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadSpecific.h b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadSpecific.h new file mode 100644 index 0000000..7603802 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadSpecific.h @@ -0,0 +1,229 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2009 Jian Li <jianli@chromium.org> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. 
("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Thread local storage is implemented by using either pthread API or Windows + * native API. There is subtle semantic discrepancy for the cleanup function + * implementation as noted below: + * @ In pthread implementation, the destructor function will be called + * repeatedly if there is still non-NULL value associated with the function. + * @ In Windows native implementation, the destructor function will be called + * only once. + * This semantic discrepancy does not impose any problem because nowhere in + * WebKit the repeated call bahavior is utilized. + */ + +#ifndef WTF_ThreadSpecific_h +#define WTF_ThreadSpecific_h + +#include <wtf/Noncopyable.h> + +#if USE(PTHREADS) +#include <pthread.h> +#elif PLATFORM(WIN_OS) +#include <windows.h> +#endif + +namespace WTF { + +#if !USE(PTHREADS) && PLATFORM(WIN_OS) +// ThreadSpecificThreadExit should be called each time when a thread is detached. +// This is done automatically for threads created with WTF::createThread. +void ThreadSpecificThreadExit(); +#endif + +template<typename T> class ThreadSpecific : Noncopyable { +public: + ThreadSpecific(); + T* operator->(); + operator T*(); + T& operator*(); + ~ThreadSpecific(); + +private: +#if !USE(PTHREADS) && PLATFORM(WIN_OS) + friend void ThreadSpecificThreadExit(); +#endif + + T* get(); + void set(T*); + void static destroy(void* ptr); + +#if USE(PTHREADS) || PLATFORM(WIN_OS) + struct Data : Noncopyable { + Data(T* value, ThreadSpecific<T>* owner) : value(value), owner(owner) {} + + T* value; + ThreadSpecific<T>* owner; +#if !USE(PTHREADS) + void (*destructor)(void*); +#endif + }; +#endif + +#if USE(PTHREADS) + pthread_key_t m_key; +#elif PLATFORM(WIN_OS) + int m_index; +#endif +}; + +#if USE(PTHREADS) +template<typename T> +inline ThreadSpecific<T>::ThreadSpecific() +{ + int error = pthread_key_create(&m_key, destroy); + if (error) + CRASH(); +} + +template<typename T> +inline ThreadSpecific<T>::~ThreadSpecific() +{ + pthread_key_delete(m_key); // Does not invoke destructor functions. +} + +template<typename T> +inline T* ThreadSpecific<T>::get() +{ + Data* data = static_cast<Data*>(pthread_getspecific(m_key)); + return data ? data->value : 0; +} + +template<typename T> +inline void ThreadSpecific<T>::set(T* ptr) +{ + ASSERT(!get()); + pthread_setspecific(m_key, new Data(ptr, this)); +} + +#elif PLATFORM(WIN_OS) + +// The maximum number of TLS keys that can be created. For simplification, we assume that: +// 1) Once the instance of ThreadSpecific<> is created, it will not be destructed until the program dies. 
+// 2) We do not need to hold many instances of ThreadSpecific<> data. This fixed number should be far enough. +const int kMaxTlsKeySize = 256; + +extern long g_tls_key_count; +extern DWORD g_tls_keys[kMaxTlsKeySize]; + +template<typename T> +inline ThreadSpecific<T>::ThreadSpecific() + : m_index(-1) +{ + DWORD tls_key = TlsAlloc(); + if (tls_key == TLS_OUT_OF_INDEXES) + CRASH(); + + m_index = InterlockedIncrement(&g_tls_key_count) - 1; + if (m_index >= kMaxTlsKeySize) + CRASH(); + g_tls_keys[m_index] = tls_key; +} + +template<typename T> +inline ThreadSpecific<T>::~ThreadSpecific() +{ + // Does not invoke destructor functions. They will be called from ThreadSpecificThreadExit when the thread is detached. + TlsFree(g_tls_keys[m_index]); +} + +template<typename T> +inline T* ThreadSpecific<T>::get() +{ + Data* data = static_cast<Data*>(TlsGetValue(g_tls_keys[m_index])); + return data ? data->value : 0; +} + +template<typename T> +inline void ThreadSpecific<T>::set(T* ptr) +{ + ASSERT(!get()); + Data* data = new Data(ptr, this); + data->destructor = &ThreadSpecific<T>::destroy; + TlsSetValue(g_tls_keys[m_index], data); +} + +#else +#error ThreadSpecific is not implemented for this platform. +#endif + +template<typename T> +inline void ThreadSpecific<T>::destroy(void* ptr) +{ + Data* data = static_cast<Data*>(ptr); + +#if USE(PTHREADS) + // We want get() to keep working while data destructor works, because it can be called indirectly by the destructor. + // Some pthreads implementations zero out the pointer before calling destroy(), so we temporarily reset it. + pthread_setspecific(data->owner->m_key, ptr); +#endif + + data->value->~T(); + fastFree(data->value); + +#if USE(PTHREADS) + pthread_setspecific(data->owner->m_key, 0); +#elif PLATFORM(WIN_OS) + TlsSetValue(g_tls_keys[data->owner->m_index], 0); +#else +#error ThreadSpecific is not implemented for this platform. +#endif + + delete data; +} + +template<typename T> +inline ThreadSpecific<T>::operator T*() +{ + T* ptr = static_cast<T*>(get()); + if (!ptr) { + // Set up thread-specific value's memory pointer before invoking constructor, in case any function it calls + // needs to access the value, to avoid recursion. + ptr = static_cast<T*>(fastMalloc(sizeof(T))); + set(ptr); + new (ptr) T; + } + return ptr; +} + +template<typename T> +inline T* ThreadSpecific<T>::operator->() +{ + return operator T*(); +} + +template<typename T> +inline T& ThreadSpecific<T>::operator*() +{ + return *operator T*(); +} + +} + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadSpecificWin.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadSpecificWin.cpp new file mode 100644 index 0000000..1a3febb --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadSpecificWin.cpp @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2009 Jian Li <jianli@chromium.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. 
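A minimal sketch of ThreadSpecific<T> from the header above; the Counter type and accessor are illustrative assumptions. The first dereference on each thread allocates and constructs that thread's private copy:

    struct Counter {
        Counter() : count(0) { }
        int count;
    };

    static ThreadSpecific<Counter>* callCounter = new ThreadSpecific<Counter>;  // deliberately leaked

    void countCallOnCurrentThread()
    {
        // operator-> goes through operator T*(), which lazily fastMalloc's and
        // placement-constructs this thread's Counter on first use.
        ++(*callCounter)->count;
    }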
If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#include "config.h" + +#include "ThreadSpecific.h" +#include <wtf/Noncopyable.h> + +#if USE(PTHREADS) +#error This file should not be compiled by ports that do not use Windows native ThreadSpecific implementation. +#endif + +namespace WTF { + +long g_tls_key_count = 0; +DWORD g_tls_keys[kMaxTlsKeySize]; + +void ThreadSpecificThreadExit() +{ + for (long i = 0; i < g_tls_key_count; i++) { + // The layout of ThreadSpecific<T>::Data does not depend on T. So we are safe to do the static cast to ThreadSpecific<int> in order to access its data member. + ThreadSpecific<int>::Data* data = static_cast<ThreadSpecific<int>::Data*>(TlsGetValue(g_tls_keys[i])); + if (data) + data->destructor(data); + } +} + +} // namespace WTF diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/Threading.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/Threading.cpp new file mode 100644 index 0000000..0179dcc --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/Threading.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "Threading.h" + +namespace WTF { + +struct NewThreadContext { + NewThreadContext(ThreadFunction entryPoint, void* data) + : entryPoint(entryPoint) + , data(data) + { } + + ThreadFunction entryPoint; + void* data; + + Mutex creationMutex; +}; + +static void* threadEntryPoint(void* contextData) +{ + NewThreadContext* context = reinterpret_cast<NewThreadContext*>(contextData); + + // Block until our creating thread has completed any extra setup work + { + MutexLocker locker(context->creationMutex); + } + + // Grab the info that we need out of the context, then deallocate it. 
+ ThreadFunction entryPoint = context->entryPoint; + void* data = context->data; + delete context; + + return entryPoint(data); +} + +ThreadIdentifier createThread(ThreadFunction entryPoint, void* data, const char* name) +{ + NewThreadContext* context = new NewThreadContext(entryPoint, data); + + // Prevent the thread body from executing until we've established the thread identifier + MutexLocker locker(context->creationMutex); + + return createThreadInternal(threadEntryPoint, context, name); +} + +#if PLATFORM(MAC) || PLATFORM(WIN) + +// This function is deprecated but needs to be kept around for backward +// compatibility. Use the 3-argument version of createThread above. + +ThreadIdentifier createThread(ThreadFunction entryPoint, void* data) +{ + return createThread(entryPoint, data, 0); +} +#endif + +} // namespace WTF diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/Threading.h b/src/3rdparty/webkit/JavaScriptCore/wtf/Threading.h new file mode 100644 index 0000000..156013d --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/Threading.h @@ -0,0 +1,276 @@ +/* + * Copyright (C) 2007, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Note: The implementations of InterlockedIncrement and InterlockedDecrement are based + * on atomic_increment and atomic_exchange_and_add from the Boost C++ Library. The license + * is virtually identical to the Apple license above but is included here for completeness. 
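A sketch of the createThread handshake defined above (the worker function and thread name are illustrative); the creation mutex guarantees the entry point cannot start running before createThread has produced the identifier. waitForThreadCompletion is declared alongside createThread in Threading.h below:

    static void* worker(void* data)
    {
        // ... thread body ...
        return data;
    }

    void spawnAndJoin()
    {
        ThreadIdentifier id = createThread(worker, 0, "ExampleWorker");
        void* result;
        waitForThreadCompletion(id, &result);  // joins and collects worker's return value
    }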
+ * + * Boost Software License - Version 1.0 - August 17th, 2003 + * + * Permission is hereby granted, free of charge, to any person or organization + * obtaining a copy of the software and accompanying documentation covered by + * this license (the "Software") to use, reproduce, display, distribute, + * execute, and transmit the Software, and to prepare derivative works of the + * Software, and to permit third-parties to whom the Software is furnished to + * do so, all subject to the following: + * + * The copyright notices in the Software and this entire statement, including + * the above license grant, this restriction and the following disclaimer, + * must be included in all copies of the Software, in whole or in part, and + * all derivative works of the Software, unless such copies or derivative + * works are solely in the form of machine-executable object code generated by + * a source language processor. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT + * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE + * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef Threading_h +#define Threading_h + +#if PLATFORM(WIN_CE) +#include <windows.h> +#endif + +#include <wtf/Assertions.h> +#include <wtf/Locker.h> +#include <wtf/Noncopyable.h> + +#if PLATFORM(WIN_OS) && !PLATFORM(WIN_CE) +#include <windows.h> +#elif PLATFORM(DARWIN) +#include <libkern/OSAtomic.h> +#elif COMPILER(GCC) +#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 2)) +#include <ext/atomicity.h> +#else +#include <bits/atomicity.h> +#endif +#endif + +#if USE(PTHREADS) +#include <pthread.h> +#elif PLATFORM(GTK) +#include <wtf/GOwnPtr.h> +typedef struct _GMutex GMutex; +typedef struct _GCond GCond; +#endif + +#if PLATFORM(QT) +#include <qglobal.h> +QT_BEGIN_NAMESPACE +class QMutex; +class QWaitCondition; +QT_END_NAMESPACE +#endif + +#include <stdint.h> + +// For portability, we do not use thread-safe statics natively supported by some compilers (e.g. gcc). 
+#define AtomicallyInitializedStatic(T, name) \ + WTF::lockAtomicallyInitializedStaticMutex(); \ + static T name; \ + WTF::unlockAtomicallyInitializedStaticMutex(); + +namespace WTF { + +typedef uint32_t ThreadIdentifier; +typedef void* (*ThreadFunction)(void* argument); + +// Returns 0 if thread creation failed +ThreadIdentifier createThread(ThreadFunction, void*, const char* threadName); +ThreadIdentifier createThreadInternal(ThreadFunction, void*, const char* threadName); + +ThreadIdentifier currentThread(); +bool isMainThread(); +int waitForThreadCompletion(ThreadIdentifier, void**); +void detachThread(ThreadIdentifier); + +#if USE(PTHREADS) +typedef pthread_mutex_t PlatformMutex; +typedef pthread_cond_t PlatformCondition; +#elif PLATFORM(GTK) +typedef GOwnPtr<GMutex> PlatformMutex; +typedef GOwnPtr<GCond> PlatformCondition; +#elif PLATFORM(QT) +typedef QT_PREPEND_NAMESPACE(QMutex)* PlatformMutex; +typedef QT_PREPEND_NAMESPACE(QWaitCondition)* PlatformCondition; +#elif PLATFORM(WIN_OS) +struct PlatformMutex { + CRITICAL_SECTION m_internalMutex; + size_t m_recursionCount; +}; +struct PlatformCondition { + size_t m_timedOut; + size_t m_blocked; + size_t m_waitingForRemoval; + HANDLE m_gate; + HANDLE m_queue; + HANDLE m_mutex; +}; +#else +typedef void* PlatformMutex; +typedef void* PlatformCondition; +#endif + +class Mutex : Noncopyable { +public: + Mutex(); + ~Mutex(); + + void lock(); + bool tryLock(); + void unlock(); + +public: + PlatformMutex& impl() { return m_mutex; } +private: + PlatformMutex m_mutex; +}; + +typedef Locker<Mutex> MutexLocker; + +class ThreadCondition : Noncopyable { +public: + ThreadCondition(); + ~ThreadCondition(); + + void wait(Mutex& mutex); + // Returns true if the condition was signaled before the timeout, false if the timeout was reached + bool timedWait(Mutex&, double interval); + void signal(); + void broadcast(); + +private: + PlatformCondition m_condition; +}; + +#if PLATFORM(WIN_OS) +#define WTF_USE_LOCKFREE_THREADSAFESHARED 1 + +#if COMPILER(MINGW) || COMPILER(MSVC7) || PLATFORM(WIN_CE) +inline void atomicIncrement(int* addend) { InterlockedIncrement(reinterpret_cast<long*>(addend)); } +inline int atomicDecrement(int* addend) { return InterlockedDecrement(reinterpret_cast<long*>(addend)); } +#else +inline void atomicIncrement(int volatile* addend) { InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); } +inline int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); } +#endif + +#elif PLATFORM(DARWIN) +#define WTF_USE_LOCKFREE_THREADSAFESHARED 1 + +inline void atomicIncrement(int volatile* addend) { OSAtomicIncrement32Barrier(const_cast<int*>(addend)); } +inline int atomicDecrement(int volatile* addend) { return OSAtomicDecrement32Barrier(const_cast<int*>(addend)); } + +#elif COMPILER(GCC) +#define WTF_USE_LOCKFREE_THREADSAFESHARED 1 + +inline void atomicIncrement(int volatile* addend) { __gnu_cxx::__atomic_add(addend, 1); } +inline int atomicDecrement(int volatile* addend) { return __gnu_cxx::__exchange_and_add(addend, -1) - 1; } + +#endif + +template<class T> class ThreadSafeShared : Noncopyable { +public: + ThreadSafeShared(int initialRefCount = 1) + : m_refCount(initialRefCount) + { + } + + void ref() + { +#if USE(LOCKFREE_THREADSAFESHARED) + atomicIncrement(&m_refCount); +#else + MutexLocker locker(m_mutex); + ++m_refCount; +#endif + } + + void deref() + { +#if USE(LOCKFREE_THREADSAFESHARED) + if (atomicDecrement(&m_refCount) <= 0) +#else + { + MutexLocker locker(m_mutex); + 
--m_refCount; + } + if (m_refCount <= 0) +#endif + delete static_cast<T*>(this); + } + + bool hasOneRef() + { + return refCount() == 1; + } + + int refCount() const + { +#if !USE(LOCKFREE_THREADSAFESHARED) + MutexLocker locker(m_mutex); +#endif + return static_cast<int const volatile &>(m_refCount); + } + +private: + int m_refCount; +#if !USE(LOCKFREE_THREADSAFESHARED) + mutable Mutex m_mutex; +#endif +}; + +// This function must be called from the main thread. It is safe to call it repeatedly. +// Darwin is an exception to this rule: it is OK to call it from any thread, the only requirement is that the calls are not reentrant. +void initializeThreading(); + +void lockAtomicallyInitializedStaticMutex(); +void unlockAtomicallyInitializedStaticMutex(); + +} // namespace WTF + +using WTF::Mutex; +using WTF::MutexLocker; +using WTF::ThreadCondition; +using WTF::ThreadIdentifier; +using WTF::ThreadSafeShared; + +using WTF::createThread; +using WTF::currentThread; +using WTF::isMainThread; +using WTF::detachThread; +using WTF::waitForThreadCompletion; + +#endif // Threading_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingGtk.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingGtk.cpp new file mode 100644 index 0000000..777d55b --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingGtk.cpp @@ -0,0 +1,244 @@ +/* + * Copyright (C) 2007, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "Threading.h" + +#if !USE(PTHREADS) + +#include "HashMap.h" +#include "MainThread.h" +#include "RandomNumberSeed.h" + +#include <glib.h> + +namespace WTF { + +static Mutex* atomicallyInitializedStaticMutex; + +static ThreadIdentifier mainThreadIdentifier; + +static Mutex& threadMapMutex() +{ + static Mutex mutex; + return mutex; +} + +void initializeThreading() +{ + if (!g_thread_supported()) + g_thread_init(NULL); + ASSERT(g_thread_supported()); + + if (!atomicallyInitializedStaticMutex) { + atomicallyInitializedStaticMutex = new Mutex; + threadMapMutex(); + initializeRandomNumberGenerator(); + mainThreadIdentifier = currentThread(); + initializeMainThread(); + } +} + +void lockAtomicallyInitializedStaticMutex() +{ + ASSERT(atomicallyInitializedStaticMutex); + atomicallyInitializedStaticMutex->lock(); +} + +void unlockAtomicallyInitializedStaticMutex() +{ + atomicallyInitializedStaticMutex->unlock(); +} + +static HashMap<ThreadIdentifier, GThread*>& threadMap() +{ + static HashMap<ThreadIdentifier, GThread*> map; + return map; +} + +static ThreadIdentifier identifierByGthreadHandle(GThread*& thread) +{ + MutexLocker locker(threadMapMutex()); + + HashMap<ThreadIdentifier, GThread*>::iterator i = threadMap().begin(); + for (; i != threadMap().end(); ++i) { + if (i->second == thread) + return i->first; + } + + return 0; +} + +static ThreadIdentifier establishIdentifierForThread(GThread*& thread) +{ + ASSERT(!identifierByGthreadHandle(thread)); + + MutexLocker locker(threadMapMutex()); + + static ThreadIdentifier identifierCount = 1; + + threadMap().add(identifierCount, thread); + + return identifierCount++; +} + +static GThread* threadForIdentifier(ThreadIdentifier id) +{ + MutexLocker locker(threadMapMutex()); + + return threadMap().get(id); +} + +static void clearThreadForIdentifier(ThreadIdentifier id) +{ + MutexLocker locker(threadMapMutex()); + + ASSERT(threadMap().contains(id)); + + threadMap().remove(id); +} + +ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char*) +{ + GThread* thread; + if (!(thread = g_thread_create(entryPoint, data, TRUE, 0))) { + LOG_ERROR("Failed to create thread at entry point %p with data %p", entryPoint, data); + return 0; + } + + ThreadIdentifier threadID = establishIdentifierForThread(thread); + return threadID; +} + +int waitForThreadCompletion(ThreadIdentifier threadID, void** result) +{ + ASSERT(threadID); + + GThread* thread = threadForIdentifier(threadID); + + void* joinResult = g_thread_join(thread); + if (result) + *result = joinResult; + + clearThreadForIdentifier(threadID); + return 0; +} + +void detachThread(ThreadIdentifier) +{ +} + +ThreadIdentifier currentThread() +{ + GThread* currentThread = g_thread_self(); + if (ThreadIdentifier id = identifierByGthreadHandle(currentThread)) + return id; + return establishIdentifierForThread(currentThread); +} + +bool isMainThread() +{ + return currentThread() == mainThreadIdentifier; +} + +Mutex::Mutex() + : m_mutex(g_mutex_new()) +{ +} + +Mutex::~Mutex() +{ +} + +void Mutex::lock() +{ + g_mutex_lock(m_mutex.get()); +} + +bool Mutex::tryLock() +{ + return g_mutex_trylock(m_mutex.get()); +} + +void Mutex::unlock() +{ + g_mutex_unlock(m_mutex.get()); +} + +ThreadCondition::ThreadCondition() + : m_condition(g_cond_new()) +{ +} + +ThreadCondition::~ThreadCondition() +{ +} + +void ThreadCondition::wait(Mutex& mutex) +{ + g_cond_wait(m_condition.get(), mutex.impl().get()); +} + +bool ThreadCondition::timedWait(Mutex& mutex, double interval) 
+{
+    if (interval < 0.0) {
+        wait(mutex);
+        return true;
+    }
+
+    int intervalSeconds = static_cast<int>(interval);
+    int intervalMicroseconds = static_cast<int>((interval - intervalSeconds) * 1000000.0);
+
+    GTimeVal targetTime;
+    g_get_current_time(&targetTime);
+
+    targetTime.tv_sec += intervalSeconds;
+    targetTime.tv_usec += intervalMicroseconds;
+    if (targetTime.tv_usec >= 1000000) {
+        // Carry into tv_sec so that tv_usec stays within [0, 1000000).
+        targetTime.tv_usec -= 1000000;
+        targetTime.tv_sec++;
+    }
+
+    return g_cond_timed_wait(m_condition.get(), mutex.impl().get(), &targetTime);
+}
+
+void ThreadCondition::signal()
+{
+    g_cond_signal(m_condition.get());
+}
+
+void ThreadCondition::broadcast()
+{
+    g_cond_broadcast(m_condition.get());
+}
+
+} // namespace WTF
+
+#endif // !USE(PTHREADS)
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingNone.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingNone.cpp
new file mode 100644
index 0000000..832cd0c
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingNone.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *    its contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Threading.h"
+
+namespace WTF {
+
+void initializeThreading() { }
+ThreadIdentifier createThreadInternal(ThreadFunction, void*, const char*) { return 0; }
+int waitForThreadCompletion(ThreadIdentifier, void**) { return 0; }
+void detachThread(ThreadIdentifier) { }
+ThreadIdentifier currentThread() { return 0; }
+bool isMainThread() { return true; }
+
+Mutex::Mutex() { }
+Mutex::~Mutex() { }
+void Mutex::lock() { }
+bool Mutex::tryLock() { return false; }
+void Mutex::unlock() { }
+
+ThreadCondition::ThreadCondition() { }
+ThreadCondition::~ThreadCondition() { }
+void ThreadCondition::wait(Mutex& mutex) { }
+bool ThreadCondition::timedWait(Mutex& mutex, double interval) { return false; }
+void ThreadCondition::signal() { }
+void ThreadCondition::broadcast() { }
+
+void lockAtomicallyInitializedStaticMutex() { }
+void unlockAtomicallyInitializedStaticMutex() { }
+
+} // namespace WTF
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingPthreads.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingPthreads.cpp
new file mode 100644
index 0000000..f111fcf
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingPthreads.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *    its contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "config.h"
+#include "Threading.h"
+
+#include "StdLibExtras.h"
+
+#if USE(PTHREADS)
+
+#include "HashMap.h"
+#include "MainThread.h"
+#include "RandomNumberSeed.h"
+
+#include <errno.h>
+#include <sys/time.h>
+
+namespace WTF {
+
+typedef HashMap<ThreadIdentifier, pthread_t> ThreadMap;
+
+static Mutex* atomicallyInitializedStaticMutex;
+
+#if !PLATFORM(DARWIN)
+static ThreadIdentifier mainThreadIdentifier; // The thread that was the first to call initializeThreading(), which must be the main thread.
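+// (Not needed on Darwin: isMainThread() below asks the system directly via
+// pthread_main_np() rather than comparing against a stored identifier.)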
+#endif + +static Mutex& threadMapMutex() +{ + DEFINE_STATIC_LOCAL(Mutex, mutex, ()); + return mutex; +} + +void initializeThreading() +{ + if (!atomicallyInitializedStaticMutex) { + atomicallyInitializedStaticMutex = new Mutex; + threadMapMutex(); + initializeRandomNumberGenerator(); +#if !PLATFORM(DARWIN) + mainThreadIdentifier = currentThread(); +#endif + initializeMainThread(); + } +} + +void lockAtomicallyInitializedStaticMutex() +{ + ASSERT(atomicallyInitializedStaticMutex); + atomicallyInitializedStaticMutex->lock(); +} + +void unlockAtomicallyInitializedStaticMutex() +{ + atomicallyInitializedStaticMutex->unlock(); +} + +static ThreadMap& threadMap() +{ + DEFINE_STATIC_LOCAL(ThreadMap, map, ()); + return map; +} + +static ThreadIdentifier identifierByPthreadHandle(const pthread_t& pthreadHandle) +{ + MutexLocker locker(threadMapMutex()); + + ThreadMap::iterator i = threadMap().begin(); + for (; i != threadMap().end(); ++i) { + if (pthread_equal(i->second, pthreadHandle)) + return i->first; + } + + return 0; +} + +static ThreadIdentifier establishIdentifierForPthreadHandle(pthread_t& pthreadHandle) +{ + ASSERT(!identifierByPthreadHandle(pthreadHandle)); + + MutexLocker locker(threadMapMutex()); + + static ThreadIdentifier identifierCount = 1; + + threadMap().add(identifierCount, pthreadHandle); + + return identifierCount++; +} + +static pthread_t pthreadHandleForIdentifier(ThreadIdentifier id) +{ + MutexLocker locker(threadMapMutex()); + + return threadMap().get(id); +} + +static void clearPthreadHandleForIdentifier(ThreadIdentifier id) +{ + MutexLocker locker(threadMapMutex()); + + ASSERT(threadMap().contains(id)); + + threadMap().remove(id); +} + +ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char*) +{ + pthread_t threadHandle; + if (pthread_create(&threadHandle, NULL, entryPoint, data)) { + LOG_ERROR("Failed to create pthread at entry point %p with data %p", entryPoint, data); + return 0; + } + + return establishIdentifierForPthreadHandle(threadHandle); +} + +int waitForThreadCompletion(ThreadIdentifier threadID, void** result) +{ + ASSERT(threadID); + + pthread_t pthreadHandle = pthreadHandleForIdentifier(threadID); + + int joinResult = pthread_join(pthreadHandle, result); + if (joinResult == EDEADLK) + LOG_ERROR("ThreadIdentifier %u was found to be deadlocked trying to quit", threadID); + + clearPthreadHandleForIdentifier(threadID); + return joinResult; +} + +void detachThread(ThreadIdentifier threadID) +{ + ASSERT(threadID); + + pthread_t pthreadHandle = pthreadHandleForIdentifier(threadID); + + pthread_detach(pthreadHandle); + + clearPthreadHandleForIdentifier(threadID); +} + +ThreadIdentifier currentThread() +{ + pthread_t currentThread = pthread_self(); + if (ThreadIdentifier id = identifierByPthreadHandle(currentThread)) + return id; + return establishIdentifierForPthreadHandle(currentThread); +} + +bool isMainThread() +{ +#if PLATFORM(DARWIN) + return pthread_main_np(); +#else + return currentThread() == mainThreadIdentifier; +#endif +} + +Mutex::Mutex() +{ + pthread_mutex_init(&m_mutex, NULL); +} + +Mutex::~Mutex() +{ + pthread_mutex_destroy(&m_mutex); +} + +void Mutex::lock() +{ + if (pthread_mutex_lock(&m_mutex) != 0) + ASSERT(false); +} + +bool Mutex::tryLock() +{ + int result = pthread_mutex_trylock(&m_mutex); + + if (result == 0) + return true; + else if (result == EBUSY) + return false; + + ASSERT(false); + return false; +} + +void Mutex::unlock() +{ + if (pthread_mutex_unlock(&m_mutex) != 0) + ASSERT(false); +} + 
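+// An illustrative consumer loop (the queue, mutex, and condition names are
+// hypothetical, not part of this file). Condition waits can wake spuriously,
+// so the predicate is always rechecked in a loop:
+//
+//     mutex.lock();
+//     while (queue.isEmpty())
+//         condition.wait(mutex); // atomically unlocks, blocks, then relocks
+//     Work work = queue.takeFirst();
+//     mutex.unlock();
+//
+// A producer appends under the same mutex, then calls condition.signal() to
+// wake one waiter or condition.broadcast() to wake them all.
+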
+ThreadCondition::ThreadCondition()
+{
+    pthread_cond_init(&m_condition, NULL);
+}
+
+ThreadCondition::~ThreadCondition()
+{
+    pthread_cond_destroy(&m_condition);
+}
+
+void ThreadCondition::wait(Mutex& mutex)
+{
+    if (pthread_cond_wait(&m_condition, &mutex.impl()) != 0)
+        ASSERT(false);
+}
+
+bool ThreadCondition::timedWait(Mutex& mutex, double secondsToWait)
+{
+    if (secondsToWait < 0.0) {
+        wait(mutex);
+        return true;
+    }
+
+    int intervalSeconds = static_cast<int>(secondsToWait);
+    int intervalMicroseconds = static_cast<int>((secondsToWait - intervalSeconds) * 1000000.0);
+
+    // Current time comes in sec/microsec
+    timeval currentTime;
+    gettimeofday(&currentTime, NULL);
+
+    // Target time comes in sec/nanosec
+    timespec targetTime;
+    targetTime.tv_sec = currentTime.tv_sec + intervalSeconds;
+    targetTime.tv_nsec = (currentTime.tv_usec + intervalMicroseconds) * 1000;
+    if (targetTime.tv_nsec >= 1000000000) {
+        // Carry into tv_sec so that tv_nsec stays within [0, 1000000000).
+        targetTime.tv_nsec -= 1000000000;
+        targetTime.tv_sec++;
+    }
+
+    return pthread_cond_timedwait(&m_condition, &mutex.impl(), &targetTime) == 0;
+}
+
+void ThreadCondition::signal()
+{
+    if (pthread_cond_signal(&m_condition) != 0)
+        ASSERT(false);
+}
+
+void ThreadCondition::broadcast()
+{
+    if (pthread_cond_broadcast(&m_condition) != 0)
+        ASSERT(false);
+}
+
+} // namespace WTF
+
+#endif // USE(PTHREADS)
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingQt.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingQt.cpp
new file mode 100644
index 0000000..1a40d52
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingQt.cpp
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2007 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *    its contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ +#include "config.h" +#include "Threading.h" + +#include "HashMap.h" +#include "MainThread.h" +#include "RandomNumberSeed.h" + +#include <QCoreApplication> +#include <QMutex> +#include <QThread> +#include <QWaitCondition> + +namespace WTF { + +class ThreadPrivate : public QThread { +public: + ThreadPrivate(ThreadFunction entryPoint, void* data); + void run(); + void* getReturnValue() { return m_returnValue; } +private: + void* m_data; + ThreadFunction m_entryPoint; + void* m_returnValue; +}; + +ThreadPrivate::ThreadPrivate(ThreadFunction entryPoint, void* data) + : m_data(data) + , m_entryPoint(entryPoint) + , m_returnValue(0) +{ +} + +void ThreadPrivate::run() +{ + m_returnValue = m_entryPoint(m_data); +} + + +static Mutex* atomicallyInitializedStaticMutex; + +static ThreadIdentifier mainThreadIdentifier; + +static Mutex& threadMapMutex() +{ + static Mutex mutex; + return mutex; +} + +static HashMap<ThreadIdentifier, QThread*>& threadMap() +{ + static HashMap<ThreadIdentifier, QThread*> map; + return map; +} + +static ThreadIdentifier identifierByQthreadHandle(QThread*& thread) +{ + MutexLocker locker(threadMapMutex()); + + HashMap<ThreadIdentifier, QThread*>::iterator i = threadMap().begin(); + for (; i != threadMap().end(); ++i) { + if (i->second == thread) + return i->first; + } + + return 0; +} + +static ThreadIdentifier establishIdentifierForThread(QThread*& thread) +{ + ASSERT(!identifierByQthreadHandle(thread)); + + MutexLocker locker(threadMapMutex()); + + static ThreadIdentifier identifierCount = 1; + + threadMap().add(identifierCount, thread); + + return identifierCount++; +} + +static void clearThreadForIdentifier(ThreadIdentifier id) +{ + MutexLocker locker(threadMapMutex()); + + ASSERT(threadMap().contains(id)); + + threadMap().remove(id); +} + +static QThread* threadForIdentifier(ThreadIdentifier id) +{ + MutexLocker locker(threadMapMutex()); + + return threadMap().get(id); +} + +void initializeThreading() +{ + if (!atomicallyInitializedStaticMutex) { + atomicallyInitializedStaticMutex = new Mutex; + threadMapMutex(); + initializeRandomNumberGenerator(); + QThread* mainThread = QCoreApplication::instance()->thread(); + mainThreadIdentifier = identifierByQthreadHandle(mainThread); + if (!mainThreadIdentifier) + mainThreadIdentifier = establishIdentifierForThread(mainThread); + initializeMainThread(); + } +} + +void lockAtomicallyInitializedStaticMutex() +{ + ASSERT(atomicallyInitializedStaticMutex); + atomicallyInitializedStaticMutex->lock(); +} + +void unlockAtomicallyInitializedStaticMutex() +{ + atomicallyInitializedStaticMutex->unlock(); +} + +ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char*) +{ + ThreadPrivate* thread = new ThreadPrivate(entryPoint, data); + if (!thread) { + LOG_ERROR("Failed to create thread at entry point %p with data %p", entryPoint, data); + return 0; + } + thread->start(); + + QThread* threadRef = static_cast<QThread*>(thread); + + return establishIdentifierForThread(threadRef); +} + +int waitForThreadCompletion(ThreadIdentifier threadID, void** result) +{ + ASSERT(threadID); + + QThread* thread = threadForIdentifier(threadID); + + bool res = thread->wait(); + + clearThreadForIdentifier(threadID); + if (result) + *result = static_cast<ThreadPrivate*>(thread)->getReturnValue(); + + return !res; +} + +void detachThread(ThreadIdentifier) +{ +} + +ThreadIdentifier currentThread() +{ + QThread* currentThread = QThread::currentThread(); + if (ThreadIdentifier id = identifierByQthreadHandle(currentThread)) + 
        return id;
+    return establishIdentifierForThread(currentThread);
+}
+
+bool isMainThread()
+{
+    return QThread::currentThread() == QCoreApplication::instance()->thread();
+}
+
+Mutex::Mutex()
+    : m_mutex(new QMutex())
+{
+}
+
+Mutex::~Mutex()
+{
+    delete m_mutex;
+}
+
+void Mutex::lock()
+{
+    m_mutex->lock();
+}
+
+bool Mutex::tryLock()
+{
+    return m_mutex->tryLock();
+}
+
+void Mutex::unlock()
+{
+    m_mutex->unlock();
+}
+
+ThreadCondition::ThreadCondition()
+    : m_condition(new QWaitCondition())
+{
+}
+
+ThreadCondition::~ThreadCondition()
+{
+    delete m_condition;
+}
+
+void ThreadCondition::wait(Mutex& mutex)
+{
+    m_condition->wait(mutex.impl());
+}
+
+bool ThreadCondition::timedWait(Mutex& mutex, double secondsToWait)
+{
+    if (secondsToWait < 0.0) {
+        wait(mutex);
+        return true;
+    }
+
+    unsigned long millisecondsToWait = static_cast<unsigned long>(secondsToWait * 1000.0);
+    return m_condition->wait(mutex.impl(), millisecondsToWait);
+}
+
+void ThreadCondition::signal()
+{
+    m_condition->wakeOne();
+}
+
+void ThreadCondition::broadcast()
+{
+    m_condition->wakeAll();
+}
+
+} // namespace WTF
diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingWin.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingWin.cpp
new file mode 100644
index 0000000..3626a37
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/wtf/ThreadingWin.cpp
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2007, 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *    its contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * =============================================================================
+ * Note: The implementation of condition variables under the Windows
+ * platform was based on that of the excellent BOOST C++ library. It
+ * has been rewritten to fit in with the WebKit architecture and to
+ * use its coding conventions.
+ * ============================================================================= + * + * The Boost license is virtually identical to the Apple variation at the + * top of this file, but is included here for completeness: + * + * Boost Software License - Version 1.0 - August 17th, 2003 + * + * Permission is hereby granted, free of charge, to any person or organization + * obtaining a copy of the software and accompanying documentation covered by + * this license (the "Software") to use, reproduce, display, distribute, + * execute, and transmit the Software, and to prepare derivative works of the + * Software, and to permit third-parties to whom the Software is furnished to + * do so, all subject to the following: + * + * The copyright notices in the Software and this entire statement, including + * the above license grant, this restriction and the following disclaimer, + * must be included in all copies of the Software, in whole or in part, and + * all derivative works of the Software, unless such copies or derivative + * works are solely in the form of machine-executable object code generated by + * a source language processor. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT + * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE + * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "config.h" +#include "Threading.h" + +#include "MainThread.h" +#if !USE(PTHREADS) && PLATFORM(WIN_OS) +#include "ThreadSpecific.h" +#endif +#include <process.h> +#include <windows.h> +#include <wtf/HashMap.h> +#include <wtf/MathExtras.h> +#include <wtf/RandomNumberSeed.h> + +namespace WTF { + +// MS_VC_EXCEPTION, THREADNAME_INFO, and setThreadName all come from <http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx>. +static const DWORD MS_VC_EXCEPTION = 0x406D1388; + +#pragma pack(push, 8) +typedef struct tagTHREADNAME_INFO { + DWORD dwType; // must be 0x1000 + LPCSTR szName; // pointer to name (in user addr space) + DWORD dwThreadID; // thread ID (-1=caller thread) + DWORD dwFlags; // reserved for future use, must be zero +} THREADNAME_INFO; +#pragma pack(pop) + +static void setThreadName(DWORD dwThreadID, LPCSTR szThreadName) +{ + // Visual Studio has a 31-character limit on thread names. Longer names will + // be truncated silently, but we'd like callers to know about the limit. 
+ ASSERT_ARG(szThreadName, strlen(szThreadName) <= 31); + + THREADNAME_INFO info; + info.dwType = 0x1000; + info.szName = szThreadName; + info.dwThreadID = dwThreadID; + info.dwFlags = 0; + + __try { + RaiseException(MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), reinterpret_cast<ULONG_PTR*>(&info)); + } __except (EXCEPTION_CONTINUE_EXECUTION) { + } +} + +static Mutex* atomicallyInitializedStaticMutex; + +void lockAtomicallyInitializedStaticMutex() +{ + ASSERT(atomicallyInitializedStaticMutex); + atomicallyInitializedStaticMutex->lock(); +} + +void unlockAtomicallyInitializedStaticMutex() +{ + atomicallyInitializedStaticMutex->unlock(); +} + +static ThreadIdentifier mainThreadIdentifier; + +static Mutex& threadMapMutex() +{ + static Mutex mutex; + return mutex; +} + +void initializeThreading() +{ + if (!atomicallyInitializedStaticMutex) { + atomicallyInitializedStaticMutex = new Mutex; + threadMapMutex(); + initializeRandomNumberGenerator(); + initializeMainThread(); + mainThreadIdentifier = currentThread(); + setThreadName(mainThreadIdentifier, "Main Thread"); + } +} + +static HashMap<DWORD, HANDLE>& threadMap() +{ + static HashMap<DWORD, HANDLE> map; + return map; +} + +static void storeThreadHandleByIdentifier(DWORD threadID, HANDLE threadHandle) +{ + MutexLocker locker(threadMapMutex()); + ASSERT(!threadMap().contains(threadID)); + threadMap().add(threadID, threadHandle); +} + +static HANDLE threadHandleForIdentifier(ThreadIdentifier id) +{ + MutexLocker locker(threadMapMutex()); + return threadMap().get(id); +} + +static void clearThreadHandleForIdentifier(ThreadIdentifier id) +{ + MutexLocker locker(threadMapMutex()); + ASSERT(threadMap().contains(id)); + threadMap().remove(id); +} + +struct ThreadFunctionInvocation { + ThreadFunctionInvocation(ThreadFunction function, void* data) : function(function), data(data) {} + + ThreadFunction function; + void* data; +}; + +static unsigned __stdcall wtfThreadEntryPoint(void* param) +{ + ThreadFunctionInvocation invocation = *static_cast<ThreadFunctionInvocation*>(param); + delete static_cast<ThreadFunctionInvocation*>(param); + + void* result = invocation.function(invocation.data); + +#if !USE(PTHREADS) && PLATFORM(WIN_OS) + // Do the TLS cleanup. 
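+    // (Threads created here go through _beginthreadex rather than pthread_create,
+    // so destructors registered with ThreadSpecific are not run automatically;
+    // this call runs them by hand before the thread exits.)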
+ ThreadSpecificThreadExit(); +#endif + + return reinterpret_cast<unsigned>(result); +} + +ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char* threadName) +{ + unsigned threadIdentifier = 0; + ThreadIdentifier threadID = 0; + ThreadFunctionInvocation* invocation = new ThreadFunctionInvocation(entryPoint, data); + HANDLE threadHandle = reinterpret_cast<HANDLE>(_beginthreadex(0, 0, wtfThreadEntryPoint, invocation, 0, &threadIdentifier)); + if (!threadHandle) { + LOG_ERROR("Failed to create thread at entry point %p with data %p: %ld", entryPoint, data, errno); + return 0; + } + + if (threadName) + setThreadName(threadIdentifier, threadName); + + threadID = static_cast<ThreadIdentifier>(threadIdentifier); + storeThreadHandleByIdentifier(threadIdentifier, threadHandle); + + return threadID; +} + +int waitForThreadCompletion(ThreadIdentifier threadID, void** result) +{ + ASSERT(threadID); + + HANDLE threadHandle = threadHandleForIdentifier(threadID); + if (!threadHandle) + LOG_ERROR("ThreadIdentifier %u did not correspond to an active thread when trying to quit", threadID); + + DWORD joinResult = ::WaitForSingleObject(threadHandle, INFINITE); + if (joinResult == WAIT_FAILED) + LOG_ERROR("ThreadIdentifier %u was found to be deadlocked trying to quit", threadID); + + ::CloseHandle(threadHandle); + clearThreadHandleForIdentifier(threadID); + + return joinResult; +} + +void detachThread(ThreadIdentifier threadID) +{ + ASSERT(threadID); + + HANDLE threadHandle = threadHandleForIdentifier(threadID); + if (threadHandle) + ::CloseHandle(threadHandle); + clearThreadHandleForIdentifier(threadID); +} + +ThreadIdentifier currentThread() +{ + return static_cast<ThreadIdentifier>(::GetCurrentThreadId()); +} + +bool isMainThread() +{ + return currentThread() == mainThreadIdentifier; +} + +Mutex::Mutex() +{ + m_mutex.m_recursionCount = 0; + ::InitializeCriticalSection(&m_mutex.m_internalMutex); +} + +Mutex::~Mutex() +{ + ::DeleteCriticalSection(&m_mutex.m_internalMutex); +} + +void Mutex::lock() +{ + ::EnterCriticalSection(&m_mutex.m_internalMutex); + ++m_mutex.m_recursionCount; +} + +bool Mutex::tryLock() +{ + // This method is modeled after the behavior of pthread_mutex_trylock, + // which will return an error if the lock is already owned by the + // current thread. 
Since the primitive Win32 'TryEnterCriticalSection'
+    // treats this as a successful case, it changes the behavior of several
+    // tests in WebKit that check to see if the current thread already
+    // owned this mutex (see e.g., IconDatabase::getOrCreateIconRecord).
+    DWORD result = ::TryEnterCriticalSection(&m_mutex.m_internalMutex);
+
+    if (result != 0) { // We got the lock
+        // If this thread already had the lock, we must unlock and
+        // return false so that we mimic the behavior of POSIX's
+        // pthread_mutex_trylock:
+        if (m_mutex.m_recursionCount > 0) {
+            ::LeaveCriticalSection(&m_mutex.m_internalMutex);
+            return false;
+        }
+
+        ++m_mutex.m_recursionCount;
+        return true;
+    }
+
+    return false;
+}
+
+void Mutex::unlock()
+{
+    --m_mutex.m_recursionCount;
+    ::LeaveCriticalSection(&m_mutex.m_internalMutex);
+}
+
+static const long MaxSemaphoreCount = static_cast<long>(~0UL >> 1);
+
+ThreadCondition::ThreadCondition()
+{
+    m_condition.m_timedOut = 0;
+    m_condition.m_blocked = 0;
+    m_condition.m_waitingForRemoval = 0;
+    m_condition.m_gate = ::CreateSemaphore(0, 1, 1, 0);
+    m_condition.m_queue = ::CreateSemaphore(0, 0, MaxSemaphoreCount, 0);
+    m_condition.m_mutex = ::CreateMutex(0, 0, 0);
+
+    if (!m_condition.m_gate || !m_condition.m_queue || !m_condition.m_mutex) {
+        if (m_condition.m_gate)
+            ::CloseHandle(m_condition.m_gate);
+        if (m_condition.m_queue)
+            ::CloseHandle(m_condition.m_queue);
+        if (m_condition.m_mutex)
+            ::CloseHandle(m_condition.m_mutex);
+    }
+}
+
+ThreadCondition::~ThreadCondition()
+{
+    ::CloseHandle(m_condition.m_gate);
+    ::CloseHandle(m_condition.m_queue);
+    ::CloseHandle(m_condition.m_mutex);
+}
+
+void ThreadCondition::wait(Mutex& mutex)
+{
+    PlatformMutex& cs = mutex.impl();
+
+    // Enter the wait state.
+    DWORD res = ::WaitForSingleObject(m_condition.m_gate, INFINITE);
+    ASSERT(res == WAIT_OBJECT_0);
+    ++m_condition.m_blocked;
+    res = ::ReleaseSemaphore(m_condition.m_gate, 1, 0);
+    ASSERT(res);
+
+    ::LeaveCriticalSection(&cs.m_internalMutex);
+
+    res = ::WaitForSingleObject(m_condition.m_queue, INFINITE);
+    ASSERT(res == WAIT_OBJECT_0);
+
+    res = ::WaitForSingleObject(m_condition.m_mutex, INFINITE);
+    ASSERT(res == WAIT_OBJECT_0);
+    size_t wasWaiting = m_condition.m_waitingForRemoval;
+    size_t wasTimedOut = m_condition.m_timedOut;
+    if (wasWaiting != 0) {
+        if (--m_condition.m_waitingForRemoval == 0) {
+            if (m_condition.m_blocked != 0) {
+                res = ::ReleaseSemaphore(m_condition.m_gate, 1, 0); // open m_gate
+                ASSERT(res);
+                wasWaiting = 0;
+            }
+            else if (m_condition.m_timedOut != 0)
+                m_condition.m_timedOut = 0;
+        }
+    } else if (++m_condition.m_timedOut == ((std::numeric_limits<unsigned>::max)() / 2)) {
+        // timeout occurred, normalize the m_condition.m_timedOut count
+        // this may occur if many calls to wait with a timeout are made and
+        // no call to notify_* is made
+        res = ::WaitForSingleObject(m_condition.m_gate, INFINITE);
+        ASSERT(res == WAIT_OBJECT_0);
+        m_condition.m_blocked -= m_condition.m_timedOut;
+        res = ::ReleaseSemaphore(m_condition.m_gate, 1, 0);
+        ASSERT(res);
+        m_condition.m_timedOut = 0;
+    }
+    res = ::ReleaseMutex(m_condition.m_mutex);
+    ASSERT(res);
+
+    if (wasWaiting == 1) {
+        for (/**/ ; wasTimedOut; --wasTimedOut) {
+            // better now than spurious later
+            res = ::WaitForSingleObject(m_condition.m_queue, INFINITE);
+            ASSERT(res == WAIT_OBJECT_0);
+        }
+        res = ::ReleaseSemaphore(m_condition.m_gate, 1, 0);
+        ASSERT(res);
+    }
+
+    ::EnterCriticalSection(&cs.m_internalMutex);
+}
+
+bool ThreadCondition::timedWait(Mutex& mutex, double interval)
+{
+    // Empty
for now + ASSERT(false); + return false; +} + +void ThreadCondition::signal() +{ + unsigned signals = 0; + + DWORD res = ::WaitForSingleObject(m_condition.m_mutex, INFINITE); + ASSERT(res == WAIT_OBJECT_0); + + if (m_condition.m_waitingForRemoval != 0) { // the m_gate is already closed + if (m_condition.m_blocked == 0) { + res = ::ReleaseMutex(m_condition.m_mutex); + ASSERT(res); + return; + } + + ++m_condition.m_waitingForRemoval; + --m_condition.m_blocked; + + signals = 1; + } else { + res = ::WaitForSingleObject(m_condition.m_gate, INFINITE); + ASSERT(res == WAIT_OBJECT_0); + if (m_condition.m_blocked > m_condition.m_timedOut) { + if (m_condition.m_timedOut != 0) { + m_condition.m_blocked -= m_condition.m_timedOut; + m_condition.m_timedOut = 0; + } + signals = m_condition.m_waitingForRemoval = 1; + --m_condition.m_blocked; + } else { + res = ::ReleaseSemaphore(m_condition.m_gate, 1, 0); + ASSERT(res); + } + } + + res =::ReleaseMutex(m_condition.m_mutex); + ASSERT(res); + + if (signals) { + res = ::ReleaseSemaphore(m_condition.m_queue, signals, 0); + ASSERT(res); + } +} + +void ThreadCondition::broadcast() +{ + unsigned signals = 0; + + DWORD res = ::WaitForSingleObject(m_condition.m_mutex, INFINITE); + ASSERT(res == WAIT_OBJECT_0); + + if (m_condition.m_waitingForRemoval != 0) { // the m_gate is already closed + if (m_condition.m_blocked == 0) { + res = ::ReleaseMutex(m_condition.m_mutex); + ASSERT(res); + return; + } + + m_condition.m_waitingForRemoval += (signals = m_condition.m_blocked); + m_condition.m_blocked = 0; + } else { + res = ::WaitForSingleObject(m_condition.m_gate, INFINITE); + ASSERT(res == WAIT_OBJECT_0); + if (m_condition.m_blocked > m_condition.m_timedOut) { + if (m_condition.m_timedOut != 0) { + m_condition.m_blocked -= m_condition.m_timedOut; + m_condition.m_timedOut = 0; + } + signals = m_condition.m_waitingForRemoval = m_condition.m_blocked; + m_condition.m_blocked = 0; + } else { + res = ::ReleaseSemaphore(m_condition.m_gate, 1, 0); + ASSERT(res); + } + } + + res = ::ReleaseMutex(m_condition.m_mutex); + ASSERT(res); + + if (signals) { + res = ::ReleaseSemaphore(m_condition.m_queue, signals, 0); + ASSERT(res); + } +} + +} // namespace WTF diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/UnusedParam.h b/src/3rdparty/webkit/JavaScriptCore/wtf/UnusedParam.h new file mode 100644 index 0000000..996f5c8 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/UnusedParam.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2006 Apple Computer, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_UnusedParam_h +#define WTF_UnusedParam_h + +/* don't use this for C++, it should only be used in plain C files or + ObjC methods, where leaving off the parameter name is not allowed. 
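+
+   A minimal illustration (the callback and its parameter are hypothetical):
+
+       void onTimerFired(int timerId)
+       {
+           UNUSED_PARAM(timerId);
+       }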
*/ + +#define UNUSED_PARAM(x) (void)x + +#endif /* WTF_UnusedParam_h */ diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/Vector.h b/src/3rdparty/webkit/JavaScriptCore/wtf/Vector.h new file mode 100644 index 0000000..880b45d --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/Vector.h @@ -0,0 +1,950 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_Vector_h +#define WTF_Vector_h + +#include "Assertions.h" +#include "FastMalloc.h" +#include "Noncopyable.h" +#include "NotFound.h" +#include "VectorTraits.h" +#include <limits> +#include <stdlib.h> +#include <string.h> +#include <utility> + +namespace WTF { + + using std::min; + using std::max; + + // WTF_ALIGN_OF / WTF_ALIGNED + #if COMPILER(GCC) || COMPILER(MINGW) || COMPILER(RVCT) || COMPILER(WINSCW) + #define WTF_ALIGN_OF(type) __alignof__(type) + #define WTF_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((__aligned__(n))) + #elif COMPILER(MSVC) + #define WTF_ALIGN_OF(type) __alignof(type) + #define WTF_ALIGNED(variable_type, variable, n) __declspec(align(n)) variable_type variable + #else + #error WTF_ALIGN macros need alignment control. 
+ #endif + + #if COMPILER(GCC) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303) + typedef char __attribute__((__may_alias__)) AlignedBufferChar; + #else + typedef char AlignedBufferChar; + #endif + + template <size_t size, size_t alignment> struct AlignedBuffer; + template <size_t size> struct AlignedBuffer<size, 1> { AlignedBufferChar buffer[size]; }; + template <size_t size> struct AlignedBuffer<size, 2> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 2); }; + template <size_t size> struct AlignedBuffer<size, 4> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 4); }; + template <size_t size> struct AlignedBuffer<size, 8> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 8); }; + template <size_t size> struct AlignedBuffer<size, 16> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 16); }; + template <size_t size> struct AlignedBuffer<size, 32> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 32); }; + template <size_t size> struct AlignedBuffer<size, 64> { WTF_ALIGNED(AlignedBufferChar, buffer[size], 64); }; + + template <bool needsDestruction, typename T> + class VectorDestructor; + + template<typename T> + struct VectorDestructor<false, T> + { + static void destruct(T*, T*) {} + }; + + template<typename T> + struct VectorDestructor<true, T> + { + static void destruct(T* begin, T* end) + { + for (T* cur = begin; cur != end; ++cur) + cur->~T(); + } + }; + + template <bool needsInitialization, bool canInitializeWithMemset, typename T> + class VectorInitializer; + + template<bool ignore, typename T> + struct VectorInitializer<false, ignore, T> + { + static void initialize(T*, T*) {} + }; + + template<typename T> + struct VectorInitializer<true, false, T> + { + static void initialize(T* begin, T* end) + { + for (T* cur = begin; cur != end; ++cur) + new (cur) T; + } + }; + + template<typename T> + struct VectorInitializer<true, true, T> + { + static void initialize(T* begin, T* end) + { + memset(begin, 0, reinterpret_cast<char*>(end) - reinterpret_cast<char*>(begin)); + } + }; + + template <bool canMoveWithMemcpy, typename T> + class VectorMover; + + template<typename T> + struct VectorMover<false, T> + { + static void move(const T* src, const T* srcEnd, T* dst) + { + while (src != srcEnd) { + new (dst) T(*src); + src->~T(); + ++dst; + ++src; + } + } + static void moveOverlapping(const T* src, const T* srcEnd, T* dst) + { + if (src > dst) + move(src, srcEnd, dst); + else { + T* dstEnd = dst + (srcEnd - src); + while (src != srcEnd) { + --srcEnd; + --dstEnd; + new (dstEnd) T(*srcEnd); + srcEnd->~T(); + } + } + } + }; + + template<typename T> + struct VectorMover<true, T> + { + static void move(const T* src, const T* srcEnd, T* dst) + { + memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - reinterpret_cast<const char*>(src)); + } + static void moveOverlapping(const T* src, const T* srcEnd, T* dst) + { + memmove(dst, src, reinterpret_cast<const char*>(srcEnd) - reinterpret_cast<const char*>(src)); + } + }; + + template <bool canCopyWithMemcpy, typename T> + class VectorCopier; + + template<typename T> + struct VectorCopier<false, T> + { + static void uninitializedCopy(const T* src, const T* srcEnd, T* dst) + { + while (src != srcEnd) { + new (dst) T(*src); + ++dst; + ++src; + } + } + }; + + template<typename T> + struct VectorCopier<true, T> + { + static void uninitializedCopy(const T* src, const T* srcEnd, T* dst) + { + memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - reinterpret_cast<const char*>(src)); + } + }; + + template <bool canFillWithMemset, typename T> + class VectorFiller; + + 
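+    // The two VectorFiller specializations below are selected at compile time
+    // via VectorTraits<T>::canFillWithMemset (see VectorTypeOperations further
+    // down): the generic one placement-news a copy of the value into each
+    // slot, while the memset form is only valid for single-byte types, which
+    // its ASSERT checks.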
template<typename T> + struct VectorFiller<false, T> + { + static void uninitializedFill(T* dst, T* dstEnd, const T& val) + { + while (dst != dstEnd) { + new (dst) T(val); + ++dst; + } + } + }; + + template<typename T> + struct VectorFiller<true, T> + { + static void uninitializedFill(T* dst, T* dstEnd, const T& val) + { + ASSERT(sizeof(T) == sizeof(char)); + memset(dst, val, dstEnd - dst); + } + }; + + template<bool canCompareWithMemcmp, typename T> + class VectorComparer; + + template<typename T> + struct VectorComparer<false, T> + { + static bool compare(const T* a, const T* b, size_t size) + { + for (size_t i = 0; i < size; ++i) + if (a[i] != b[i]) + return false; + return true; + } + }; + + template<typename T> + struct VectorComparer<true, T> + { + static bool compare(const T* a, const T* b, size_t size) + { + return memcmp(a, b, sizeof(T) * size) == 0; + } + }; + + template<typename T> + struct VectorTypeOperations + { + static void destruct(T* begin, T* end) + { + VectorDestructor<VectorTraits<T>::needsDestruction, T>::destruct(begin, end); + } + + static void initialize(T* begin, T* end) + { + VectorInitializer<VectorTraits<T>::needsInitialization, VectorTraits<T>::canInitializeWithMemset, T>::initialize(begin, end); + } + + static void move(const T* src, const T* srcEnd, T* dst) + { + VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::move(src, srcEnd, dst); + } + + static void moveOverlapping(const T* src, const T* srcEnd, T* dst) + { + VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::moveOverlapping(src, srcEnd, dst); + } + + static void uninitializedCopy(const T* src, const T* srcEnd, T* dst) + { + VectorCopier<VectorTraits<T>::canCopyWithMemcpy, T>::uninitializedCopy(src, srcEnd, dst); + } + + static void uninitializedFill(T* dst, T* dstEnd, const T& val) + { + VectorFiller<VectorTraits<T>::canFillWithMemset, T>::uninitializedFill(dst, dstEnd, val); + } + + static bool compare(const T* a, const T* b, size_t size) + { + return VectorComparer<VectorTraits<T>::canCompareWithMemcmp, T>::compare(a, b, size); + } + }; + + template<typename T> + class VectorBufferBase : Noncopyable { + public: + void allocateBuffer(size_t newCapacity) + { + m_capacity = newCapacity; + if (newCapacity > std::numeric_limits<size_t>::max() / sizeof(T)) + CRASH(); + m_buffer = static_cast<T*>(fastMalloc(newCapacity * sizeof(T))); + } + + void deallocateBuffer(T* bufferToDeallocate) + { + if (m_buffer == bufferToDeallocate) { + m_buffer = 0; + m_capacity = 0; + } + fastFree(bufferToDeallocate); + } + + T* buffer() { return m_buffer; } + const T* buffer() const { return m_buffer; } + T** bufferSlot() { return &m_buffer; } + size_t capacity() const { return m_capacity; } + + T* releaseBuffer() + { + T* buffer = m_buffer; + m_buffer = 0; + m_capacity = 0; + return buffer; + } + + protected: + VectorBufferBase() + : m_buffer(0) + , m_capacity(0) + { + } + + VectorBufferBase(T* buffer, size_t capacity) + : m_buffer(buffer) + , m_capacity(capacity) + { + } + + ~VectorBufferBase() + { + // FIXME: It would be nice to find a way to ASSERT that m_buffer hasn't leaked here. 
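+            // (The buffer is deliberately not freed here: the derived VectorBuffer
+            // classes call deallocateBuffer() from their own destructors, since only
+            // they know whether m_buffer points at the heap or at inline storage.)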
+ } + + T* m_buffer; + size_t m_capacity; + }; + + template<typename T, size_t inlineCapacity> + class VectorBuffer; + + template<typename T> + class VectorBuffer<T, 0> : private VectorBufferBase<T> { + private: + typedef VectorBufferBase<T> Base; + public: + VectorBuffer() + { + } + + VectorBuffer(size_t capacity) + { + allocateBuffer(capacity); + } + + ~VectorBuffer() + { + deallocateBuffer(buffer()); + } + + void swap(VectorBuffer<T, 0>& other) + { + std::swap(m_buffer, other.m_buffer); + std::swap(m_capacity, other.m_capacity); + } + + void restoreInlineBufferIfNeeded() { } + + using Base::allocateBuffer; + using Base::deallocateBuffer; + + using Base::buffer; + using Base::bufferSlot; + using Base::capacity; + + using Base::releaseBuffer; + private: + using Base::m_buffer; + using Base::m_capacity; + }; + + template<typename T, size_t inlineCapacity> + class VectorBuffer : private VectorBufferBase<T> { + private: + typedef VectorBufferBase<T> Base; + public: + VectorBuffer() + : Base(inlineBuffer(), inlineCapacity) + { + } + + VectorBuffer(size_t capacity) + : Base(inlineBuffer(), inlineCapacity) + { + allocateBuffer(capacity); + } + + ~VectorBuffer() + { + deallocateBuffer(buffer()); + } + + void allocateBuffer(size_t newCapacity) + { + if (newCapacity > inlineCapacity) + Base::allocateBuffer(newCapacity); + } + + void deallocateBuffer(T* bufferToDeallocate) + { + if (bufferToDeallocate == inlineBuffer()) + return; + Base::deallocateBuffer(bufferToDeallocate); + } + + void restoreInlineBufferIfNeeded() + { + if (m_buffer) + return; + m_buffer = inlineBuffer(); + m_capacity = inlineCapacity; + } + + using Base::buffer; + using Base::bufferSlot; + using Base::capacity; + + T* releaseBuffer() + { + if (buffer() == inlineBuffer()) + return 0; + return Base::releaseBuffer(); + } + + private: + using Base::m_buffer; + using Base::m_capacity; + + static const size_t m_inlineBufferSize = inlineCapacity * sizeof(T); + T* inlineBuffer() { return reinterpret_cast<T*>(m_inlineBuffer.buffer); } + + AlignedBuffer<m_inlineBufferSize, WTF_ALIGN_OF(T)> m_inlineBuffer; + }; + + template<typename T, size_t inlineCapacity = 0> + class Vector { + private: + typedef VectorBuffer<T, inlineCapacity> Buffer; + typedef VectorTypeOperations<T> TypeOperations; + + public: + typedef T ValueType; + + typedef T* iterator; + typedef const T* const_iterator; + + Vector() + : m_size(0) + { + } + + explicit Vector(size_t size) + : m_size(size) + , m_buffer(size) + { + if (begin()) + TypeOperations::initialize(begin(), end()); + } + + ~Vector() + { + if (m_size) shrink(0); + } + + Vector(const Vector&); + template<size_t otherCapacity> + Vector(const Vector<T, otherCapacity>&); + + Vector& operator=(const Vector&); + template<size_t otherCapacity> + Vector& operator=(const Vector<T, otherCapacity>&); + + size_t size() const { return m_size; } + size_t capacity() const { return m_buffer.capacity(); } + bool isEmpty() const { return !size(); } + + T& at(size_t i) + { + ASSERT(i < size()); + return m_buffer.buffer()[i]; + } + const T& at(size_t i) const + { + ASSERT(i < size()); + return m_buffer.buffer()[i]; + } + + T& operator[](size_t i) { return at(i); } + const T& operator[](size_t i) const { return at(i); } + + T* data() { return m_buffer.buffer(); } + const T* data() const { return m_buffer.buffer(); } + T** dataSlot() { return m_buffer.bufferSlot(); } + + iterator begin() { return data(); } + iterator end() { return begin() + m_size; } + const_iterator begin() const { return data(); } + const_iterator end() const 
{ return begin() + m_size; } + + T& first() { return at(0); } + const T& first() const { return at(0); } + T& last() { return at(size() - 1); } + const T& last() const { return at(size() - 1); } + + template<typename U> size_t find(const U&) const; + + void shrink(size_t size); + void grow(size_t size); + void resize(size_t size); + void reserveCapacity(size_t newCapacity); + void shrinkCapacity(size_t newCapacity); + void shrinkToFit() { shrinkCapacity(size()); } + + void clear() { shrinkCapacity(0); } + + template<typename U> void append(const U*, size_t); + template<typename U> void append(const U&); + template<typename U> void uncheckedAppend(const U& val); + template<size_t otherCapacity> void append(const Vector<T, otherCapacity>&); + + template<typename U> void insert(size_t position, const U*, size_t); + template<typename U> void insert(size_t position, const U&); + template<typename U, size_t c> void insert(size_t position, const Vector<U, c>&); + + template<typename U> void prepend(const U*, size_t); + template<typename U> void prepend(const U&); + template<typename U, size_t c> void prepend(const Vector<U, c>&); + + void remove(size_t position); + void remove(size_t position, size_t length); + + void removeLast() + { + ASSERT(!isEmpty()); + shrink(size() - 1); + } + + Vector(size_t size, const T& val) + : m_size(size) + , m_buffer(size) + { + if (begin()) + TypeOperations::uninitializedFill(begin(), end(), val); + } + + void fill(const T&, size_t); + void fill(const T& val) { fill(val, size()); } + + template<typename Iterator> void appendRange(Iterator start, Iterator end); + + T* releaseBuffer(); + + void swap(Vector<T, inlineCapacity>& other) + { + std::swap(m_size, other.m_size); + m_buffer.swap(other.m_buffer); + } + + private: + void expandCapacity(size_t newMinCapacity); + const T* expandCapacity(size_t newMinCapacity, const T*); + template<typename U> U* expandCapacity(size_t newMinCapacity, U*); + + size_t m_size; + Buffer m_buffer; + }; + + template<typename T, size_t inlineCapacity> + Vector<T, inlineCapacity>::Vector(const Vector& other) + : m_size(other.size()) + , m_buffer(other.capacity()) + { + if (begin()) + TypeOperations::uninitializedCopy(other.begin(), other.end(), begin()); + } + + template<typename T, size_t inlineCapacity> + template<size_t otherCapacity> + Vector<T, inlineCapacity>::Vector(const Vector<T, otherCapacity>& other) + : m_size(other.size()) + , m_buffer(other.capacity()) + { + if (begin()) + TypeOperations::uninitializedCopy(other.begin(), other.end(), begin()); + } + + template<typename T, size_t inlineCapacity> + Vector<T, inlineCapacity>& Vector<T, inlineCapacity>::operator=(const Vector<T, inlineCapacity>& other) + { + if (&other == this) + return *this; + + if (size() > other.size()) + shrink(other.size()); + else if (other.size() > capacity()) { + clear(); + reserveCapacity(other.size()); + if (!begin()) + return *this; + } + + std::copy(other.begin(), other.begin() + size(), begin()); + TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end()); + m_size = other.size(); + + return *this; + } + + template<typename T, size_t inlineCapacity> + template<size_t otherCapacity> + Vector<T, inlineCapacity>& Vector<T, inlineCapacity>::operator=(const Vector<T, otherCapacity>& other) + { + if (&other == this) + return *this; + + if (size() > other.size()) + shrink(other.size()); + else if (other.size() > capacity()) { + clear(); + reserveCapacity(other.size()); + if (!begin()) + return *this; + } + + std::copy(other.begin(), 
other.begin() + size(), begin()); + TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end()); + m_size = other.size(); + + return *this; + } + + template<typename T, size_t inlineCapacity> + template<typename U> + size_t Vector<T, inlineCapacity>::find(const U& value) const + { + for (size_t i = 0; i < size(); ++i) { + if (at(i) == value) + return i; + } + return notFound; + } + + template<typename T, size_t inlineCapacity> + void Vector<T, inlineCapacity>::fill(const T& val, size_t newSize) + { + if (size() > newSize) + shrink(newSize); + else if (newSize > capacity()) { + clear(); + reserveCapacity(newSize); + if (!begin()) + return; + } + + std::fill(begin(), end(), val); + TypeOperations::uninitializedFill(end(), begin() + newSize, val); + m_size = newSize; + } + + template<typename T, size_t inlineCapacity> + template<typename Iterator> + void Vector<T, inlineCapacity>::appendRange(Iterator start, Iterator end) + { + for (Iterator it = start; it != end; ++it) + append(*it); + } + + template<typename T, size_t inlineCapacity> + void Vector<T, inlineCapacity>::expandCapacity(size_t newMinCapacity) + { + reserveCapacity(max(newMinCapacity, max(static_cast<size_t>(16), capacity() + capacity() / 4 + 1))); + } + + template<typename T, size_t inlineCapacity> + const T* Vector<T, inlineCapacity>::expandCapacity(size_t newMinCapacity, const T* ptr) + { + if (ptr < begin() || ptr >= end()) { + expandCapacity(newMinCapacity); + return ptr; + } + size_t index = ptr - begin(); + expandCapacity(newMinCapacity); + return begin() + index; + } + + template<typename T, size_t inlineCapacity> template<typename U> + inline U* Vector<T, inlineCapacity>::expandCapacity(size_t newMinCapacity, U* ptr) + { + expandCapacity(newMinCapacity); + return ptr; + } + + template<typename T, size_t inlineCapacity> + void Vector<T, inlineCapacity>::resize(size_t size) + { + if (size <= m_size) + TypeOperations::destruct(begin() + size, end()); + else { + if (size > capacity()) + expandCapacity(size); + if (begin()) + TypeOperations::initialize(end(), begin() + size); + } + + m_size = size; + } + + template<typename T, size_t inlineCapacity> + void Vector<T, inlineCapacity>::shrink(size_t size) + { + ASSERT(size <= m_size); + TypeOperations::destruct(begin() + size, end()); + m_size = size; + } + + template<typename T, size_t inlineCapacity> + void Vector<T, inlineCapacity>::grow(size_t size) + { + ASSERT(size >= m_size); + if (size > capacity()) + expandCapacity(size); + if (begin()) + TypeOperations::initialize(end(), begin() + size); + m_size = size; + } + + template<typename T, size_t inlineCapacity> + void Vector<T, inlineCapacity>::reserveCapacity(size_t newCapacity) + { + if (newCapacity <= capacity()) + return; + T* oldBuffer = begin(); + T* oldEnd = end(); + m_buffer.allocateBuffer(newCapacity); + if (begin()) + TypeOperations::move(oldBuffer, oldEnd, begin()); + m_buffer.deallocateBuffer(oldBuffer); + } + + template<typename T, size_t inlineCapacity> + void Vector<T, inlineCapacity>::shrinkCapacity(size_t newCapacity) + { + if (newCapacity >= capacity()) + return; + + if (newCapacity < size()) + shrink(newCapacity); + + T* oldBuffer = begin(); + if (newCapacity > 0) { + T* oldEnd = end(); + m_buffer.allocateBuffer(newCapacity); + if (begin() != oldBuffer) + TypeOperations::move(oldBuffer, oldEnd, begin()); + } + + m_buffer.deallocateBuffer(oldBuffer); + m_buffer.restoreInlineBufferIfNeeded(); + } + + // Templatizing these is better than just letting the conversion happen implicitly, + // 
because for instance it allows a PassRefPtr to be appended to a RefPtr vector + // without refcount thrash. + + template<typename T, size_t inlineCapacity> template<typename U> + void Vector<T, inlineCapacity>::append(const U* data, size_t dataSize) + { + size_t newSize = m_size + dataSize; + if (newSize > capacity()) { + data = expandCapacity(newSize, data); + if (!begin()) + return; + } + T* dest = end(); + for (size_t i = 0; i < dataSize; ++i) + new (&dest[i]) T(data[i]); + m_size = newSize; + } + + template<typename T, size_t inlineCapacity> template<typename U> + inline void Vector<T, inlineCapacity>::append(const U& val) + { + const U* ptr = &val; + if (size() == capacity()) { + ptr = expandCapacity(size() + 1, ptr); + if (!begin()) + return; + } + +#if COMPILER(MSVC7) + // FIXME: MSVC7 generates compilation errors when trying to assign + // a pointer to a Vector of its base class (i.e. can't downcast). So far + // I've been unable to determine any logical reason for this, so I can + // only assume it is a bug with the compiler. Casting is a bad solution, + // however, because it subverts implicit conversions, so a better + // one is needed. + new (end()) T(static_cast<T>(*ptr)); +#else + new (end()) T(*ptr); +#endif + ++m_size; + } + + // This version of append saves a branch in the case where you know that the + // vector's capacity is large enough for the append to succeed. + + template<typename T, size_t inlineCapacity> template<typename U> + inline void Vector<T, inlineCapacity>::uncheckedAppend(const U& val) + { + ASSERT(size() < capacity()); + const U* ptr = &val; + new (end()) T(*ptr); + ++m_size; + } + + // This method should not be called append, a better name would be appendElements. + // It could also be eliminated entirely, and call sites could just use + // appendRange(val.begin(), val.end()). 
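+
+    // A brief usage sketch of the append path (illustrative values only),
+    // assuming a vector declared with four slots of inline capacity:
+    //
+    //     Vector<int, 4> v;          // empty; elements would live inside v itself
+    //     for (int i = 0; i < 5; ++i)
+    //         v.append(i);           // the 5th append hits expandCapacity, which
+    //                                // reserves max(16, 4 + 4/4 + 1) == 16 heap
+    //                                // slots and moves the inline elements over
+    //
+    // Growth is roughly 1.25x with a floor of 16 slots, so long append runs are
+    // amortized constant time while keeping slack memory modest.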
+ template<typename T, size_t inlineCapacity> template<size_t otherCapacity> + inline void Vector<T, inlineCapacity>::append(const Vector<T, otherCapacity>& val) + { + append(val.begin(), val.size()); + } + + template<typename T, size_t inlineCapacity> template<typename U> + void Vector<T, inlineCapacity>::insert(size_t position, const U* data, size_t dataSize) + { + ASSERT(position <= size()); + size_t newSize = m_size + dataSize; + if (newSize > capacity()) { + data = expandCapacity(newSize, data); + if (!begin()) + return; + } + T* spot = begin() + position; + TypeOperations::moveOverlapping(spot, end(), spot + dataSize); + for (size_t i = 0; i < dataSize; ++i) + new (&spot[i]) T(data[i]); + m_size = newSize; + } + + template<typename T, size_t inlineCapacity> template<typename U> + inline void Vector<T, inlineCapacity>::insert(size_t position, const U& val) + { + ASSERT(position <= size()); + const U* data = &val; + if (size() == capacity()) { + data = expandCapacity(size() + 1, data); + if (!begin()) + return; + } + T* spot = begin() + position; + TypeOperations::moveOverlapping(spot, end(), spot + 1); + new (spot) T(*data); + ++m_size; + } + + template<typename T, size_t inlineCapacity> template<typename U, size_t c> + inline void Vector<T, inlineCapacity>::insert(size_t position, const Vector<U, c>& val) + { + insert(position, val.begin(), val.size()); + } + + template<typename T, size_t inlineCapacity> template<typename U> + void Vector<T, inlineCapacity>::prepend(const U* data, size_t dataSize) + { + insert(0, data, dataSize); + } + + template<typename T, size_t inlineCapacity> template<typename U> + inline void Vector<T, inlineCapacity>::prepend(const U& val) + { + insert(0, val); + } + + template<typename T, size_t inlineCapacity> template<typename U, size_t c> + inline void Vector<T, inlineCapacity>::prepend(const Vector<U, c>& val) + { + insert(0, val.begin(), val.size()); + } + + template<typename T, size_t inlineCapacity> + inline void Vector<T, inlineCapacity>::remove(size_t position) + { + ASSERT(position < size()); + T* spot = begin() + position; + spot->~T(); + TypeOperations::moveOverlapping(spot + 1, end(), spot); + --m_size; + } + + template<typename T, size_t inlineCapacity> + inline void Vector<T, inlineCapacity>::remove(size_t position, size_t length) + { + ASSERT(position < size()); + ASSERT(position + length < size()); + T* beginSpot = begin() + position; + T* endSpot = beginSpot + length; + TypeOperations::destruct(beginSpot, endSpot); + TypeOperations::moveOverlapping(endSpot, end(), beginSpot); + m_size -= length; + } + + template<typename T, size_t inlineCapacity> + inline T* Vector<T, inlineCapacity>::releaseBuffer() + { + T* buffer = m_buffer.releaseBuffer(); + if (inlineCapacity && !buffer && m_size) { + // If the vector had some data, but no buffer to release, + // that means it was using the inline buffer. In that case, + // we create a brand new buffer so the caller always gets one. 
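+            // Either way the caller ends up owning fastMalloc'd memory -- the
+            // released heap buffer, or the fresh copy made below -- and is
+            // expected to dispose of it with fastFree.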
+ size_t bytes = m_size * sizeof(T); + buffer = static_cast<T*>(fastMalloc(bytes)); + memcpy(buffer, data(), bytes); + } + m_size = 0; + return buffer; + } + + template<typename T, size_t inlineCapacity> + void deleteAllValues(const Vector<T, inlineCapacity>& collection) + { + typedef typename Vector<T, inlineCapacity>::const_iterator iterator; + iterator end = collection.end(); + for (iterator it = collection.begin(); it != end; ++it) + delete *it; + } + + template<typename T, size_t inlineCapacity> + inline void swap(Vector<T, inlineCapacity>& a, Vector<T, inlineCapacity>& b) + { + a.swap(b); + } + + template<typename T, size_t inlineCapacity> + bool operator==(const Vector<T, inlineCapacity>& a, const Vector<T, inlineCapacity>& b) + { + if (a.size() != b.size()) + return false; + + return VectorTypeOperations<T>::compare(a.data(), b.data(), a.size()); + } + + template<typename T, size_t inlineCapacity> + inline bool operator!=(const Vector<T, inlineCapacity>& a, const Vector<T, inlineCapacity>& b) + { + return !(a == b); + } + + +} // namespace WTF + +using WTF::Vector; + +#endif // WTF_Vector_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/VectorTraits.h b/src/3rdparty/webkit/JavaScriptCore/wtf/VectorTraits.h new file mode 100644 index 0000000..6efe36c --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/VectorTraits.h @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + +#ifndef WTF_VectorTraits_h +#define WTF_VectorTraits_h + +#include "RefPtr.h" +#include <utility> +#include <memory> + +using std::pair; + +namespace WTF { + + template <typename T> struct IsPod { static const bool value = false; }; + template <> struct IsPod<bool> { static const bool value = true; }; + template <> struct IsPod<char> { static const bool value = true; }; + template <> struct IsPod<signed char> { static const bool value = true; }; + template <> struct IsPod<unsigned char> { static const bool value = true; }; + template <> struct IsPod<short> { static const bool value = true; }; + template <> struct IsPod<unsigned short> { static const bool value = true; }; + template <> struct IsPod<int> { static const bool value = true; }; + template <> struct IsPod<unsigned int> { static const bool value = true; }; + template <> struct IsPod<long> { static const bool value = true; }; + template <> struct IsPod<unsigned long> { static const bool value = true; }; + template <> struct IsPod<long long> { static const bool value = true; }; + template <> struct IsPod<unsigned long long> { static const bool value = true; }; + template <> struct IsPod<float> { static const bool value = true; }; + template <> struct IsPod<double> { static const bool value = true; }; + template <> struct IsPod<long double> { static const bool value = true; }; + template <typename P> struct IsPod<P *> { static const bool value = true; }; + + template<bool isPod, typename T> + class VectorTraitsBase; + + template<typename T> + struct VectorTraitsBase<false, T> + { + static const bool needsDestruction = true; + static const bool needsInitialization = true; + static const bool canInitializeWithMemset = false; + static const bool canMoveWithMemcpy = false; + static const bool canCopyWithMemcpy = false; + static const bool canFillWithMemset = false; + static const bool canCompareWithMemcmp = false; + }; + + template<typename T> + struct VectorTraitsBase<true, T> + { + static const bool needsDestruction = false; + static const bool needsInitialization = false; + static const bool canInitializeWithMemset = false; + static const bool canMoveWithMemcpy = true; + static const bool canCopyWithMemcpy = true; + static const bool canFillWithMemset = sizeof(T) == sizeof(char); + static const bool canCompareWithMemcmp = true; + }; + + template<typename T> + struct VectorTraits : VectorTraitsBase<IsPod<T>::value, T> { }; + + struct SimpleClassVectorTraits + { + static const bool needsDestruction = true; + static const bool needsInitialization = true; + static const bool canInitializeWithMemset = true; + static const bool canMoveWithMemcpy = true; + static const bool canCopyWithMemcpy = false; + static const bool canFillWithMemset = false; + static const bool canCompareWithMemcmp = true; + }; + + // we know RefPtr is simple enough that initializing to 0 and moving with memcpy + // (and then not destructing the original) will totally work + template<typename P> + struct VectorTraits<RefPtr<P> > : SimpleClassVectorTraits { }; + + template<typename P> + struct VectorTraits<std::auto_ptr<P> > : SimpleClassVectorTraits { }; + + template<typename First, typename Second> + struct VectorTraits<pair<First, Second> > + { + typedef VectorTraits<First> FirstTraits; + typedef VectorTraits<Second> SecondTraits; + + static const bool needsDestruction = FirstTraits::needsDestruction || SecondTraits::needsDestruction; + static const bool needsInitialization = FirstTraits::needsInitialization || SecondTraits::needsInitialization; + static 
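+        // (The memcpy/memcmp capabilities below require both component traits
+        // to have them; filling with memset is never attempted for pairs.)
+        // As a hedged illustration, a plain aggregate could opt into the same
+        // fast paths with a specialization along these lines, using a
+        // hypothetical struct Point:
+        //
+        //     struct Point { int x, y; };
+        //     namespace WTF {
+        //         template<> struct VectorTraits<Point>
+        //             : VectorTraitsBase<true, Point> { };
+        //     }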
const bool canInitializeWithMemset = FirstTraits::canInitializeWithMemset && SecondTraits::canInitializeWithMemset; + static const bool canMoveWithMemcpy = FirstTraits::canMoveWithMemcpy && SecondTraits::canMoveWithMemcpy; + static const bool canCopyWithMemcpy = FirstTraits::canCopyWithMemcpy && SecondTraits::canCopyWithMemcpy; + static const bool canFillWithMemset = false; + static const bool canCompareWithMemcmp = FirstTraits::canCompareWithMemcmp && SecondTraits::canCompareWithMemcmp; + }; + +} // namespace WTF + +using WTF::VectorTraits; +using WTF::SimpleClassVectorTraits; + +#endif // WTF_VectorTraits_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/dtoa.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/dtoa.cpp new file mode 100644 index 0000000..adbb115 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/dtoa.cpp @@ -0,0 +1,2439 @@ +/**************************************************************** + * + * The author of this software is David M. Gay. + * + * Copyright (c) 1991, 2000, 2001 by Lucent Technologies. + * Copyright (C) 2002, 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose without fee is hereby granted, provided that this entire notice + * is included in all copies of any software which is or includes a copy + * or modification of this software and in all copies of the supporting + * documentation for such software. + * + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY + * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. + * + ***************************************************************/ + +/* Please send bug reports to + David M. Gay + Bell Laboratories, Room 2C-463 + 600 Mountain Avenue + Murray Hill, NJ 07974-0636 + U.S.A. + dmg@bell-labs.com + */ + +/* On a machine with IEEE extended-precision registers, it is + * necessary to specify double-precision (53-bit) rounding precision + * before invoking strtod or dtoa. If the machine uses (the equivalent + * of) Intel 80x87 arithmetic, the call + * _control87(PC_53, MCW_PC); + * does this with many compilers. Whether this or another call is + * appropriate depends on the compiler; for this to work, it may be + * necessary to #include "float.h" or another system-dependent header + * file. + */ + +/* strtod for IEEE-arithmetic machines. + * + * This strtod returns a nearest machine number to the input decimal + * string (or sets errno to ERANGE). With IEEE arithmetic, ties are + * broken by the IEEE round-even rule. Otherwise ties are broken by + * biased rounding (add half and chop). + * + * Inspired loosely by William D. Clinger's paper "How to Read Floating + * Point Numbers Accurately" [Proc. ACM SIGPLAN '90, pp. 92-101]. + * + * Modifications: + * + * 1. We only require IEEE. + * 2. We get by with floating-point arithmetic in a case that + * Clinger missed -- when we're computing d * 10^n + * for a small integer d and the integer n is not too + * much larger than 22 (the maximum integer k for which + * we can represent 10^k exactly), we may be able to + * compute (d*10^k) * 10^(e-k) with just one roundoff. + * 3. 
Rather than a bit-at-a-time adjustment of the binary + * result in the hard case, we use floating-point + * arithmetic to determine the adjustment to within + * one bit; only in really hard cases do we need to + * compute a second residual. + * 4. Because of 3., we don't need a large table of powers of 10 + * for ten-to-e (just some small tables, e.g. of 10^k + * for 0 <= k <= 22). + */ + +/* + * #define IEEE_8087 for IEEE-arithmetic machines where the least + * significant byte has the lowest address. + * #define IEEE_MC68k for IEEE-arithmetic machines where the most + * significant byte has the lowest address. + * #define No_leftright to omit left-right logic in fast floating-point + * computation of dtoa. + * #define Check_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3 + * and Honor_FLT_ROUNDS is not #defined. + * #define Inaccurate_Divide for IEEE-format with correctly rounded + * products but inaccurate quotients, e.g., for Intel i860. + * #define USE_LONG_LONG on machines that have a "long long" + * integer type (of >= 64 bits), and performance testing shows that + * it is faster than 32-bit fallback (which is often not the case + * on 32-bit machines). On such machines, you can #define Just_16 + * to store 16 bits per 32-bit int32_t when doing high-precision integer + * arithmetic. Whether this speeds things up or slows things down + * depends on the machine and the number being converted. + * #define Bad_float_h if your system lacks a float.h or if it does not + * define some or all of DBL_DIG, DBL_MAX_10_EXP, DBL_MAX_EXP, + * FLT_RADIX, FLT_ROUNDS, and DBL_MAX. + * #define INFNAN_CHECK on IEEE systems to cause strtod to check for + * Infinity and NaN (case insensitively). On some systems (e.g., + * some HP systems), it may be necessary to #define NAN_WORD0 + * appropriately -- to the most significant word of a quiet NaN. + * (On HP Series 700/800 machines, -DNAN_WORD0=0x7ff40000 works.) + * When INFNAN_CHECK is #defined and No_Hex_NaN is not #defined, + * strtod also accepts (case insensitively) strings of the form + * NaN(x), where x is a string of hexadecimal digits and spaces; + * if there is only one string of hexadecimal digits, it is taken + * for the 52 fraction bits of the resulting NaN; if there are two + * or more strings of hex digits, the first is for the high 20 bits, + * the second and subsequent for the low 32 bits, with intervening + * white space ignored; but if this results in none of the 52 + * fraction bits being on (an IEEE Infinity symbol), then NAN_WORD0 + * and NAN_WORD1 are used instead. + * #define NO_IEEE_Scale to disable new (Feb. 1997) logic in strtod that + * avoids underflows on inputs whose result does not underflow. + * If you #define NO_IEEE_Scale on a machine that uses IEEE-format + * floating-point numbers and flushes underflows to zero rather + * than implementing gradual underflow, then you must also #define + * Sudden_Underflow. + * #define YES_ALIAS to permit aliasing certain double values with + * arrays of ULongs. This leads to slightly better code with + * some compilers and was always used prior to 19990916, but it + * is not strictly legal and can cause trouble with aggressively + * optimizing compilers (e.g., gcc 2.95.1 under -O2). + * #define SET_INEXACT if IEEE arithmetic is being used and extra + * computation should be done to set the inexact flag when the + * result is inexact and avoid setting inexact when the result + * is exact. 
In this case, dtoa.c must be compiled in + * an environment, perhaps provided by #include "dtoa.c" in a + * suitable wrapper, that defines two functions, + * int get_inexact(void); + * void clear_inexact(void); + * such that get_inexact() returns a nonzero value if the + * inexact bit is already set, and clear_inexact() sets the + * inexact bit to 0. When SET_INEXACT is #defined, strtod + * also does extra computations to set the underflow and overflow + * flags when appropriate (i.e., when the result is tiny and + * inexact or when it is a numeric value rounded to +-infinity). + * #define NO_ERRNO if strtod should not assign errno = ERANGE when + * the result overflows to +-Infinity or underflows to 0. + */ + +#include "config.h" +#include "dtoa.h" + +#if HAVE(ERRNO_H) +#include <errno.h> +#else +#define NO_ERRNO +#endif +#include <float.h> +#include <math.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> +#include <wtf/AlwaysInline.h> +#include <wtf/Assertions.h> +#include <wtf/FastMalloc.h> +#include <wtf/Threading.h> + +#if COMPILER(MSVC) +#pragma warning(disable: 4244) +#pragma warning(disable: 4245) +#pragma warning(disable: 4554) +#endif + +#if PLATFORM(BIG_ENDIAN) +#define IEEE_MC68k +#elif PLATFORM(MIDDLE_ENDIAN) +#define IEEE_ARM +#else +#define IEEE_8087 +#endif + +#define INFNAN_CHECK + +#if defined(IEEE_8087) + defined(IEEE_MC68k) + defined(IEEE_ARM) != 1 +Exactly one of IEEE_8087, IEEE_ARM or IEEE_MC68k should be defined. +#endif + +namespace WTF { + +#if ENABLE(JSC_MULTIPLE_THREADS) +Mutex* s_dtoaP5Mutex; +#endif + +typedef union { double d; uint32_t L[2]; } U; + +#ifdef YES_ALIAS +#define dval(x) x +#ifdef IEEE_8087 +#define word0(x) ((uint32_t*)&x)[1] +#define word1(x) ((uint32_t*)&x)[0] +#else +#define word0(x) ((uint32_t*)&x)[0] +#define word1(x) ((uint32_t*)&x)[1] +#endif +#else +#ifdef IEEE_8087 +#define word0(x) ((U*)&x)->L[1] +#define word1(x) ((U*)&x)->L[0] +#else +#define word0(x) ((U*)&x)->L[0] +#define word1(x) ((U*)&x)->L[1] +#endif +#define dval(x) ((U*)&x)->d +#endif + +/* The following definition of Storeinc is appropriate for MIPS processors. 
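+ * (Concretely, Storeinc writes b and c as the high and low 16-bit halves of
+ * the 32-bit word at a, in the byte order the target expects, and then
+ * advances a by one word.)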
+ * An alternative that might be better on some machines is + * #define Storeinc(a,b,c) (*a++ = b << 16 | c & 0xffff) + */ +#if defined(IEEE_8087) || defined(IEEE_ARM) +#define Storeinc(a,b,c) (((unsigned short*)a)[1] = (unsigned short)b, ((unsigned short*)a)[0] = (unsigned short)c, a++) +#else +#define Storeinc(a,b,c) (((unsigned short*)a)[0] = (unsigned short)b, ((unsigned short*)a)[1] = (unsigned short)c, a++) +#endif + +#define Exp_shift 20 +#define Exp_shift1 20 +#define Exp_msk1 0x100000 +#define Exp_msk11 0x100000 +#define Exp_mask 0x7ff00000 +#define P 53 +#define Bias 1023 +#define Emin (-1022) +#define Exp_1 0x3ff00000 +#define Exp_11 0x3ff00000 +#define Ebits 11 +#define Frac_mask 0xfffff +#define Frac_mask1 0xfffff +#define Ten_pmax 22 +#define Bletch 0x10 +#define Bndry_mask 0xfffff +#define Bndry_mask1 0xfffff +#define LSB 1 +#define Sign_bit 0x80000000 +#define Log2P 1 +#define Tiny0 0 +#define Tiny1 1 +#define Quick_max 14 +#define Int_max 14 + +#if !defined(NO_IEEE_Scale) +#undef Avoid_Underflow +#define Avoid_Underflow +#endif + +#if !defined(Flt_Rounds) +#if defined(FLT_ROUNDS) +#define Flt_Rounds FLT_ROUNDS +#else +#define Flt_Rounds 1 +#endif +#endif /*Flt_Rounds*/ + + +#define rounded_product(a,b) a *= b +#define rounded_quotient(a,b) a /= b + +#define Big0 (Frac_mask1 | Exp_msk1 * (DBL_MAX_EXP + Bias - 1)) +#define Big1 0xffffffff + +#ifndef Pack_32 +#define Pack_32 +#endif + +#if PLATFORM(PPC64) || PLATFORM(X86_64) +// 64-bit emulation provided by the compiler is likely to be slower than dtoa own code on 32-bit hardware. +#define USE_LONG_LONG +#endif + +#ifndef USE_LONG_LONG +#ifdef Just_16 +#undef Pack_32 +/* When Pack_32 is not defined, we store 16 bits per 32-bit int32_t. + * This makes some inner loops simpler and sometimes saves work + * during multiplications, but it often seems to make things slightly + * slower. Hence the default is now to store 32 bits per int32_t. 
+ */ +#endif +#endif + +#define Kmax 15 + +struct Bigint { + struct Bigint* next; + int k, maxwds, sign, wds; + uint32_t x[1]; +}; + +static Bigint* Balloc(int k) +{ + int x = 1 << k; + Bigint* rv = (Bigint*)fastMalloc(sizeof(Bigint) + (x - 1)*sizeof(uint32_t)); + rv->k = k; + rv->maxwds = x; + rv->next = 0; + rv->sign = rv->wds = 0; + + return rv; +} + +static void Bfree(Bigint* v) +{ + fastFree(v); +} + +#define Bcopy(x, y) memcpy((char*)&x->sign, (char*)&y->sign, y->wds * sizeof(int32_t) + 2 * sizeof(int)) + +static Bigint* multadd(Bigint* b, int m, int a) /* multiply by m and add a */ +{ +#ifdef USE_LONG_LONG + unsigned long long carry; +#else + uint32_t carry; +#endif + + int wds = b->wds; + uint32_t* x = b->x; + int i = 0; + carry = a; + do { +#ifdef USE_LONG_LONG + unsigned long long y = *x * (unsigned long long)m + carry; + carry = y >> 32; + *x++ = (uint32_t)y & 0xffffffffUL; +#else +#ifdef Pack_32 + uint32_t xi = *x; + uint32_t y = (xi & 0xffff) * m + carry; + uint32_t z = (xi >> 16) * m + (y >> 16); + carry = z >> 16; + *x++ = (z << 16) + (y & 0xffff); +#else + uint32_t y = *x * m + carry; + carry = y >> 16; + *x++ = y & 0xffff; +#endif +#endif + } while (++i < wds); + + if (carry) { + if (wds >= b->maxwds) { + Bigint* b1 = Balloc(b->k + 1); + Bcopy(b1, b); + Bfree(b); + b = b1; + } + b->x[wds++] = (uint32_t)carry; + b->wds = wds; + } + return b; +} + +static Bigint* s2b(const char* s, int nd0, int nd, uint32_t y9) +{ + int k; + int32_t y; + int32_t x = (nd + 8) / 9; + + for (k = 0, y = 1; x > y; y <<= 1, k++) { } +#ifdef Pack_32 + Bigint* b = Balloc(k); + b->x[0] = y9; + b->wds = 1; +#else + Bigint* b = Balloc(k + 1); + b->x[0] = y9 & 0xffff; + b->wds = (b->x[1] = y9 >> 16) ? 2 : 1; +#endif + + int i = 9; + if (9 < nd0) { + s += 9; + do { + b = multadd(b, 10, *s++ - '0'); + } while (++i < nd0); + s++; + } else + s += 10; + for (; i < nd; i++) + b = multadd(b, 10, *s++ - '0'); + return b; +} + +static int hi0bits(uint32_t x) +{ + int k = 0; + + if (!(x & 0xffff0000)) { + k = 16; + x <<= 16; + } + if (!(x & 0xff000000)) { + k += 8; + x <<= 8; + } + if (!(x & 0xf0000000)) { + k += 4; + x <<= 4; + } + if (!(x & 0xc0000000)) { + k += 2; + x <<= 2; + } + if (!(x & 0x80000000)) { + k++; + if (!(x & 0x40000000)) + return 32; + } + return k; +} + +static int lo0bits (uint32_t* y) +{ + int k; + uint32_t x = *y; + + if (x & 7) { + if (x & 1) + return 0; + if (x & 2) { + *y = x >> 1; + return 1; + } + *y = x >> 2; + return 2; + } + k = 0; + if (!(x & 0xffff)) { + k = 16; + x >>= 16; + } + if (!(x & 0xff)) { + k += 8; + x >>= 8; + } + if (!(x & 0xf)) { + k += 4; + x >>= 4; + } + if (!(x & 0x3)) { + k += 2; + x >>= 2; + } + if (!(x & 1)) { + k++; + x >>= 1; + if (!x & 1) + return 32; + } + *y = x; + return k; +} + +static Bigint* i2b(int i) +{ + Bigint* b; + + b = Balloc(1); + b->x[0] = i; + b->wds = 1; + return b; +} + +static Bigint* mult(Bigint* a, Bigint* b) +{ + Bigint* c; + int k, wa, wb, wc; + uint32_t *x, *xa, *xae, *xb, *xbe, *xc, *xc0; + uint32_t y; +#ifdef USE_LONG_LONG + unsigned long long carry, z; +#else + uint32_t carry, z; +#endif + + if (a->wds < b->wds) { + c = a; + a = b; + b = c; + } + k = a->k; + wa = a->wds; + wb = b->wds; + wc = wa + wb; + if (wc > a->maxwds) + k++; + c = Balloc(k); + for (x = c->x, xa = x + wc; x < xa; x++) + *x = 0; + xa = a->x; + xae = xa + wa; + xb = b->x; + xbe = xb + wb; + xc0 = c->x; +#ifdef USE_LONG_LONG + for (; xb < xbe; xc0++) { + if ((y = *xb++)) { + x = xa; + xc = xc0; + carry = 0; + do { + z = *x++ * (unsigned long long)y + *xc + carry; + 
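+                // Schoolbook long multiplication: the 64-bit partial product z
+                // is split below into a low word stored into c and a high word
+                // carried into the next column.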
carry = z >> 32; + *xc++ = (uint32_t)z & 0xffffffffUL; + } while (x < xae); + *xc = (uint32_t)carry; + } + } +#else +#ifdef Pack_32 + for (; xb < xbe; xb++, xc0++) { + if ((y = *xb & 0xffff)) { + x = xa; + xc = xc0; + carry = 0; + do { + z = (*x & 0xffff) * y + (*xc & 0xffff) + carry; + carry = z >> 16; + uint32_t z2 = (*x++ >> 16) * y + (*xc >> 16) + carry; + carry = z2 >> 16; + Storeinc(xc, z2, z); + } while (x < xae); + *xc = carry; + } + if ((y = *xb >> 16)) { + x = xa; + xc = xc0; + carry = 0; + uint32_t z2 = *xc; + do { + z = (*x & 0xffff) * y + (*xc >> 16) + carry; + carry = z >> 16; + Storeinc(xc, z, z2); + z2 = (*x++ >> 16) * y + (*xc & 0xffff) + carry; + carry = z2 >> 16; + } while (x < xae); + *xc = z2; + } + } +#else + for(; xb < xbe; xc0++) { + if ((y = *xb++)) { + x = xa; + xc = xc0; + carry = 0; + do { + z = *x++ * y + *xc + carry; + carry = z >> 16; + *xc++ = z & 0xffff; + } while (x < xae); + *xc = carry; + } + } +#endif +#endif + for (xc0 = c->x, xc = xc0 + wc; wc > 0 && !*--xc; --wc) { } + c->wds = wc; + return c; +} + +static Bigint* p5s; +static int p5s_count; + +static Bigint* pow5mult(Bigint* b, int k) +{ + static int p05[3] = { 5, 25, 125 }; + + if (int i = k & 3) + b = multadd(b, p05[i - 1], 0); + + if (!(k >>= 2)) + return b; + +#if ENABLE(JSC_MULTIPLE_THREADS) + s_dtoaP5Mutex->lock(); +#endif + Bigint* p5 = p5s; + if (!p5) { + /* first time */ + p5 = p5s = i2b(625); + p5s_count = 1; + } + int p5s_count_local = p5s_count; +#if ENABLE(JSC_MULTIPLE_THREADS) + s_dtoaP5Mutex->unlock(); +#endif + int p5s_used = 0; + + for (;;) { + if (k & 1) { + Bigint* b1 = mult(b, p5); + Bfree(b); + b = b1; + } + if (!(k >>= 1)) + break; + + if (++p5s_used == p5s_count_local) { +#if ENABLE(JSC_MULTIPLE_THREADS) + s_dtoaP5Mutex->lock(); +#endif + if (p5s_used == p5s_count) { + ASSERT(!p5->next); + p5->next = mult(p5, p5); + ++p5s_count; + } + + p5s_count_local = p5s_count; +#if ENABLE(JSC_MULTIPLE_THREADS) + s_dtoaP5Mutex->unlock(); +#endif + } + p5 = p5->next; + } + + return b; +} + +static Bigint* lshift(Bigint* b, int k) +{ + Bigint* result = b; + +#ifdef Pack_32 + int n = k >> 5; +#else + int n = k >> 4; +#endif + + int k1 = b->k; + int n1 = n + b->wds + 1; + for (int i = b->maxwds; n1 > i; i <<= 1) + k1++; + if (b->k < k1) + result = Balloc(k1); + + const uint32_t* srcStart = b->x; + uint32_t* dstStart = result->x; + const uint32_t* src = srcStart + b->wds - 1; + uint32_t* dst = dstStart + n1 - 1; +#ifdef Pack_32 + if (k &= 0x1f) { + uint32_t hiSubword = 0; + int s = 32 - k; + for (; src >= srcStart; --src) { + *dst-- = hiSubword | *src >> s; + hiSubword = *src << k; + } + *dst = hiSubword; + ASSERT(dst == dstStart + n); + result->wds = b->wds + n + (result->x[n1 - 1] != 0); + } +#else + if (k &= 0xf) { + uint32_t hiSubword = 0; + int s = 16 - k; + for (; src >= srcStart; --src) { + *dst-- = hiSubword | *src >> s; + hiSubword = (*src << k) & 0xffff; + } + *dst = hiSubword; + ASSERT(dst == dstStart + n); + result->wds = b->wds + n + (result->x[n1 - 1] != 0); + } + #endif + else { + do { + *--dst = *src--; + } while (src >= srcStart); + result->wds = b->wds + n; + } + for (dst = dstStart + n; dst != dstStart; ) + *--dst = 0; + + if (result != b) + Bfree(b); + return result; +} + +static int cmp(Bigint* a, Bigint* b) +{ + uint32_t *xa, *xa0, *xb, *xb0; + int i, j; + + i = a->wds; + j = b->wds; + ASSERT(i <= 1 || a->x[i - 1]); + ASSERT(j <= 1 || b->x[j - 1]); + if (i -= j) + return i; + xa0 = a->x; + xa = xa0 + j; + xb0 = b->x; + xb = xb0 + j; + for (;;) { + if (*--xa != *--xb) + 
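+            // Magnitude comparison runs from the most significant word down;
+            // the first mismatch settles the ordering.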
return *xa < *xb ? -1 : 1; + if (xa <= xa0) + break; + } + return 0; +} + +static Bigint* diff(Bigint* a, Bigint* b) +{ + Bigint* c; + int i, wa, wb; + uint32_t *xa, *xae, *xb, *xbe, *xc; + + i = cmp(a,b); + if (!i) { + c = Balloc(0); + c->wds = 1; + c->x[0] = 0; + return c; + } + if (i < 0) { + c = a; + a = b; + b = c; + i = 1; + } else + i = 0; + c = Balloc(a->k); + c->sign = i; + wa = a->wds; + xa = a->x; + xae = xa + wa; + wb = b->wds; + xb = b->x; + xbe = xb + wb; + xc = c->x; +#ifdef USE_LONG_LONG + unsigned long long borrow = 0; + do { + unsigned long long y = (unsigned long long)*xa++ - *xb++ - borrow; + borrow = y >> 32 & (uint32_t)1; + *xc++ = (uint32_t)y & 0xffffffffUL; + } while (xb < xbe); + while (xa < xae) { + unsigned long long y = *xa++ - borrow; + borrow = y >> 32 & (uint32_t)1; + *xc++ = (uint32_t)y & 0xffffffffUL; + } +#else + uint32_t borrow = 0; +#ifdef Pack_32 + do { + uint32_t y = (*xa & 0xffff) - (*xb & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + uint32_t z = (*xa++ >> 16) - (*xb++ >> 16) - borrow; + borrow = (z & 0x10000) >> 16; + Storeinc(xc, z, y); + } while (xb < xbe); + while (xa < xae) { + uint32_t y = (*xa & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + uint32_t z = (*xa++ >> 16) - borrow; + borrow = (z & 0x10000) >> 16; + Storeinc(xc, z, y); + } +#else + do { + uint32_t y = *xa++ - *xb++ - borrow; + borrow = (y & 0x10000) >> 16; + *xc++ = y & 0xffff; + } while (xb < xbe); + while (xa < xae) { + uint32_t y = *xa++ - borrow; + borrow = (y & 0x10000) >> 16; + *xc++ = y & 0xffff; + } +#endif +#endif + while (!*--xc) + wa--; + c->wds = wa; + return c; +} + +static double ulp(double x) +{ + register int32_t L; + double a; + + L = (word0(x) & Exp_mask) - (P - 1) * Exp_msk1; +#ifndef Avoid_Underflow +#ifndef Sudden_Underflow + if (L > 0) { +#endif +#endif + word0(a) = L; + word1(a) = 0; +#ifndef Avoid_Underflow +#ifndef Sudden_Underflow + } else { + L = -L >> Exp_shift; + if (L < Exp_shift) { + word0(a) = 0x80000 >> L; + word1(a) = 0; + } else { + word0(a) = 0; + L -= Exp_shift; + word1(a) = L >= 31 ? 1 : 1 << 31 - L; + } + } +#endif +#endif + return dval(a); +} + +static double b2d(Bigint* a, int* e) +{ + uint32_t* xa; + uint32_t* xa0; + uint32_t w; + uint32_t y; + uint32_t z; + int k; + double d; + +#define d0 word0(d) +#define d1 word1(d) + + xa0 = a->x; + xa = xa0 + a->wds; + y = *--xa; + ASSERT(y); + k = hi0bits(y); + *e = 32 - k; +#ifdef Pack_32 + if (k < Ebits) { + d0 = Exp_1 | y >> Ebits - k; + w = xa > xa0 ? *--xa : 0; + d1 = y << (32 - Ebits) + k | w >> Ebits - k; + goto ret_d; + } + z = xa > xa0 ? *--xa : 0; + if (k -= Ebits) { + d0 = Exp_1 | y << k | z >> 32 - k; + y = xa > xa0 ? *--xa : 0; + d1 = z << k | y >> 32 - k; + } else { + d0 = Exp_1 | y; + d1 = z; + } +#else + if (k < Ebits + 16) { + z = xa > xa0 ? *--xa : 0; + d0 = Exp_1 | y << k - Ebits | z >> Ebits + 16 - k; + w = xa > xa0 ? *--xa : 0; + y = xa > xa0 ? *--xa : 0; + d1 = z << k + 16 - Ebits | w << k - Ebits | y >> 16 + Ebits - k; + goto ret_d; + } + z = xa > xa0 ? *--xa : 0; + w = xa > xa0 ? *--xa : 0; + k -= Ebits + 16; + d0 = Exp_1 | y << k + 16 | z << k | w >> 16 - k; + y = xa > xa0 ? 
*--xa : 0; + d1 = w << k + 16 | y << k; +#endif +ret_d: +#undef d0 +#undef d1 + return dval(d); +} + +static Bigint* d2b(double d, int* e, int* bits) +{ + Bigint* b; + int de, k; + uint32_t *x, y, z; +#ifndef Sudden_Underflow + int i; +#endif +#define d0 word0(d) +#define d1 word1(d) + +#ifdef Pack_32 + b = Balloc(1); +#else + b = Balloc(2); +#endif + x = b->x; + + z = d0 & Frac_mask; + d0 &= 0x7fffffff; /* clear sign bit, which we ignore */ +#ifdef Sudden_Underflow + de = (int)(d0 >> Exp_shift); +#else + if ((de = (int)(d0 >> Exp_shift))) + z |= Exp_msk1; +#endif +#ifdef Pack_32 + if ((y = d1)) { + if ((k = lo0bits(&y))) { + x[0] = y | z << 32 - k; + z >>= k; + } else + x[0] = y; +#ifndef Sudden_Underflow + i = +#endif + b->wds = (x[1] = z) ? 2 : 1; + } else { + k = lo0bits(&z); + x[0] = z; +#ifndef Sudden_Underflow + i = +#endif + b->wds = 1; + k += 32; + } +#else + if ((y = d1)) { + if ((k = lo0bits(&y))) { + if (k >= 16) { + x[0] = y | z << 32 - k & 0xffff; + x[1] = z >> k - 16 & 0xffff; + x[2] = z >> k; + i = 2; + } else { + x[0] = y & 0xffff; + x[1] = y >> 16 | z << 16 - k & 0xffff; + x[2] = z >> k & 0xffff; + x[3] = z >> k + 16; + i = 3; + } + } else { + x[0] = y & 0xffff; + x[1] = y >> 16; + x[2] = z & 0xffff; + x[3] = z >> 16; + i = 3; + } + } else { + k = lo0bits(&z); + if (k >= 16) { + x[0] = z; + i = 0; + } else { + x[0] = z & 0xffff; + x[1] = z >> 16; + i = 1; + } + k += 32; + } while (!x[i]) + --i; + b->wds = i + 1; +#endif +#ifndef Sudden_Underflow + if (de) { +#endif + *e = de - Bias - (P - 1) + k; + *bits = P - k; +#ifndef Sudden_Underflow + } else { + *e = de - Bias - (P - 1) + 1 + k; +#ifdef Pack_32 + *bits = (32 * i) - hi0bits(x[i - 1]); +#else + *bits = (i + 2) * 16 - hi0bits(x[i]); +#endif + } +#endif + return b; +} +#undef d0 +#undef d1 + +static double ratio(Bigint* a, Bigint* b) +{ + double da, db; + int k, ka, kb; + + dval(da) = b2d(a, &ka); + dval(db) = b2d(b, &kb); +#ifdef Pack_32 + k = ka - kb + 32 * (a->wds - b->wds); +#else + k = ka - kb + 16 * (a->wds - b->wds); +#endif + if (k > 0) + word0(da) += k * Exp_msk1; + else { + k = -k; + word0(db) += k * Exp_msk1; + } + return dval(da) / dval(db); +} + +static const double tens[] = { + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + 1e20, 1e21, 1e22 +}; + +static const double bigtens[] = { 1e16, 1e32, 1e64, 1e128, 1e256 }; +static const double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128, +#ifdef Avoid_Underflow + 9007199254740992. * 9007199254740992.e-256 + /* = 2^106 * 1e-53 */ +#else + 1e-256 +#endif +}; + +/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */ +/* flag unnecessarily. It leads to a song and dance at the end of strtod. 
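+   (Numerically, tinytens[4] == 2^53 * 2^53 * 1e-256 == 2^106 * 1e-256: the
+   real 1e-256 pre-scaled by the 2*P == 106-bit Avoid_Underflow factor that
+   strtod divides back out at the very end.)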
*/ +#define Scale_Bit 0x10 +#define n_bigtens 5 + +#if defined(INFNAN_CHECK) + +#ifndef NAN_WORD0 +#define NAN_WORD0 0x7ff80000 +#endif + +#ifndef NAN_WORD1 +#define NAN_WORD1 0 +#endif + +static int match(const char** sp, const char* t) +{ + int c, d; + const char* s = *sp; + + while ((d = *t++)) { + if ((c = *++s) >= 'A' && c <= 'Z') + c += 'a' - 'A'; + if (c != d) + return 0; + } + *sp = s + 1; + return 1; +} + +#ifndef No_Hex_NaN +static void hexnan(double* rvp, const char** sp) +{ + uint32_t c, x[2]; + const char* s; + int havedig, udx0, xshift; + + x[0] = x[1] = 0; + havedig = xshift = 0; + udx0 = 1; + s = *sp; + while ((c = *(const unsigned char*)++s)) { + if (c >= '0' && c <= '9') + c -= '0'; + else if (c >= 'a' && c <= 'f') + c += 10 - 'a'; + else if (c >= 'A' && c <= 'F') + c += 10 - 'A'; + else if (c <= ' ') { + if (udx0 && havedig) { + udx0 = 0; + xshift = 1; + } + continue; + } else if (/*(*/ c == ')' && havedig) { + *sp = s + 1; + break; + } else + return; /* invalid form: don't change *sp */ + havedig = 1; + if (xshift) { + xshift = 0; + x[0] = x[1]; + x[1] = 0; + } + if (udx0) + x[0] = (x[0] << 4) | (x[1] >> 28); + x[1] = (x[1] << 4) | c; + } + if ((x[0] &= 0xfffff) || x[1]) { + word0(*rvp) = Exp_mask | x[0]; + word1(*rvp) = x[1]; + } +} +#endif /*No_Hex_NaN*/ +#endif /* INFNAN_CHECK */ + +double strtod(const char* s00, char** se) +{ +#ifdef Avoid_Underflow + int scale; +#endif + int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign, + e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign; + const char *s, *s0, *s1; + double aadj, aadj1, adj, rv, rv0; + int32_t L; + uint32_t y, z; + Bigint *bb = NULL, *bb1 = NULL, *bd = NULL, *bd0 = NULL, *bs = NULL, *delta = NULL; +#ifdef SET_INEXACT + int inexact, oldinexact; +#endif + + sign = nz0 = nz = 0; + dval(rv) = 0.; + for (s = s00; ; s++) + switch (*s) { + case '-': + sign = 1; + /* no break */ + case '+': + if (*++s) + goto break2; + /* no break */ + case 0: + goto ret0; + case '\t': + case '\n': + case '\v': + case '\f': + case '\r': + case ' ': + continue; + default: + goto break2; + } +break2: + if (*s == '0') { + nz0 = 1; + while (*++s == '0') { } + if (!*s) + goto ret; + } + s0 = s; + y = z = 0; + for (nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++) + if (nd < 9) + y = (10 * y) + c - '0'; + else if (nd < 16) + z = (10 * z) + c - '0'; + nd0 = nd; + if (c == '.') { + c = *++s; + if (!nd) { + for (; c == '0'; c = *++s) + nz++; + if (c > '0' && c <= '9') { + s0 = s; + nf += nz; + nz = 0; + goto have_dig; + } + goto dig_done; + } + for (; c >= '0' && c <= '9'; c = *++s) { +have_dig: + nz++; + if (c -= '0') { + nf += nz; + for (i = 1; i < nz; i++) + if (nd++ < 9) + y *= 10; + else if (nd <= DBL_DIG + 1) + z *= 10; + if (nd++ < 9) + y = (10 * y) + c; + else if (nd <= DBL_DIG + 1) + z = (10 * z) + c; + nz = 0; + } + } + } +dig_done: + e = 0; + if (c == 'e' || c == 'E') { + if (!nd && !nz && !nz0) { + goto ret0; + } + s00 = s; + esign = 0; + switch (c = *++s) { + case '-': + esign = 1; + case '+': + c = *++s; + } + if (c >= '0' && c <= '9') { + while (c == '0') + c = *++s; + if (c > '0' && c <= '9') { + L = c - '0'; + s1 = s; + while ((c = *++s) >= '0' && c <= '9') + L = (10 * L) + c - '0'; + if (s - s1 > 8 || L > 19999) + /* Avoid confusion from exponents + * so large that e might overflow. 
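+ * (For instance, "1e999999999" takes this path: e is pinned at
+ * 19999 and the range logic below turns the result into Infinity,
+ * setting errno to ERANGE unless NO_ERRNO is defined.)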
+ */ + e = 19999; /* safe for 16 bit ints */ + else + e = (int)L; + if (esign) + e = -e; + } else + e = 0; + } else + s = s00; + } + if (!nd) { + if (!nz && !nz0) { +#ifdef INFNAN_CHECK + /* Check for Nan and Infinity */ + switch(c) { + case 'i': + case 'I': + if (match(&s,"nf")) { + --s; + if (!match(&s,"inity")) + ++s; + word0(rv) = 0x7ff00000; + word1(rv) = 0; + goto ret; + } + break; + case 'n': + case 'N': + if (match(&s, "an")) { + word0(rv) = NAN_WORD0; + word1(rv) = NAN_WORD1; +#ifndef No_Hex_NaN + if (*s == '(') /*)*/ + hexnan(&rv, &s); +#endif + goto ret; + } + } +#endif /* INFNAN_CHECK */ +ret0: + s = s00; + sign = 0; + } + goto ret; + } + e1 = e -= nf; + + /* Now we have nd0 digits, starting at s0, followed by a + * decimal point, followed by nd-nd0 digits. The number we're + * after is the integer represented by those digits times + * 10**e */ + + if (!nd0) + nd0 = nd; + k = nd < DBL_DIG + 1 ? nd : DBL_DIG + 1; + dval(rv) = y; + if (k > 9) { +#ifdef SET_INEXACT + if (k > DBL_DIG) + oldinexact = get_inexact(); +#endif + dval(rv) = tens[k - 9] * dval(rv) + z; + } + bd0 = 0; + if (nd <= DBL_DIG && Flt_Rounds == 1) { + if (!e) + goto ret; + if (e > 0) { + if (e <= Ten_pmax) { + /* rv = */ rounded_product(dval(rv), tens[e]); + goto ret; + } + i = DBL_DIG - nd; + if (e <= Ten_pmax + i) { + /* A fancier test would sometimes let us do + * this for larger i values. + */ + e -= i; + dval(rv) *= tens[i]; + /* rv = */ rounded_product(dval(rv), tens[e]); + goto ret; + } + } +#ifndef Inaccurate_Divide + else if (e >= -Ten_pmax) { + /* rv = */ rounded_quotient(dval(rv), tens[-e]); + goto ret; + } +#endif + } + e1 += nd - k; + +#ifdef SET_INEXACT + inexact = 1; + if (k <= DBL_DIG) + oldinexact = get_inexact(); +#endif +#ifdef Avoid_Underflow + scale = 0; +#endif + + /* Get starting approximation = rv * 10**e1 */ + + if (e1 > 0) { + if ((i = e1 & 15)) + dval(rv) *= tens[i]; + if (e1 &= ~15) { + if (e1 > DBL_MAX_10_EXP) { +ovfl: +#ifndef NO_ERRNO + errno = ERANGE; +#endif + /* Can't trust HUGE_VAL */ + word0(rv) = Exp_mask; + word1(rv) = 0; +#ifdef SET_INEXACT + /* set overflow bit */ + dval(rv0) = 1e300; + dval(rv0) *= dval(rv0); +#endif + if (bd0) + goto retfree; + goto ret; + } + e1 >>= 4; + for (j = 0; e1 > 1; j++, e1 >>= 1) + if (e1 & 1) + dval(rv) *= bigtens[j]; + /* The last multiplication could overflow. */ + word0(rv) -= P * Exp_msk1; + dval(rv) *= bigtens[j]; + if ((z = word0(rv) & Exp_mask) > Exp_msk1 * (DBL_MAX_EXP + Bias - P)) + goto ovfl; + if (z > Exp_msk1 * (DBL_MAX_EXP + Bias - 1 - P)) { + /* set to largest number */ + /* (Can't trust DBL_MAX) */ + word0(rv) = Big0; + word1(rv) = Big1; + } else + word0(rv) += P * Exp_msk1; + } + } else if (e1 < 0) { + e1 = -e1; + if ((i = e1 & 15)) + dval(rv) /= tens[i]; + if (e1 >>= 4) { + if (e1 >= 1 << n_bigtens) + goto undfl; +#ifdef Avoid_Underflow + if (e1 & Scale_Bit) + scale = 2 * P; + for (j = 0; e1 > 0; j++, e1 >>= 1) + if (e1 & 1) + dval(rv) *= tinytens[j]; + if (scale && (j = (2 * P) + 1 - ((word0(rv) & Exp_mask) >> Exp_shift)) > 0) { + /* scaled rv is denormal; zap j low bits */ + if (j >= 32) { + word1(rv) = 0; + if (j >= 53) + word0(rv) = (P + 2) * Exp_msk1; + else + word0(rv) &= 0xffffffff << j - 32; + } else + word1(rv) &= 0xffffffff << j; + } +#else + for (j = 0; e1 > 1; j++, e1 >>= 1) + if (e1 & 1) + dval(rv) *= tinytens[j]; + /* The last multiplication could underflow. */ + dval(rv0) = dval(rv); + dval(rv) *= tinytens[j]; + if (!dval(rv)) { + dval(rv) = 2. 
* dval(rv0); + dval(rv) *= tinytens[j]; +#endif + if (!dval(rv)) { +undfl: + dval(rv) = 0.; +#ifndef NO_ERRNO + errno = ERANGE; +#endif + if (bd0) + goto retfree; + goto ret; + } +#ifndef Avoid_Underflow + word0(rv) = Tiny0; + word1(rv) = Tiny1; + /* The refinement below will clean + * this approximation up. + */ + } +#endif + } + } + + /* Now the hard part -- adjusting rv to the correct value.*/ + + /* Put digits into bd: true value = bd * 10^e */ + + bd0 = s2b(s0, nd0, nd, y); + + for (;;) { + bd = Balloc(bd0->k); + Bcopy(bd, bd0); + bb = d2b(dval(rv), &bbe, &bbbits); /* rv = bb * 2^bbe */ + bs = i2b(1); + + if (e >= 0) { + bb2 = bb5 = 0; + bd2 = bd5 = e; + } else { + bb2 = bb5 = -e; + bd2 = bd5 = 0; + } + if (bbe >= 0) + bb2 += bbe; + else + bd2 -= bbe; + bs2 = bb2; +#ifdef Avoid_Underflow + j = bbe - scale; + i = j + bbbits - 1; /* logb(rv) */ + if (i < Emin) /* denormal */ + j += P - Emin; + else + j = P + 1 - bbbits; +#else /*Avoid_Underflow*/ +#ifdef Sudden_Underflow + j = P + 1 - bbbits; +#else /*Sudden_Underflow*/ + j = bbe; + i = j + bbbits - 1; /* logb(rv) */ + if (i < Emin) /* denormal */ + j += P - Emin; + else + j = P + 1 - bbbits; +#endif /*Sudden_Underflow*/ +#endif /*Avoid_Underflow*/ + bb2 += j; + bd2 += j; +#ifdef Avoid_Underflow + bd2 += scale; +#endif + i = bb2 < bd2 ? bb2 : bd2; + if (i > bs2) + i = bs2; + if (i > 0) { + bb2 -= i; + bd2 -= i; + bs2 -= i; + } + if (bb5 > 0) { + bs = pow5mult(bs, bb5); + bb1 = mult(bs, bb); + Bfree(bb); + bb = bb1; + } + if (bb2 > 0) + bb = lshift(bb, bb2); + if (bd5 > 0) + bd = pow5mult(bd, bd5); + if (bd2 > 0) + bd = lshift(bd, bd2); + if (bs2 > 0) + bs = lshift(bs, bs2); + delta = diff(bb, bd); + dsign = delta->sign; + delta->sign = 0; + i = cmp(delta, bs); + + if (i < 0) { + /* Error is less than half an ulp -- check for + * special case of mantissa a power of two. + */ + if (dsign || word1(rv) || word0(rv) & Bndry_mask +#ifdef Avoid_Underflow + || (word0(rv) & Exp_mask) <= (2 * P + 1) * Exp_msk1 +#else + || (word0(rv) & Exp_mask) <= Exp_msk1 +#endif + ) { +#ifdef SET_INEXACT + if (!delta->x[0] && delta->wds <= 1) + inexact = 0; +#endif + break; + } + if (!delta->x[0] && delta->wds <= 1) { + /* exact result */ +#ifdef SET_INEXACT + inexact = 0; +#endif + break; + } + delta = lshift(delta,Log2P); + if (cmp(delta, bs) > 0) + goto drop_down; + break; + } + if (i == 0) { + /* exactly half-way between */ + if (dsign) { + if ((word0(rv) & Bndry_mask1) == Bndry_mask1 + && word1(rv) == ( +#ifdef Avoid_Underflow + (scale && (y = word0(rv) & Exp_mask) <= 2 * P * Exp_msk1) + ? (0xffffffff & (0xffffffff << (2 * P + 1 - (y >> Exp_shift)))) : +#endif + 0xffffffff)) { + /*boundary case -- increment exponent*/ + word0(rv) = (word0(rv) & Exp_mask) + Exp_msk1; + word1(rv) = 0; +#ifdef Avoid_Underflow + dsign = 0; +#endif + break; + } + } else if (!(word0(rv) & Bndry_mask) && !word1(rv)) { +drop_down: + /* boundary case -- decrement exponent */ +#ifdef Sudden_Underflow /*{{*/ + L = word0(rv) & Exp_mask; +#ifdef Avoid_Underflow + if (L <= (scale ? 
(2 * P + 1) * Exp_msk1 : Exp_msk1)) +#else + if (L <= Exp_msk1) +#endif /*Avoid_Underflow*/ + goto undfl; + L -= Exp_msk1; +#else /*Sudden_Underflow}{*/ +#ifdef Avoid_Underflow + if (scale) { + L = word0(rv) & Exp_mask; + if (L <= (2 * P + 1) * Exp_msk1) { + if (L > (P + 2) * Exp_msk1) + /* round even ==> */ + /* accept rv */ + break; + /* rv = smallest denormal */ + goto undfl; + } + } +#endif /*Avoid_Underflow*/ + L = (word0(rv) & Exp_mask) - Exp_msk1; +#endif /*Sudden_Underflow}}*/ + word0(rv) = L | Bndry_mask1; + word1(rv) = 0xffffffff; + break; + } + if (!(word1(rv) & LSB)) + break; + if (dsign) + dval(rv) += ulp(dval(rv)); + else { + dval(rv) -= ulp(dval(rv)); +#ifndef Sudden_Underflow + if (!dval(rv)) + goto undfl; +#endif + } +#ifdef Avoid_Underflow + dsign = 1 - dsign; +#endif + break; + } + if ((aadj = ratio(delta, bs)) <= 2.) { + if (dsign) + aadj = aadj1 = 1.; + else if (word1(rv) || word0(rv) & Bndry_mask) { +#ifndef Sudden_Underflow + if (word1(rv) == Tiny1 && !word0(rv)) + goto undfl; +#endif + aadj = 1.; + aadj1 = -1.; + } else { + /* special case -- power of FLT_RADIX to be */ + /* rounded down... */ + + if (aadj < 2. / FLT_RADIX) + aadj = 1. / FLT_RADIX; + else + aadj *= 0.5; + aadj1 = -aadj; + } + } else { + aadj *= 0.5; + aadj1 = dsign ? aadj : -aadj; +#ifdef Check_FLT_ROUNDS + switch (Rounding) { + case 2: /* towards +infinity */ + aadj1 -= 0.5; + break; + case 0: /* towards 0 */ + case 3: /* towards -infinity */ + aadj1 += 0.5; + } +#else + if (Flt_Rounds == 0) + aadj1 += 0.5; +#endif /*Check_FLT_ROUNDS*/ + } + y = word0(rv) & Exp_mask; + + /* Check for overflow */ + + if (y == Exp_msk1 * (DBL_MAX_EXP + Bias - 1)) { + dval(rv0) = dval(rv); + word0(rv) -= P * Exp_msk1; + adj = aadj1 * ulp(dval(rv)); + dval(rv) += adj; + if ((word0(rv) & Exp_mask) >= Exp_msk1 * (DBL_MAX_EXP + Bias - P)) { + if (word0(rv0) == Big0 && word1(rv0) == Big1) + goto ovfl; + word0(rv) = Big0; + word1(rv) = Big1; + goto cont; + } else + word0(rv) += P * Exp_msk1; + } else { +#ifdef Avoid_Underflow + if (scale && y <= 2 * P * Exp_msk1) { + if (aadj <= 0x7fffffff) { + if ((z = (uint32_t)aadj) <= 0) + z = 1; + aadj = z; + aadj1 = dsign ? aadj : -aadj; + } + word0(aadj1) += (2 * P + 1) * Exp_msk1 - y; + } + adj = aadj1 * ulp(dval(rv)); + dval(rv) += adj; +#else +#ifdef Sudden_Underflow + if ((word0(rv) & Exp_mask) <= P * Exp_msk1) { + dval(rv0) = dval(rv); + word0(rv) += P * Exp_msk1; + adj = aadj1 * ulp(dval(rv)); + dval(rv) += adj; + if ((word0(rv) & Exp_mask) <= P * Exp_msk1) + { + if (word0(rv0) == Tiny0 && word1(rv0) == Tiny1) + goto undfl; + word0(rv) = Tiny0; + word1(rv) = Tiny1; + goto cont; + } + else + word0(rv) -= P * Exp_msk1; + } else { + adj = aadj1 * ulp(dval(rv)); + dval(rv) += adj; + } +#else /*Sudden_Underflow*/ + /* Compute adj so that the IEEE rounding rules will + * correctly round rv + adj in some half-way cases. + * If rv * ulp(rv) is denormalized (i.e., + * y <= (P - 1) * Exp_msk1), we must adjust aadj to avoid + * trouble from bits lost to denormalization; + * example: 1.2e-307 . + */ + if (y <= (P - 1) * Exp_msk1 && aadj > 1.) { + aadj1 = (double)(int)(aadj + 0.5); + if (!dsign) + aadj1 = -aadj1; + } + adj = aadj1 * ulp(dval(rv)); + dval(rv) += adj; +#endif /*Sudden_Underflow*/ +#endif /*Avoid_Underflow*/ + } + z = word0(rv) & Exp_mask; +#ifndef SET_INEXACT +#ifdef Avoid_Underflow + if (!scale) +#endif + if (y == z) { + /* Can we stop now? */ + L = (int32_t)aadj; + aadj -= L; + /* The tolerances below are conservative. 
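+   If aadj's fractional part lands within 1e-7 of one half, the adjustment may
+   sit on a rounding boundary, so rather than stopping here the loop takes
+   another iteration to be safe.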
*/ + if (dsign || word1(rv) || word0(rv) & Bndry_mask) { + if (aadj < .4999999 || aadj > .5000001) + break; + } else if (aadj < .4999999 / FLT_RADIX) + break; + } +#endif +cont: + Bfree(bb); + Bfree(bd); + Bfree(bs); + Bfree(delta); + } +#ifdef SET_INEXACT + if (inexact) { + if (!oldinexact) { + word0(rv0) = Exp_1 + (70 << Exp_shift); + word1(rv0) = 0; + dval(rv0) += 1.; + } + } else if (!oldinexact) + clear_inexact(); +#endif +#ifdef Avoid_Underflow + if (scale) { + word0(rv0) = Exp_1 - 2 * P * Exp_msk1; + word1(rv0) = 0; + dval(rv) *= dval(rv0); +#ifndef NO_ERRNO + /* try to avoid the bug of testing an 8087 register value */ + if (word0(rv) == 0 && word1(rv) == 0) + errno = ERANGE; +#endif + } +#endif /* Avoid_Underflow */ +#ifdef SET_INEXACT + if (inexact && !(word0(rv) & Exp_mask)) { + /* set underflow bit */ + dval(rv0) = 1e-300; + dval(rv0) *= dval(rv0); + } +#endif +retfree: + Bfree(bb); + Bfree(bd); + Bfree(bs); + Bfree(bd0); + Bfree(delta); +ret: + if (se) + *se = (char*)s; + return sign ? -dval(rv) : dval(rv); +} + +static int quorem(Bigint* b, Bigint* S) +{ + int n; + uint32_t *bx, *bxe, q, *sx, *sxe; +#ifdef USE_LONG_LONG + unsigned long long borrow, carry, y, ys; +#else + uint32_t borrow, carry, y, ys; +#ifdef Pack_32 + uint32_t si, z, zs; +#endif +#endif + + n = S->wds; + ASSERT_WITH_MESSAGE(b->wds <= n, "oversize b in quorem"); + if (b->wds < n) + return 0; + sx = S->x; + sxe = sx + --n; + bx = b->x; + bxe = bx + n; + q = *bxe / (*sxe + 1); /* ensure q <= true quotient */ + ASSERT_WITH_MESSAGE(q <= 9, "oversized quotient in quorem"); + if (q) { + borrow = 0; + carry = 0; + do { +#ifdef USE_LONG_LONG + ys = *sx++ * (unsigned long long)q + carry; + carry = ys >> 32; + y = *bx - (ys & 0xffffffffUL) - borrow; + borrow = y >> 32 & (uint32_t)1; + *bx++ = (uint32_t)y & 0xffffffffUL; +#else +#ifdef Pack_32 + si = *sx++; + ys = (si & 0xffff) * q + carry; + zs = (si >> 16) * q + (ys >> 16); + carry = zs >> 16; + y = (*bx & 0xffff) - (ys & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + z = (*bx >> 16) - (zs & 0xffff) - borrow; + borrow = (z & 0x10000) >> 16; + Storeinc(bx, z, y); +#else + ys = *sx++ * q + carry; + carry = ys >> 16; + y = *bx - (ys & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + *bx++ = y & 0xffff; +#endif +#endif + } while (sx <= sxe); + if (!*bxe) { + bx = b->x; + while (--bxe > bx && !*bxe) + --n; + b->wds = n; + } + } + if (cmp(b, S) >= 0) { + q++; + borrow = 0; + carry = 0; + bx = b->x; + sx = S->x; + do { +#ifdef USE_LONG_LONG + ys = *sx++ + carry; + carry = ys >> 32; + y = *bx - (ys & 0xffffffffUL) - borrow; + borrow = y >> 32 & (uint32_t)1; + *bx++ = (uint32_t)y & 0xffffffffUL; +#else +#ifdef Pack_32 + si = *sx++; + ys = (si & 0xffff) + carry; + zs = (si >> 16) + (ys >> 16); + carry = zs >> 16; + y = (*bx & 0xffff) - (ys & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + z = (*bx >> 16) - (zs & 0xffff) - borrow; + borrow = (z & 0x10000) >> 16; + Storeinc(bx, z, y); +#else + ys = *sx++ + carry; + carry = ys >> 16; + y = *bx - (ys & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + *bx++ = y & 0xffff; +#endif +#endif + } while (sx <= sxe); + bx = b->x; + bxe = bx + n; + if (!*bxe) { + while (--bxe > bx && !*bxe) + --n; + b->wds = n; + } + } + return q; +} + +#if !ENABLE(JSC_MULTIPLE_THREADS) +static char* dtoa_result; +#endif + +static char* rv_alloc(int i) +{ + int k; + + int j = sizeof(uint32_t); + for (k = 0; + sizeof(Bigint) - sizeof(uint32_t) - sizeof(int) + j <= (unsigned)i; + j <<= 1) + k++; + int* r = (int*)Balloc(k); + *r = k; + return 
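+        // The caller's string begins just past the Bigint-shaped header;
+        // freedtoa below reads the k stashed in *r to rebuild the Bigint
+        // and free it.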
+#if !ENABLE(JSC_MULTIPLE_THREADS) + dtoa_result = +#endif + (char*)(r + 1); +} + +static char* nrv_alloc(const char* s, char** rve, int n) +{ + char* rv = rv_alloc(n); + char* t = rv; + + while ((*t = *s++)) + t++; + if (rve) + *rve = t; + return rv; +} + +/* freedtoa(s) must be used to free values s returned by dtoa + * when MULTIPLE_THREADS is #defined. It should be used in all cases, + * but for consistency with earlier versions of dtoa, it is optional + * when MULTIPLE_THREADS is not defined. + */ + +void freedtoa(char* s) +{ + Bigint* b = (Bigint*)((int*)s - 1); + b->maxwds = 1 << (b->k = *(int*)b); + Bfree(b); +#if !ENABLE(JSC_MULTIPLE_THREADS) + if (s == dtoa_result) + dtoa_result = 0; +#endif +} + +/* dtoa for IEEE arithmetic (dmg): convert double to ASCII string. + * + * Inspired by "How to Print Floating-Point Numbers Accurately" by + * Guy L. Steele, Jr. and Jon L. White [Proc. ACM SIGPLAN '90, pp. 92-101]. + * + * Modifications: + * 1. Rather than iterating, we use a simple numeric overestimate + * to determine k = floor(log10(d)). We scale relevant + * quantities using O(log2(k)) rather than O(k) multiplications. + * 2. For some modes > 2 (corresponding to ecvt and fcvt), we don't + * try to generate digits strictly left to right. Instead, we + * compute with fewer bits and propagate the carry if necessary + * when rounding the final digit up. This is often faster. + * 3. Under the assumption that input will be rounded nearest, + * mode 0 renders 1e23 as 1e23 rather than 9.999999999999999e22. + * That is, we allow equality in stopping tests when the + * round-nearest rule will give the same floating-point value + * as would satisfaction of the stopping test with strict + * inequality. + * 4. We remove common factors of powers of 2 from relevant + * quantities. + * 5. When converting floating-point integers less than 1e16, + * we use floating-point arithmetic rather than resorting + * to multiple-precision integers. + * 6. When asked to produce fewer than 15 digits, we first try + * to get by with floating-point arithmetic; we resort to + * multiple-precision integer arithmetic only if we cannot + * guarantee that the floating-point calculation has given + * the correctly rounded result. For k requested digits and + * "uniformly" distributed input, the probability is + * something like 10^(k-15) that we must resort to the int32_t + * calculation. + */ + +char* dtoa(double d, int ndigits, int* decpt, int* sign, char** rve) +{ + /* + Arguments ndigits, decpt, sign are similar to those + of ecvt and fcvt; trailing zeros are suppressed from + the returned string. If not null, *rve is set to point + to the end of the return value. If d is +-Infinity or NaN, + then *decpt is set to 9999. 
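+       As a concrete example, d == 3.14 comes back as the digit string "314"
+       with *decpt == 1 and *sign == 0; reinserting the decimal point is the
+       caller's job.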
+ + */ + + int bbits, b2, b5, be, dig, i, ieps, ilim = 0, ilim0, ilim1 = 0, + j, j1, k, k0, k_check, leftright, m2, m5, s2, s5, + spec_case, try_quick; + int32_t L; +#ifndef Sudden_Underflow + int denorm; + uint32_t x; +#endif + Bigint *b, *b1, *delta, *mlo = NULL, *mhi, *S; + double d2, ds, eps; + char *s, *s0; +#ifdef SET_INEXACT + int inexact, oldinexact; +#endif + +#if !ENABLE(JSC_MULTIPLE_THREADS) + if (dtoa_result) { + freedtoa(dtoa_result); + dtoa_result = 0; + } +#endif + + if (word0(d) & Sign_bit) { + /* set sign for everything, including 0's and NaNs */ + *sign = 1; + word0(d) &= ~Sign_bit; /* clear sign bit */ + } else + *sign = 0; + + if ((word0(d) & Exp_mask) == Exp_mask) + { + /* Infinity or NaN */ + *decpt = 9999; + if (!word1(d) && !(word0(d) & 0xfffff)) + return nrv_alloc("Infinity", rve, 8); + return nrv_alloc("NaN", rve, 3); + } + if (!dval(d)) { + *decpt = 1; + return nrv_alloc("0", rve, 1); + } + +#ifdef SET_INEXACT + try_quick = oldinexact = get_inexact(); + inexact = 1; +#endif + + b = d2b(dval(d), &be, &bbits); +#ifdef Sudden_Underflow + i = (int)(word0(d) >> Exp_shift1 & (Exp_mask >> Exp_shift1)); +#else + if ((i = (int)(word0(d) >> Exp_shift1 & (Exp_mask >> Exp_shift1)))) { +#endif + dval(d2) = dval(d); + word0(d2) &= Frac_mask1; + word0(d2) |= Exp_11; + + /* log(x) ~=~ log(1.5) + (x-1.5)/1.5 + * log10(x) = log(x) / log(10) + * ~=~ log(1.5)/log(10) + (x-1.5)/(1.5*log(10)) + * log10(d) = (i-Bias)*log(2)/log(10) + log10(d2) + * + * This suggests computing an approximation k to log10(d) by + * + * k = (i - Bias)*0.301029995663981 + * + ( (d2-1.5)*0.289529654602168 + 0.176091259055681 ); + * + * We want k to be too large rather than too small. + * The error in the first-order Taylor series approximation + * is in our favor, so we just round up the constant enough + * to compensate for any error in the multiplication of + * (i - Bias) by 0.301029995663981; since |i - Bias| <= 1077, + * and 1077 * 0.30103 * 2^-52 ~=~ 7.2e-14, + * adding 1e-13 to the constant term more than suffices. + * Hence we adjust the constant term to 0.1760912590558. + * (We could get a more accurate k by invoking log10, + * but this is probably not worthwhile.) + */ + + i -= Bias; +#ifndef Sudden_Underflow + denorm = 0; + } else { + /* d is denormalized */ + + i = bbits + be + (Bias + (P - 1) - 1); + x = i > 32 ? word0(d) << 64 - i | word1(d) >> i - 32 + : word1(d) << 32 - i; + dval(d2) = x; + word0(d2) -= 31 * Exp_msk1; /* adjust exponent */ + i -= (Bias + (P - 1) - 1) + 1; + denorm = 1; + } +#endif + ds = (dval(d2) - 1.5) * 0.289529654602168 + 0.1760912590558 + (i * 0.301029995663981); + k = (int)ds; + if (ds < 0. && ds != k) + k--; /* want k = floor(ds) */ + k_check = 1; + if (k >= 0 && k <= Ten_pmax) { + if (dval(d) < tens[k]) + k--; + k_check = 0; + } + j = bbits - i - 1; + if (j >= 0) { + b2 = 0; + s2 = j; + } else { + b2 = -j; + s2 = 0; + } + if (k >= 0) { + b5 = 0; + s5 = k; + s2 += k; + } else { + b2 -= k; + b5 = -k; + s5 = 0; + } + +#ifndef SET_INEXACT +#ifdef Check_FLT_ROUNDS + try_quick = Rounding == 1; +#else + try_quick = 1; +#endif +#endif /*SET_INEXACT*/ + + leftright = 1; + ilim = ilim1 = -1; + i = 18; + ndigits = 0; + s = s0 = rv_alloc(i); + + if (ilim >= 0 && ilim <= Quick_max && try_quick) { + + /* Try to get by with floating-point arithmetic. 
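+         *
+         * (Note added for clarity: the fast path scales d by powers of ten
+         * from the tens[] / bigtens[] tables, tracking roughly one ulp of
+         * rounding error per multiplication in ieps; eps becomes the error
+         * bound, and any digit decision that falls within eps of a rounding
+         * boundary abandons the fast path via fast_failed and redoes the
+         * work with exact bignum arithmetic.)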
*/ + + i = 0; + dval(d2) = dval(d); + k0 = k; + ilim0 = ilim; + ieps = 2; /* conservative */ + if (k > 0) { + ds = tens[k & 0xf]; + j = k >> 4; + if (j & Bletch) { + /* prevent overflows */ + j &= Bletch - 1; + dval(d) /= bigtens[n_bigtens - 1]; + ieps++; + } + for (; j; j >>= 1, i++) { + if (j & 1) { + ieps++; + ds *= bigtens[i]; + } + } + dval(d) /= ds; + } else if ((j1 = -k)) { + dval(d) *= tens[j1 & 0xf]; + for (j = j1 >> 4; j; j >>= 1, i++) { + if (j & 1) { + ieps++; + dval(d) *= bigtens[i]; + } + } + } + if (k_check && dval(d) < 1. && ilim > 0) { + if (ilim1 <= 0) + goto fast_failed; + ilim = ilim1; + k--; + dval(d) *= 10.; + ieps++; + } + dval(eps) = (ieps * dval(d)) + 7.; + word0(eps) -= (P - 1) * Exp_msk1; + if (ilim == 0) { + S = mhi = 0; + dval(d) -= 5.; + if (dval(d) > dval(eps)) + goto one_digit; + if (dval(d) < -dval(eps)) + goto no_digits; + goto fast_failed; + } +#ifndef No_leftright + if (leftright) { + /* Use Steele & White method of only + * generating digits needed. + */ + dval(eps) = (0.5 / tens[ilim - 1]) - dval(eps); + for (i = 0;;) { + L = (long int)dval(d); + dval(d) -= L; + *s++ = '0' + (int)L; + if (dval(d) < dval(eps)) + goto ret1; + if (1. - dval(d) < dval(eps)) + goto bump_up; + if (++i >= ilim) + break; + dval(eps) *= 10.; + dval(d) *= 10.; + } + } else { +#endif + /* Generate ilim digits, then fix them up. */ + dval(eps) *= tens[ilim - 1]; + for (i = 1;; i++, dval(d) *= 10.) { + L = (int32_t)(dval(d)); + if (!(dval(d) -= L)) + ilim = i; + *s++ = '0' + (int)L; + if (i == ilim) { + if (dval(d) > 0.5 + dval(eps)) + goto bump_up; + else if (dval(d) < 0.5 - dval(eps)) { + while (*--s == '0') { } + s++; + goto ret1; + } + break; + } + } +#ifndef No_leftright + } +#endif +fast_failed: + s = s0; + dval(d) = dval(d2); + k = k0; + ilim = ilim0; + } + + /* Do we have a "small" integer? */ + + if (be >= 0 && k <= Int_max) { + /* Yes. */ + ds = tens[k]; + if (ndigits < 0 && ilim <= 0) { + S = mhi = 0; + if (ilim < 0 || dval(d) <= 5 * ds) + goto no_digits; + goto one_digit; + } + for (i = 1;; i++, dval(d) *= 10.) { + L = (int32_t)(dval(d) / ds); + dval(d) -= L * ds; +#ifdef Check_FLT_ROUNDS + /* If FLT_ROUNDS == 2, L will usually be high by 1 */ + if (dval(d) < 0) { + L--; + dval(d) += ds; + } +#endif + *s++ = '0' + (int)L; + if (!dval(d)) { +#ifdef SET_INEXACT + inexact = 0; +#endif + break; + } + if (i == ilim) { + dval(d) += dval(d); + if (dval(d) > ds || dval(d) == ds && L & 1) { +bump_up: + while (*--s == '9') + if (s == s0) { + k++; + *s = '0'; + break; + } + ++*s++; + } + break; + } + } + goto ret1; + } + + m2 = b2; + m5 = b5; + mhi = mlo = 0; + if (leftright) { + i = +#ifndef Sudden_Underflow + denorm ? be + (Bias + (P - 1) - 1 + 1) : +#endif + 1 + P - bbits; + b2 += i; + s2 += i; + mhi = i2b(1); + } + if (m2 > 0 && s2 > 0) { + i = m2 < s2 ? m2 : s2; + b2 -= i; + m2 -= i; + s2 -= i; + } + if (b5 > 0) { + if (leftright) { + if (m5 > 0) { + mhi = pow5mult(mhi, m5); + b1 = mult(mhi, b); + Bfree(b); + b = b1; + } + if ((j = b5 - m5)) + b = pow5mult(b, j); + } else + b = pow5mult(b, b5); + } + S = i2b(1); + if (s5 > 0) + S = pow5mult(S, s5); + + /* Check for special case that d is a normalized power of 2. */ + + spec_case = 0; + if (!word1(d) && !(word0(d) & Bndry_mask) +#ifndef Sudden_Underflow + && word0(d) & (Exp_mask & ~Exp_msk1) +#endif + ) { + /* The special case */ + b2 += Log2P; + s2 += Log2P; + spec_case = 1; + } + + /* Arrange for convenient computation of quotients: + * shift left if necessary so divisor has 4 leading 0 bits. 
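+     *
+     * (Note added for clarity: with the high 4 bits of S's top word clear,
+     * quorem's one-word trial quotient, *bxe / (*sxe + 1), is at most 9 and
+     * underestimates the true digit by at most 1; the cmp(b, S) >= 0
+     * correction step there makes up the difference.)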
+ * + * Perhaps we should just compute leading 28 bits of S once + * and for all and pass them and a shift to quorem, so it + * can do shifts and ors to compute the numerator for q. + */ +#ifdef Pack_32 + if ((i = ((s5 ? 32 - hi0bits(S->x[S->wds - 1]) : 1) + s2) & 0x1f)) + i = 32 - i; +#else + if ((i = ((s5 ? 32 - hi0bits(S->x[S->wds - 1]) : 1) + s2) & 0xf)) + i = 16 - i; +#endif + if (i > 4) { + i -= 4; + b2 += i; + m2 += i; + s2 += i; + } else if (i < 4) { + i += 28; + b2 += i; + m2 += i; + s2 += i; + } + if (b2 > 0) + b = lshift(b, b2); + if (s2 > 0) + S = lshift(S, s2); + if (k_check) { + if (cmp(b,S) < 0) { + k--; + b = multadd(b, 10, 0); /* we botched the k estimate */ + if (leftright) + mhi = multadd(mhi, 10, 0); + ilim = ilim1; + } + } + + if (leftright) { + if (m2 > 0) + mhi = lshift(mhi, m2); + + /* Compute mlo -- check for special case + * that d is a normalized power of 2. + */ + + mlo = mhi; + if (spec_case) { + mhi = Balloc(mhi->k); + Bcopy(mhi, mlo); + mhi = lshift(mhi, Log2P); + } + + for (i = 1;;i++) { + dig = quorem(b,S) + '0'; + /* Do we yet have the shortest decimal string + * that will round to d? + */ + j = cmp(b, mlo); + delta = diff(S, mhi); + j1 = delta->sign ? 1 : cmp(b, delta); + Bfree(delta); + if (j1 == 0 && !(word1(d) & 1)) { + if (dig == '9') + goto round_9_up; + if (j > 0) + dig++; +#ifdef SET_INEXACT + else if (!b->x[0] && b->wds <= 1) + inexact = 0; +#endif + *s++ = dig; + goto ret; + } + if (j < 0 || j == 0 && !(word1(d) & 1)) { + if (!b->x[0] && b->wds <= 1) { +#ifdef SET_INEXACT + inexact = 0; +#endif + goto accept_dig; + } + if (j1 > 0) { + b = lshift(b, 1); + j1 = cmp(b, S); + if ((j1 > 0 || j1 == 0 && dig & 1) && dig++ == '9') + goto round_9_up; + } +accept_dig: + *s++ = dig; + goto ret; + } + if (j1 > 0) { + if (dig == '9') { /* possible if i == 1 */ +round_9_up: + *s++ = '9'; + goto roundoff; + } + *s++ = dig + 1; + goto ret; + } + *s++ = dig; + if (i == ilim) + break; + b = multadd(b, 10, 0); + if (mlo == mhi) + mlo = mhi = multadd(mhi, 10, 0); + else { + mlo = multadd(mlo, 10, 0); + mhi = multadd(mhi, 10, 0); + } + } + } else + for (i = 1;; i++) { + *s++ = dig = quorem(b,S) + '0'; + if (!b->x[0] && b->wds <= 1) { +#ifdef SET_INEXACT + inexact = 0; +#endif + goto ret; + } + if (i >= ilim) + break; + b = multadd(b, 10, 0); + } + + /* Round off last digit */ + + b = lshift(b, 1); + j = cmp(b, S); + if (j > 0 || j == 0 && dig & 1) { +roundoff: + while (*--s == '9') + if (s == s0) { + k++; + *s++ = '1'; + goto ret; + } + ++*s++; + } else { + while (*--s == '0') { } + s++; + } + goto ret; +no_digits: + k = -1 - ndigits; + goto ret; +one_digit: + *s++ = '1'; + k++; + goto ret; +ret: + Bfree(S); + if (mhi) { + if (mlo && mlo != mhi) + Bfree(mlo); + Bfree(mhi); + } +ret1: +#ifdef SET_INEXACT + if (inexact) { + if (!oldinexact) { + word0(d) = Exp_1 + (70 << Exp_shift); + word1(d) = 0; + dval(d) += 1.; + } + } else if (!oldinexact) + clear_inexact(); +#endif + Bfree(b); + *s = 0; + *decpt = k + 1; + if (rve) + *rve = s; + return s0; +} + +} // namespace WTF diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/dtoa.h b/src/3rdparty/webkit/JavaScriptCore/wtf/dtoa.h new file mode 100644 index 0000000..ed858c0 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/dtoa.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2003, 2008 Apple Inc. All rights reserved. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_dtoa_h +#define WTF_dtoa_h + +namespace WTF { + class Mutex; +} + +namespace WTF { + + extern WTF::Mutex* s_dtoaP5Mutex; + + double strtod(const char* s00, char** se); + char* dtoa(double d, int ndigits, int* decpt, int* sign, char** rve); + void freedtoa(char* s); + +} // namespace WTF + +#endif // WTF_dtoa_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/qt/MainThreadQt.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/qt/MainThreadQt.cpp new file mode 100644 index 0000000..1914600 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/qt/MainThreadQt.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2007 Staikos Computing Services Inc. + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies) + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "MainThread.h" + +#include <QtCore/QObject> +#include <QtCore/QCoreApplication> + + +namespace WTF { + +class MainThreadInvoker : public QObject { + Q_OBJECT +public: + MainThreadInvoker(); + +private Q_SLOTS: + void dispatch(); +}; + +MainThreadInvoker::MainThreadInvoker() +{ + moveToThread(QCoreApplication::instance()->thread()); +} + +void MainThreadInvoker::dispatch() +{ + dispatchFunctionsFromMainThread(); +} + +Q_GLOBAL_STATIC(MainThreadInvoker, webkit_main_thread_invoker) + + +void scheduleDispatchFunctionsOnMainThread() +{ + QMetaObject::invokeMethod(webkit_main_thread_invoker(), "dispatch", Qt::QueuedConnection); +} + +} + +#include "MainThreadQt.moc" diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/Collator.h b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/Collator.h new file mode 100644 index 0000000..f04779d --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/Collator.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WTF_Collator_h +#define WTF_Collator_h + +#include <memory> +#include <wtf/Noncopyable.h> +#include <wtf/unicode/Unicode.h> + +#if USE(ICU_UNICODE) && !UCONFIG_NO_COLLATION +struct UCollator; +#endif + +namespace WTF { + + class Collator : Noncopyable { + public: + enum Result { Equal = 0, Greater = 1, Less = -1 }; + + Collator(const char* locale); // Parsing is lenient; e.g. language identifiers (such as "en-US") are accepted, too. 
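+
+        // A minimal usage sketch (illustrative only; the string arguments
+        // and their lengths are hypothetical, and the Collator keeps its
+        // own copy of the locale string):
+        //
+        //     Collator collator("en-US");
+        //     collator.setOrderLowerFirst(true);
+        //     Collator::Result result = collator.collate(a, aLength, b, bLength);
+        //     if (result == Collator::Less) { /* a sorts before b */ }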
+ ~Collator(); + void setOrderLowerFirst(bool); + + static std::auto_ptr<Collator> userDefault(); + + Result collate(const ::UChar*, size_t, const ::UChar*, size_t) const; + + private: +#if USE(ICU_UNICODE) && !UCONFIG_NO_COLLATION + void createCollator() const; + void releaseCollator(); + mutable UCollator* m_collator; +#endif + char* m_locale; + bool m_lowerFirst; + }; +} + +using WTF::Collator; + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/CollatorDefault.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/CollatorDefault.cpp new file mode 100644 index 0000000..eddbe53 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/CollatorDefault.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "Collator.h" + +#if !USE(ICU_UNICODE) || UCONFIG_NO_COLLATION + +namespace WTF { + +Collator::Collator(const char*) +{ +} + +Collator::~Collator() +{ +} + +void Collator::setOrderLowerFirst(bool) +{ +} + +std::auto_ptr<Collator> Collator::userDefault() +{ + return std::auto_ptr<Collator>(new Collator(0)); +} + +// A default implementation for platforms that lack Unicode-aware collation. +Collator::Result Collator::collate(const UChar* lhs, size_t lhsLength, const UChar* rhs, size_t rhsLength) const +{ + int lmin = lhsLength < rhsLength ? lhsLength : rhsLength; + int l = 0; + while (l < lmin && *lhs == *rhs) { + lhs++; + rhs++; + l++; + } + + if (l < lmin) + return (*lhs > *rhs) ? Greater : Less; + + if (lhsLength == rhsLength) + return Equal; + + return (lhsLength > rhsLength) ? Greater : Less; +} + +} + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/UTF8.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/UTF8.cpp new file mode 100644 index 0000000..9e713fe --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/UTF8.cpp @@ -0,0 +1,303 @@ +/* + * Copyright (C) 2007 Apple Inc. All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "UTF8.h"
+
+namespace WTF {
+namespace Unicode {
+
+inline int inlineUTF8SequenceLengthNonASCII(char b0)
+{
+    if ((b0 & 0xC0) != 0xC0)
+        return 0;
+    if ((b0 & 0xE0) == 0xC0)
+        return 2;
+    if ((b0 & 0xF0) == 0xE0)
+        return 3;
+    if ((b0 & 0xF8) == 0xF0)
+        return 4;
+    return 0;
+}
+
+inline int inlineUTF8SequenceLength(char b0)
+{
+    return (b0 & 0x80) == 0 ? 1 : inlineUTF8SequenceLengthNonASCII(b0);
+}
+
+int UTF8SequenceLength(char b0)
+{
+    return (b0 & 0x80) == 0 ? 1 : inlineUTF8SequenceLengthNonASCII(b0);
+}
+
+int decodeUTF8Sequence(const char* sequence)
+{
+    // Handle 0-byte sequences (never valid).
+    const unsigned char b0 = sequence[0];
+    const int length = inlineUTF8SequenceLength(b0);
+    if (length == 0)
+        return -1;
+
+    // Handle 1-byte sequences (plain ASCII).
+    const unsigned char b1 = sequence[1];
+    if (length == 1) {
+        if (b1)
+            return -1;
+        return b0;
+    }
+
+    // Handle 2-byte sequences.
+    if ((b1 & 0xC0) != 0x80)
+        return -1;
+    const unsigned char b2 = sequence[2];
+    if (length == 2) {
+        if (b2)
+            return -1;
+        const int c = ((b0 & 0x1F) << 6) | (b1 & 0x3F);
+        if (c < 0x80)
+            return -1;
+        return c;
+    }
+
+    // Handle 3-byte sequences.
+    if ((b2 & 0xC0) != 0x80)
+        return -1;
+    const unsigned char b3 = sequence[3];
+    if (length == 3) {
+        if (b3)
+            return -1;
+        const int c = ((b0 & 0xF) << 12) | ((b1 & 0x3F) << 6) | (b2 & 0x3F);
+        if (c < 0x800)
+            return -1;
+        // UTF-16 surrogates should never appear in UTF-8 data.
+        if (c >= 0xD800 && c <= 0xDFFF)
+            return -1;
+        return c;
+    }
+
+    // Handle 4-byte sequences.
+    if ((b3 & 0xC0) != 0x80)
+        return -1;
+    const unsigned char b4 = sequence[4];
+    if (length == 4) {
+        if (b4)
+            return -1;
+        const int c = ((b0 & 0x7) << 18) | ((b1 & 0x3F) << 12) | ((b2 & 0x3F) << 6) | (b3 & 0x3F);
+        if (c < 0x10000 || c > 0x10FFFF)
+            return -1;
+        return c;
+    }
+
+    return -1;
+}
+
+// Once the bits are split out into bytes of UTF-8, this is a mask OR-ed
+// into the first byte, depending on how many bytes follow. There are
+// as many entries in this table as there are UTF-8 sequence types.
+// (I.e., one-byte sequence, two-byte sequence, etc.) Remember that sequences
+// for *legal* UTF-8 will be 4 or fewer bytes total.
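+//
+// (Worked example, added for illustration: U+1D11E calls for a four-byte
+// sequence, so its 21 bits split 3 + 6 + 6 + 6; each trailing byte is
+// 0x80 | sixBits, the lead byte is firstByteMark[4] | topBits = 0xF0,
+// and the result is <F0 9D 84 9E>.)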
+static const unsigned char firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC }; + +ConversionResult convertUTF16ToUTF8( + const UChar** sourceStart, const UChar* sourceEnd, + char** targetStart, char* targetEnd, bool strict) +{ + ConversionResult result = conversionOK; + const UChar* source = *sourceStart; + char* target = *targetStart; + while (source < sourceEnd) { + UChar32 ch; + unsigned short bytesToWrite = 0; + const UChar32 byteMask = 0xBF; + const UChar32 byteMark = 0x80; + const UChar* oldSource = source; // In case we have to back up because of target overflow. + ch = static_cast<unsigned short>(*source++); + // If we have a surrogate pair, convert to UChar32 first. + if (ch >= 0xD800 && ch <= 0xDBFF) { + // If the 16 bits following the high surrogate are in the source buffer... + if (source < sourceEnd) { + UChar32 ch2 = static_cast<unsigned short>(*source); + // If it's a low surrogate, convert to UChar32. + if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) { + ch = ((ch - 0xD800) << 10) + (ch2 - 0xDC00) + 0x0010000; + ++source; + } else if (strict) { // it's an unpaired high surrogate + --source; // return to the illegal value itself + result = sourceIllegal; + break; + } + } else { // We don't have the 16 bits following the high surrogate. + --source; // return to the high surrogate + result = sourceExhausted; + break; + } + } else if (strict) { + // UTF-16 surrogate values are illegal in UTF-32 + if (ch >= 0xDC00 && ch <= 0xDFFF) { + --source; // return to the illegal value itself + result = sourceIllegal; + break; + } + } + // Figure out how many bytes the result will require + if (ch < (UChar32)0x80) { + bytesToWrite = 1; + } else if (ch < (UChar32)0x800) { + bytesToWrite = 2; + } else if (ch < (UChar32)0x10000) { + bytesToWrite = 3; + } else if (ch < (UChar32)0x110000) { + bytesToWrite = 4; + } else { + bytesToWrite = 3; + ch = 0xFFFD; + } + + target += bytesToWrite; + if (target > targetEnd) { + source = oldSource; // Back up source pointer! + target -= bytesToWrite; + result = targetExhausted; + break; + } + switch (bytesToWrite) { // note: everything falls through. + case 4: *--target = (char)((ch | byteMark) & byteMask); ch >>= 6; + case 3: *--target = (char)((ch | byteMark) & byteMask); ch >>= 6; + case 2: *--target = (char)((ch | byteMark) & byteMask); ch >>= 6; + case 1: *--target = (char)(ch | firstByteMark[bytesToWrite]); + } + target += bytesToWrite; + } + *sourceStart = source; + *targetStart = target; + return result; +} + +// This must be called with the length pre-determined by the first byte. +// If presented with a length > 4, this returns false. The Unicode +// definition of UTF-8 goes up to 4-byte sequences. +static bool isLegalUTF8(const unsigned char* source, int length) +{ + unsigned char a; + const unsigned char* srcptr = source + length; + switch (length) { + default: return false; + // Everything else falls through when "true"... 
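+    // (Note added for clarity: srcptr starts one past the last byte and
+    // walks backwards, so case 4 checks the fourth byte, falls through to
+    // case 3 for the third, and so on. When case 2 finishes, 'a' holds the
+    // second byte, and the inner switch range-checks it against the first
+    // byte to reject overlong forms, surrogate code points, and values
+    // above U+10FFFF.)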
+ case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; + case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; + case 2: if ((a = (*--srcptr)) > 0xBF) return false; + + switch (*source) { + // no fall-through in this inner switch + case 0xE0: if (a < 0xA0) return false; break; + case 0xED: if (a > 0x9F) return false; break; + case 0xF0: if (a < 0x90) return false; break; + case 0xF4: if (a > 0x8F) return false; break; + default: if (a < 0x80) return false; + } + + case 1: if (*source >= 0x80 && *source < 0xC2) return false; + } + if (*source > 0xF4) + return false; + return true; +} + +// Magic values subtracted from a buffer value during UTF8 conversion. +// This table contains as many values as there might be trailing bytes +// in a UTF-8 sequence. +static const UChar32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, + 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; + +ConversionResult convertUTF8ToUTF16( + const char** sourceStart, const char* sourceEnd, + UChar** targetStart, UChar* targetEnd, bool strict) +{ + ConversionResult result = conversionOK; + const char* source = *sourceStart; + UChar* target = *targetStart; + while (source < sourceEnd) { + UChar32 ch = 0; + int extraBytesToRead = UTF8SequenceLength(*source) - 1; + if (source + extraBytesToRead >= sourceEnd) { + result = sourceExhausted; + break; + } + // Do this check whether lenient or strict + if (!isLegalUTF8(reinterpret_cast<const unsigned char*>(source), extraBytesToRead + 1)) { + result = sourceIllegal; + break; + } + // The cases all fall through. + switch (extraBytesToRead) { + case 5: ch += static_cast<unsigned char>(*source++); ch <<= 6; // remember, illegal UTF-8 + case 4: ch += static_cast<unsigned char>(*source++); ch <<= 6; // remember, illegal UTF-8 + case 3: ch += static_cast<unsigned char>(*source++); ch <<= 6; + case 2: ch += static_cast<unsigned char>(*source++); ch <<= 6; + case 1: ch += static_cast<unsigned char>(*source++); ch <<= 6; + case 0: ch += static_cast<unsigned char>(*source++); + } + ch -= offsetsFromUTF8[extraBytesToRead]; + + if (target >= targetEnd) { + source -= (extraBytesToRead + 1); // Back up source pointer! + result = targetExhausted; break; + } + if (ch <= 0xFFFF) { + // UTF-16 surrogate values are illegal in UTF-32 + if (ch >= 0xD800 && ch <= 0xDFFF) { + if (strict) { + source -= (extraBytesToRead + 1); // return to the illegal value itself + result = sourceIllegal; + break; + } else + *target++ = 0xFFFD; + } else + *target++ = (UChar)ch; // normal case + } else if (ch > 0x10FFFF) { + if (strict) { + result = sourceIllegal; + source -= (extraBytesToRead + 1); // return to the start + break; // Bail out; shouldn't continue + } else + *target++ = 0xFFFD; + } else { + // target is a character in range 0xFFFF - 0x10FFFF + if (target + 1 >= targetEnd) { + source -= (extraBytesToRead + 1); // Back up source pointer! + result = targetExhausted; + break; + } + ch -= 0x0010000UL; + *target++ = (UChar)((ch >> 10) + 0xD800); + *target++ = (UChar)((ch & 0x03FF) + 0xDC00); + } + } + *sourceStart = source; + *targetStart = target; + return result; +} + +} +} diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/UTF8.h b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/UTF8.h new file mode 100644 index 0000000..a5ed93e --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/UTF8.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2007 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WTF_UTF8_h +#define WTF_UTF8_h + +#include "Unicode.h" + +namespace WTF { + namespace Unicode { + + // Given a first byte, gives the length of the UTF-8 sequence it begins. + // Returns 0 for bytes that are not legal starts of UTF-8 sequences. + // Only allows sequences of up to 4 bytes, since that works for all Unicode characters (U-00000000 to U-0010FFFF). + int UTF8SequenceLength(char); + + // Takes a null-terminated C-style string with a UTF-8 sequence in it and converts it to a character. + // Only allows Unicode characters (U-00000000 to U-0010FFFF). + // Returns -1 if the sequence is not valid (including presence of extra bytes). + int decodeUTF8Sequence(const char*); + + typedef enum { + conversionOK, // conversion successful + sourceExhausted, // partial character in source, but hit end + targetExhausted, // insuff. room in target for conversion + sourceIllegal // source sequence is illegal/malformed + } ConversionResult; + + // These conversion functions take a "strict" argument. When this + // flag is set to strict, both irregular sequences and isolated surrogates + // will cause an error. When the flag is set to lenient, both irregular + // sequences and isolated surrogates are converted. + // + // Whether the flag is strict or lenient, all illegal sequences will cause + // an error return. This includes sequences such as: <F4 90 80 80>, <C0 80>, + // or <A0> in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code + // must check for illegal sequences. + // + // When the flag is set to lenient, characters over 0x10FFFF are converted + // to the replacement character; otherwise (when the flag is set to strict) + // they constitute an error. 
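+        //
+        // A minimal conversion sketch (illustrative only; utf8Data and
+        // utf8Length are hypothetical inputs, and real callers size the
+        // target buffer from the source length rather than using a fixed
+        // array):
+        //
+        //     const char* source = utf8Data;
+        //     UChar buffer[256];
+        //     UChar* target = buffer;
+        //     ConversionResult status = convertUTF8ToUTF16(
+        //         &source, utf8Data + utf8Length, &target, buffer + 256, true);
+        //     // On conversionOK, [buffer, target) holds the UTF-16 code units.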
+ + ConversionResult convertUTF8ToUTF16( + const char** sourceStart, const char* sourceEnd, + UChar** targetStart, UChar* targetEnd, bool strict = true); + + ConversionResult convertUTF16ToUTF8( + const UChar** sourceStart, const UChar* sourceEnd, + char** targetStart, char* targetEnd, bool strict = true); + } +} + +#endif // WTF_UTF8_h diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/Unicode.h b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/Unicode.h new file mode 100644 index 0000000..e6e8f23 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/Unicode.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2006 George Staikos <staikos@kde.org> + * Copyright (C) 2006, 2008, 2009 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_UNICODE_H +#define WTF_UNICODE_H + +#include <wtf/Assertions.h> + +#if USE(QT4_UNICODE) +#include "qt4/UnicodeQt4.h" +#elif USE(ICU_UNICODE) +#include <wtf/unicode/icu/UnicodeIcu.h> +#else +#error "Unknown Unicode implementation" +#endif + +COMPILE_ASSERT(sizeof(UChar) == 2, UCharIsTwoBytes); + +#endif // WTF_UNICODE_H diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp new file mode 100644 index 0000000..79dec79 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp @@ -0,0 +1,144 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "Collator.h" + +#if USE(ICU_UNICODE) && !UCONFIG_NO_COLLATION + +#include "Assertions.h" +#include "Threading.h" +#include <unicode/ucol.h> +#include <string.h> + +#if PLATFORM(DARWIN) +#include <CoreFoundation/CoreFoundation.h> +#endif + +namespace WTF { + +static UCollator* cachedCollator; +static Mutex& cachedCollatorMutex() +{ + AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); + return mutex; +} + +Collator::Collator(const char* locale) + : m_collator(0) + , m_locale(locale ? strdup(locale) : 0) + , m_lowerFirst(false) +{ +} + +std::auto_ptr<Collator> Collator::userDefault() +{ +#if PLATFORM(DARWIN) && PLATFORM(CF) + // Mac OS X doesn't set UNIX locale to match user-selected one, so ICU default doesn't work. + CFStringRef collationOrder = (CFStringRef)CFPreferencesCopyValue(CFSTR("AppleCollationOrder"), kCFPreferencesAnyApplication, kCFPreferencesCurrentUser, kCFPreferencesAnyHost); + char buf[256]; + if (collationOrder) { + CFStringGetCString(collationOrder, buf, sizeof(buf), kCFStringEncodingASCII); + CFRelease(collationOrder); + return std::auto_ptr<Collator>(new Collator(buf)); + } else + return std::auto_ptr<Collator>(new Collator("")); +#else + return std::auto_ptr<Collator>(new Collator(0)); +#endif +} + +Collator::~Collator() +{ + releaseCollator(); + free(m_locale); +} + +void Collator::setOrderLowerFirst(bool lowerFirst) +{ + m_lowerFirst = lowerFirst; +} + +Collator::Result Collator::collate(const UChar* lhs, size_t lhsLength, const UChar* rhs, size_t rhsLength) const +{ + if (!m_collator) + createCollator(); + + return static_cast<Result>(ucol_strcoll(m_collator, lhs, lhsLength, rhs, rhsLength)); +} + +void Collator::createCollator() const +{ + ASSERT(!m_collator); + UErrorCode status = U_ZERO_ERROR; + + { + Locker<Mutex> lock(cachedCollatorMutex()); + if (cachedCollator) { + const char* cachedCollatorLocale = ucol_getLocaleByType(cachedCollator, ULOC_REQUESTED_LOCALE, &status); + ASSERT(U_SUCCESS(status)); + ASSERT(cachedCollatorLocale); + + UColAttributeValue cachedCollatorLowerFirst = ucol_getAttribute(cachedCollator, UCOL_CASE_FIRST, &status); + ASSERT(U_SUCCESS(status)); + + // FIXME: default locale is never matched, because ucol_getLocaleByType returns the actual one used, not 0. + if (m_locale && 0 == strcmp(cachedCollatorLocale, m_locale) + && ((UCOL_LOWER_FIRST == cachedCollatorLowerFirst && m_lowerFirst) || (UCOL_UPPER_FIRST == cachedCollatorLowerFirst && !m_lowerFirst))) { + m_collator = cachedCollator; + cachedCollator = 0; + return; + } + } + } + + m_collator = ucol_open(m_locale, &status); + if (U_FAILURE(status)) { + status = U_ZERO_ERROR; + m_collator = ucol_open("", &status); // Fallback to Unicode Collation Algorithm. + } + ASSERT(U_SUCCESS(status)); + + ucol_setAttribute(m_collator, UCOL_CASE_FIRST, m_lowerFirst ? 
UCOL_LOWER_FIRST : UCOL_UPPER_FIRST, &status); + ASSERT(U_SUCCESS(status)); +} + +void Collator::releaseCollator() +{ + { + Locker<Mutex> lock(cachedCollatorMutex()); + if (cachedCollator) + ucol_close(cachedCollator); + cachedCollator = m_collator; + m_collator = 0; + } +} + +} + +#endif diff --git a/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h new file mode 100644 index 0000000..608aea6 --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h @@ -0,0 +1,219 @@ +/* + * Copyright (C) 2006 George Staikos <staikos@kde.org> + * Copyright (C) 2006 Alexey Proskuryakov <ap@nypop.com> + * Copyright (C) 2006, 2007, 2008, 2009 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_UNICODE_ICU_H +#define WTF_UNICODE_ICU_H + +#include <stdlib.h> +#include <unicode/uchar.h> +#include <unicode/ustring.h> +#include <unicode/utf16.h> + +namespace WTF { +namespace Unicode { + +enum Direction { + LeftToRight = U_LEFT_TO_RIGHT, + RightToLeft = U_RIGHT_TO_LEFT, + EuropeanNumber = U_EUROPEAN_NUMBER, + EuropeanNumberSeparator = U_EUROPEAN_NUMBER_SEPARATOR, + EuropeanNumberTerminator = U_EUROPEAN_NUMBER_TERMINATOR, + ArabicNumber = U_ARABIC_NUMBER, + CommonNumberSeparator = U_COMMON_NUMBER_SEPARATOR, + BlockSeparator = U_BLOCK_SEPARATOR, + SegmentSeparator = U_SEGMENT_SEPARATOR, + WhiteSpaceNeutral = U_WHITE_SPACE_NEUTRAL, + OtherNeutral = U_OTHER_NEUTRAL, + LeftToRightEmbedding = U_LEFT_TO_RIGHT_EMBEDDING, + LeftToRightOverride = U_LEFT_TO_RIGHT_OVERRIDE, + RightToLeftArabic = U_RIGHT_TO_LEFT_ARABIC, + RightToLeftEmbedding = U_RIGHT_TO_LEFT_EMBEDDING, + RightToLeftOverride = U_RIGHT_TO_LEFT_OVERRIDE, + PopDirectionalFormat = U_POP_DIRECTIONAL_FORMAT, + NonSpacingMark = U_DIR_NON_SPACING_MARK, + BoundaryNeutral = U_BOUNDARY_NEUTRAL +}; + +enum DecompositionType { + DecompositionNone = U_DT_NONE, + DecompositionCanonical = U_DT_CANONICAL, + DecompositionCompat = U_DT_COMPAT, + DecompositionCircle = U_DT_CIRCLE, + DecompositionFinal = U_DT_FINAL, + DecompositionFont = U_DT_FONT, + DecompositionFraction = U_DT_FRACTION, + DecompositionInitial = U_DT_INITIAL, + DecompositionIsolated = U_DT_ISOLATED, + DecompositionMedial = U_DT_MEDIAL, + DecompositionNarrow = U_DT_NARROW, + DecompositionNoBreak = U_DT_NOBREAK, + DecompositionSmall = U_DT_SMALL, + DecompositionSquare = U_DT_SQUARE, + DecompositionSub = U_DT_SUB, + DecompositionSuper = U_DT_SUPER, + DecompositionVertical = U_DT_VERTICAL, + DecompositionWide = U_DT_WIDE, +}; + +enum CharCategory { + NoCategory = 0, + Other_NotAssigned = U_MASK(U_GENERAL_OTHER_TYPES), + Letter_Uppercase = U_MASK(U_UPPERCASE_LETTER), + Letter_Lowercase = U_MASK(U_LOWERCASE_LETTER), + Letter_Titlecase = 
U_MASK(U_TITLECASE_LETTER), + Letter_Modifier = U_MASK(U_MODIFIER_LETTER), + Letter_Other = U_MASK(U_OTHER_LETTER), + + Mark_NonSpacing = U_MASK(U_NON_SPACING_MARK), + Mark_Enclosing = U_MASK(U_ENCLOSING_MARK), + Mark_SpacingCombining = U_MASK(U_COMBINING_SPACING_MARK), + + Number_DecimalDigit = U_MASK(U_DECIMAL_DIGIT_NUMBER), + Number_Letter = U_MASK(U_LETTER_NUMBER), + Number_Other = U_MASK(U_OTHER_NUMBER), + + Separator_Space = U_MASK(U_SPACE_SEPARATOR), + Separator_Line = U_MASK(U_LINE_SEPARATOR), + Separator_Paragraph = U_MASK(U_PARAGRAPH_SEPARATOR), + + Other_Control = U_MASK(U_CONTROL_CHAR), + Other_Format = U_MASK(U_FORMAT_CHAR), + Other_PrivateUse = U_MASK(U_PRIVATE_USE_CHAR), + Other_Surrogate = U_MASK(U_SURROGATE), + + Punctuation_Dash = U_MASK(U_DASH_PUNCTUATION), + Punctuation_Open = U_MASK(U_START_PUNCTUATION), + Punctuation_Close = U_MASK(U_END_PUNCTUATION), + Punctuation_Connector = U_MASK(U_CONNECTOR_PUNCTUATION), + Punctuation_Other = U_MASK(U_OTHER_PUNCTUATION), + + Symbol_Math = U_MASK(U_MATH_SYMBOL), + Symbol_Currency = U_MASK(U_CURRENCY_SYMBOL), + Symbol_Modifier = U_MASK(U_MODIFIER_SYMBOL), + Symbol_Other = U_MASK(U_OTHER_SYMBOL), + + Punctuation_InitialQuote = U_MASK(U_INITIAL_PUNCTUATION), + Punctuation_FinalQuote = U_MASK(U_FINAL_PUNCTUATION) +}; + +inline UChar32 foldCase(UChar32 c) +{ + return u_foldCase(c, U_FOLD_CASE_DEFAULT); +} + +inline int foldCase(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error) +{ + UErrorCode status = U_ZERO_ERROR; + int realLength = u_strFoldCase(result, resultLength, src, srcLength, U_FOLD_CASE_DEFAULT, &status); + *error = !U_SUCCESS(status); + return realLength; +} + +inline int toLower(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error) +{ + UErrorCode status = U_ZERO_ERROR; + int realLength = u_strToLower(result, resultLength, src, srcLength, "", &status); + *error = !!U_FAILURE(status); + return realLength; +} + +inline UChar32 toLower(UChar32 c) +{ + return u_tolower(c); +} + +inline UChar32 toUpper(UChar32 c) +{ + return u_toupper(c); +} + +inline int toUpper(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error) +{ + UErrorCode status = U_ZERO_ERROR; + int realLength = u_strToUpper(result, resultLength, src, srcLength, "", &status); + *error = !!U_FAILURE(status); + return realLength; +} + +inline UChar32 toTitleCase(UChar32 c) +{ + return u_totitle(c); +} + +inline bool isArabicChar(UChar32 c) +{ + return ublock_getCode(c) == UBLOCK_ARABIC; +} + +inline bool isSeparatorSpace(UChar32 c) +{ + return u_charType(c) == U_SPACE_SEPARATOR; +} + +inline bool isPrintableChar(UChar32 c) +{ + return !!u_isprint(c); +} + +inline bool isPunct(UChar32 c) +{ + return !!u_ispunct(c); +} + +inline UChar32 mirroredChar(UChar32 c) +{ + return u_charMirror(c); +} + +inline CharCategory category(UChar32 c) +{ + return static_cast<CharCategory>(U_GET_GC_MASK(c)); +} + +inline Direction direction(UChar32 c) +{ + return static_cast<Direction>(u_charDirection(c)); +} + +inline bool isLower(UChar32 c) +{ + return !!u_islower(c); +} + +inline uint8_t combiningClass(UChar32 c) +{ + return u_getCombiningClass(c); +} + +inline DecompositionType decompositionType(UChar32 c) +{ + return static_cast<DecompositionType>(u_getIntPropertyValue(c, UCHAR_DECOMPOSITION_TYPE)); +} + +inline int umemcasecmp(const UChar* a, const UChar* b, int len) +{ + return u_memcasecmp(a, b, len, U_FOLD_CASE_DEFAULT); +} + +} } + +#endif // WTF_UNICODE_ICU_H diff --git 
a/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h new file mode 100644 index 0000000..d7d78ce --- /dev/null +++ b/src/3rdparty/webkit/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h @@ -0,0 +1,526 @@ +/* + * Copyright (C) 2006 George Staikos <staikos@kde.org> + * Copyright (C) 2006 Alexey Proskuryakov <ap@nypop.com> + * Copyright (C) 2006, 2007, 2008, 2009 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_UNICODE_QT4_H +#define WTF_UNICODE_QT4_H + +#include <QChar> +#include <QString> + +#include <config.h> + +#include <stdint.h> + +#if QT_VERSION >= 0x040300 +QT_BEGIN_NAMESPACE +namespace QUnicodeTables { + struct Properties { + ushort category : 8; + ushort line_break_class : 8; + ushort direction : 8; + ushort combiningClass :8; + ushort joining : 2; + signed short digitValue : 6; /* 5 needed */ + ushort unicodeVersion : 4; + ushort lowerCaseSpecial : 1; + ushort upperCaseSpecial : 1; + ushort titleCaseSpecial : 1; + ushort caseFoldSpecial : 1; /* currently unused */ + signed short mirrorDiff : 16; + signed short lowerCaseDiff : 16; + signed short upperCaseDiff : 16; + signed short titleCaseDiff : 16; + signed short caseFoldDiff : 16; + }; + Q_CORE_EXPORT const Properties * QT_FASTCALL properties(uint ucs4); + Q_CORE_EXPORT const Properties * QT_FASTCALL properties(ushort ucs2); +} +QT_END_NAMESPACE +#endif + +// ugly hack to make UChar compatible with JSChar in API/JSStringRef.h +#if defined(Q_OS_WIN) +typedef wchar_t UChar; +#else +typedef uint16_t UChar; +#endif +typedef uint32_t UChar32; + +// some defines from ICU + +#define U16_IS_LEAD(c) (((c)&0xfffffc00)==0xd800) +#define U16_IS_TRAIL(c) (((c)&0xfffffc00)==0xdc00) +#define U16_SURROGATE_OFFSET ((0xd800<<10UL)+0xdc00-0x10000) +#define U16_GET_SUPPLEMENTARY(lead, trail) \ + (((UChar32)(lead)<<10UL)+(UChar32)(trail)-U16_SURROGATE_OFFSET) + +#define U16_LEAD(supplementary) (UChar)(((supplementary)>>10)+0xd7c0) +#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3ff)|0xdc00) + +#define U_IS_SURROGATE(c) (((c)&0xfffff800)==0xd800) +#define U16_IS_SINGLE(c) !U_IS_SURROGATE(c) +#define U16_IS_SURROGATE(c) U_IS_SURROGATE(c) +#define U16_IS_SURROGATE_LEAD(c) (((c)&0x400)==0) + +#define U16_NEXT(s, i, length, c) { \ + (c)=(s)[(i)++]; \ + if(U16_IS_LEAD(c)) { \ + uint16_t __c2; \ + if((i)<(length) && U16_IS_TRAIL(__c2=(s)[(i)])) { \ + ++(i); \ + (c)=U16_GET_SUPPLEMENTARY((c), __c2); \ + } \ + } \ +} + +#define U_MASK(x) ((uint32_t)1<<(x)) + +namespace WTF { +namespace Unicode { + +enum Direction { + LeftToRight = QChar::DirL, + RightToLeft = QChar::DirR, + EuropeanNumber = QChar::DirEN, + EuropeanNumberSeparator = QChar::DirES, + EuropeanNumberTerminator = QChar::DirET, + 
ArabicNumber = QChar::DirAN, + CommonNumberSeparator = QChar::DirCS, + BlockSeparator = QChar::DirB, + SegmentSeparator = QChar::DirS, + WhiteSpaceNeutral = QChar::DirWS, + OtherNeutral = QChar::DirON, + LeftToRightEmbedding = QChar::DirLRE, + LeftToRightOverride = QChar::DirLRO, + RightToLeftArabic = QChar::DirAL, + RightToLeftEmbedding = QChar::DirRLE, + RightToLeftOverride = QChar::DirRLO, + PopDirectionalFormat = QChar::DirPDF, + NonSpacingMark = QChar::DirNSM, + BoundaryNeutral = QChar::DirBN +}; + +enum DecompositionType { + DecompositionNone = QChar::NoDecomposition, + DecompositionCanonical = QChar::Canonical, + DecompositionCompat = QChar::Compat, + DecompositionCircle = QChar::Circle, + DecompositionFinal = QChar::Final, + DecompositionFont = QChar::Font, + DecompositionFraction = QChar::Fraction, + DecompositionInitial = QChar::Initial, + DecompositionIsolated = QChar::Isolated, + DecompositionMedial = QChar::Medial, + DecompositionNarrow = QChar::Narrow, + DecompositionNoBreak = QChar::NoBreak, + DecompositionSmall = QChar::Small, + DecompositionSquare = QChar::Square, + DecompositionSub = QChar::Sub, + DecompositionSuper = QChar::Super, + DecompositionVertical = QChar::Vertical, + DecompositionWide = QChar::Wide +}; + +enum CharCategory { + NoCategory = 0, + Mark_NonSpacing = U_MASK(QChar::Mark_NonSpacing), + Mark_SpacingCombining = U_MASK(QChar::Mark_SpacingCombining), + Mark_Enclosing = U_MASK(QChar::Mark_Enclosing), + Number_DecimalDigit = U_MASK(QChar::Number_DecimalDigit), + Number_Letter = U_MASK(QChar::Number_Letter), + Number_Other = U_MASK(QChar::Number_Other), + Separator_Space = U_MASK(QChar::Separator_Space), + Separator_Line = U_MASK(QChar::Separator_Line), + Separator_Paragraph = U_MASK(QChar::Separator_Paragraph), + Other_Control = U_MASK(QChar::Other_Control), + Other_Format = U_MASK(QChar::Other_Format), + Other_Surrogate = U_MASK(QChar::Other_Surrogate), + Other_PrivateUse = U_MASK(QChar::Other_PrivateUse), + Other_NotAssigned = U_MASK(QChar::Other_NotAssigned), + Letter_Uppercase = U_MASK(QChar::Letter_Uppercase), + Letter_Lowercase = U_MASK(QChar::Letter_Lowercase), + Letter_Titlecase = U_MASK(QChar::Letter_Titlecase), + Letter_Modifier = U_MASK(QChar::Letter_Modifier), + Letter_Other = U_MASK(QChar::Letter_Other), + Punctuation_Connector = U_MASK(QChar::Punctuation_Connector), + Punctuation_Dash = U_MASK(QChar::Punctuation_Dash), + Punctuation_Open = U_MASK(QChar::Punctuation_Open), + Punctuation_Close = U_MASK(QChar::Punctuation_Close), + Punctuation_InitialQuote = U_MASK(QChar::Punctuation_InitialQuote), + Punctuation_FinalQuote = U_MASK(QChar::Punctuation_FinalQuote), + Punctuation_Other = U_MASK(QChar::Punctuation_Other), + Symbol_Math = U_MASK(QChar::Symbol_Math), + Symbol_Currency = U_MASK(QChar::Symbol_Currency), + Symbol_Modifier = U_MASK(QChar::Symbol_Modifier), + Symbol_Other = U_MASK(QChar::Symbol_Other) +}; + + +#if QT_VERSION >= 0x040300 + +// FIXME: handle surrogates correctly in all methods + +inline UChar32 toLower(UChar32 ch) +{ + return QChar::toLower(ch); +} + +inline int toLower(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error) +{ + const UChar *e = src + srcLength; + const UChar *s = src; + UChar *r = result; + uint rindex = 0; + + // this avoids one out of bounds check in the loop + if (s < e && QChar(*s).isLowSurrogate()) { + if (r) + r[rindex] = *s++; + ++rindex; + } + + int needed = 0; + while (s < e && (rindex < uint(resultLength) || !r)) { + uint c = *s; + if (QChar(c).isLowSurrogate() && QChar(*(s - 
1)).isHighSurrogate()) + c = QChar::surrogateToUcs4(*(s - 1), c); + const QUnicodeTables::Properties *prop = QUnicodeTables::properties(c); + if (prop->lowerCaseSpecial) { + QString qstring; + if (c < 0x10000) { + qstring += QChar(c); + } else { + qstring += QChar(*(s-1)); + qstring += QChar(*s); + } + qstring = qstring.toLower(); + for (int i = 0; i < qstring.length(); ++i) { + if (rindex >= uint(resultLength)) { + needed += qstring.length() - i; + break; + } + if (r) + r[rindex] = qstring.at(i).unicode(); + ++rindex; + } + } else { + if (r) + r[rindex] = *s + prop->lowerCaseDiff; + ++rindex; + } + ++s; + } + if (s < e) + needed += e - s; + *error = (needed != 0); + if (rindex < uint(resultLength)) + r[rindex] = 0; + return rindex + needed; +} + +inline UChar32 toUpper(UChar32 ch) +{ + return QChar::toUpper(ch); +} + +inline int toUpper(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error) +{ + const UChar *e = src + srcLength; + const UChar *s = src; + UChar *r = result; + int rindex = 0; + + // this avoids one out of bounds check in the loop + if (s < e && QChar(*s).isLowSurrogate()) { + if (r) + r[rindex] = *s++; + ++rindex; + } + + int needed = 0; + while (s < e && (rindex < resultLength || !r)) { + uint c = *s; + if (QChar(c).isLowSurrogate() && QChar(*(s - 1)).isHighSurrogate()) + c = QChar::surrogateToUcs4(*(s - 1), c); + const QUnicodeTables::Properties *prop = QUnicodeTables::properties(c); + if (prop->upperCaseSpecial) { + QString qstring; + if (c < 0x10000) { + qstring += QChar(c); + } else { + qstring += QChar(*(s-1)); + qstring += QChar(*s); + } + qstring = qstring.toUpper(); + for (int i = 0; i < qstring.length(); ++i) { + if (rindex >= resultLength) { + needed += qstring.length() - i; + break; + } + if (r) + r[rindex] = qstring.at(i).unicode(); + ++rindex; + } + } else { + if (r) + r[rindex] = *s + prop->upperCaseDiff; + ++rindex; + } + ++s; + } + if (s < e) + needed += e - s; + *error = (needed != 0); + if (rindex < resultLength) + r[rindex] = 0; + return rindex + needed; +} + +inline int toTitleCase(UChar32 c) +{ + return QChar::toTitleCase(c); +} + +inline UChar32 foldCase(UChar32 c) +{ + return QChar::toCaseFolded(c); +} + +inline int foldCase(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error) +{ + // FIXME: handle special casing. 
Easiest with some low level API in Qt + *error = false; + if (resultLength < srcLength) { + *error = true; + return srcLength; + } + for (int i = 0; i < srcLength; ++i) + result[i] = QChar::toCaseFolded(ushort(src[i])); + return srcLength; +} + +inline bool isArabicChar(UChar32 c) +{ + return c >= 0x0600 && c <= 0x06FF; +} + +inline bool isPrintableChar(UChar32 c) +{ + const uint test = U_MASK(QChar::Other_Control) | + U_MASK(QChar::Other_NotAssigned); + return !(U_MASK(QChar::category(c)) & test); +} + +inline bool isSeparatorSpace(UChar32 c) +{ + return QChar::category(c) == QChar::Separator_Space; +} + +inline bool isPunct(UChar32 c) +{ + const uint test = U_MASK(QChar::Punctuation_Connector) | + U_MASK(QChar::Punctuation_Dash) | + U_MASK(QChar::Punctuation_Open) | + U_MASK(QChar::Punctuation_Close) | + U_MASK(QChar::Punctuation_InitialQuote) | + U_MASK(QChar::Punctuation_FinalQuote) | + U_MASK(QChar::Punctuation_Other); + return U_MASK(QChar::category(c)) & test; +} + +inline bool isLower(UChar32 c) +{ + return QChar::category(c) == QChar::Letter_Lowercase; +} + +inline UChar32 mirroredChar(UChar32 c) +{ + return QChar::mirroredChar(c); +} + +inline uint8_t combiningClass(UChar32 c) +{ + return QChar::combiningClass(c); +} + +inline DecompositionType decompositionType(UChar32 c) +{ + return (DecompositionType)QChar::decompositionTag(c); +} + +inline int umemcasecmp(const UChar* a, const UChar* b, int len) +{ + // handle surrogates correctly + for (int i = 0; i < len; ++i) { + uint c1 = QChar::toCaseFolded(ushort(a[i])); + uint c2 = QChar::toCaseFolded(ushort(b[i])); + if (c1 != c2) + return c1 - c2; + } + return 0; +} + +inline Direction direction(UChar32 c) +{ + return (Direction)QChar::direction(c); +} + +inline CharCategory category(UChar32 c) +{ + return (CharCategory) U_MASK(QChar::category(c)); +} + +#else + +inline UChar32 toLower(UChar32 ch) +{ + if (ch > 0xffff) + return ch; + return QChar((unsigned short)ch).toLower().unicode(); +} + +inline int toLower(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error) +{ + *error = false; + if (resultLength < srcLength) { + *error = true; + return srcLength; + } + for (int i = 0; i < srcLength; ++i) + result[i] = QChar(src[i]).toLower().unicode(); + return srcLength; +} + +inline UChar32 toUpper(UChar32 ch) +{ + if (ch > 0xffff) + return ch; + return QChar((unsigned short)ch).toUpper().unicode(); +} + +inline int toUpper(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error) +{ + *error = false; + if (resultLength < srcLength) { + *error = true; + return srcLength; + } + for (int i = 0; i < srcLength; ++i) + result[i] = QChar(src[i]).toUpper().unicode(); + return srcLength; +} + +inline int toTitleCase(UChar32 c) +{ + if (c > 0xffff) + return c; + return QChar((unsigned short)c).toUpper().unicode(); +} + +inline UChar32 foldCase(UChar32 c) +{ + if (c > 0xffff) + return c; + return QChar((unsigned short)c).toLower().unicode(); +} + +inline int foldCase(UChar* result, int resultLength, const UChar* src, int srcLength, bool* error) +{ + return toLower(result, resultLength, src, srcLength, error); +} + +inline bool isPrintableChar(UChar32 c) +{ + return (c & 0xffff0000) == 0 && QChar((unsigned short)c).isPrint(); +} + +inline bool isArabicChar(UChar32 c) +{ + return c >= 0x0600 && c <= 0x06FF; +} + +inline bool isSeparatorSpace(UChar32 c) +{ + return (c & 0xffff0000) == 0 && QChar((unsigned short)c).category() == QChar::Separator_Space; +} + +inline bool isPunct(UChar32 c) +{ + return (c & 
0xffff0000) == 0 && QChar((unsigned short)c).isPunct();
+}
+
+inline bool isLower(UChar32 c)
+{
+    return (c & 0xffff0000) == 0 && QChar((unsigned short)c).category() == QChar::Letter_Lowercase;
+}
+
+inline UChar32 mirroredChar(UChar32 c)
+{
+    if (c > 0xffff)
+        return c;
+    return QChar(c).mirroredChar().unicode();
+}
+
+inline uint8_t combiningClass(UChar32 c)
+{
+    if (c > 0xffff)
+        return 0;
+    return QChar((unsigned short)c).combiningClass();
+}
+
+inline DecompositionType decompositionType(UChar32 c)
+{
+    if (c > 0xffff)
+        return DecompositionNone;
+    return (DecompositionType)QChar(c).decompositionTag();
+}
+
+inline int umemcasecmp(const UChar* a, const UChar* b, int len)
+{
+    for (int i = 0; i < len; ++i) {
+        QChar c1 = QChar(a[i]).toLower();
+        QChar c2 = QChar(b[i]).toLower();
+        if (c1 != c2)
+            return c1.unicode() - c2.unicode();
+    }
+    return 0;
+}
+
+inline Direction direction(UChar32 c)
+{
+    if (c > 0xffff)
+        return LeftToRight;
+    return (Direction)QChar(c).direction();
+}
+
+inline CharCategory category(UChar32 c)
+{
+    if (c > 0xffff)
+        return NoCategory;
+    return (CharCategory) U_MASK(QChar(c).category());
+}
+
+#endif
+
+} }
+
+#endif // WTF_UNICODE_QT4_H