path: root/src/3rdparty/javascriptcore/JavaScriptCore/jit
author     Simon Hausmann <simon.hausmann@nokia.com>   2009-09-16 07:51:00 (GMT)
committer  Simon Hausmann <simon.hausmann@nokia.com>   2009-09-16 07:58:16 (GMT)
commit     120329adb47dba60f532c1c2fd2ad0f37b812437 (patch)
tree       fc2311cef4b69fe7294e8f5aab27fe6af2546123 /src/3rdparty/javascriptcore/JavaScriptCore/jit
parent     ce17ae5a6159d8ce3a5d2cc98f804a2debb860e5 (diff)
Separate the copy of JavaScriptCore that QtScript uses from the copy that QtWebKit uses.

This is needed to decouple QtScript from QtWebKit, as discussed within the WebKit team.

Reviewed-by: Kent Hansen
Diffstat (limited to 'src/3rdparty/javascriptcore/JavaScriptCore/jit')
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.cpp              38
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h               249
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp  447
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp         82
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp           60
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp                             942
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h                               728
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp                  1378
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp                         323
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCode.h                           122
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h                  475
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp                     1187
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp               839
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h                       170
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp                       2801
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h                          346
16 files changed, 10187 insertions, 0 deletions
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.cpp
new file mode 100644
index 0000000..f6b27ec
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+size_t ExecutableAllocator::pageSize = 0;
+
+}
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h
new file mode 100644
index 0000000..4ed47e3
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExecutableAllocator_h
+#define ExecutableAllocator_h
+
+#include <limits>
+#include <wtf/Assertions.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/UnusedParam.h>
+#include <wtf/Vector.h>
+
+#if PLATFORM(IPHONE)
+#include <libkern/OSCacheControl.h>
+#include <sys/mman.h>
+#endif
+
+#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
+#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
+#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
+#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
+#else
+#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
+#endif
+
+namespace JSC {
+
+inline size_t roundUpAllocationSize(size_t request, size_t granularity)
+{
+ if ((std::numeric_limits<size_t>::max() - granularity) <= request)
+ CRASH(); // Allocation is too large
+
+ // Round up to next page boundary
+ size_t size = request + (granularity - 1);
+ size = size & ~(granularity - 1);
+ ASSERT(size >= request);
+ return size;
+}
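
A worked example of the rounding arithmetic above (illustrative, not part of the patch): with a power-of-two granularity, the add-then-mask sequence rounds the request up to the next boundary.

    // With granularity == 4096 (a typical page size):
    //   size = 5000 + 4095 = 9095
    //   size & ~4095       = 8192, the next page boundary >= 5000
    size_t rounded = roundUpAllocationSize(5000, 4096); // yields 8192
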
+
+}
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+class ExecutablePool : public RefCounted<ExecutablePool> {
+private:
+ struct Allocation {
+ char* pages;
+ size_t size;
+ };
+ typedef Vector<Allocation, 2> AllocationList;
+
+public:
+ static PassRefPtr<ExecutablePool> create(size_t n)
+ {
+ return adoptRef(new ExecutablePool(n));
+ }
+
+ void* alloc(size_t n)
+ {
+ ASSERT(m_freePtr <= m_end);
+
+ // Round 'n' up to a multiple of word size; if all allocations are of
+ // word sized quantities, then all subsequent allocations will be aligned.
+ n = roundUpAllocationSize(n, sizeof(void*));
+
+ if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
+ void* result = m_freePtr;
+ m_freePtr += n;
+ return result;
+ }
+
+        // Insufficient space to allocate in the existing pool,
+        // so we need to allocate from a new pool.
+ return poolAllocate(n);
+ }
+
+ ~ExecutablePool()
+ {
+ AllocationList::const_iterator end = m_pools.end();
+ for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
+ ExecutablePool::systemRelease(*ptr);
+ }
+
+ size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
+
+private:
+ static Allocation systemAlloc(size_t n);
+ static void systemRelease(const Allocation& alloc);
+
+ ExecutablePool(size_t n);
+
+ void* poolAllocate(size_t n);
+
+ char* m_freePtr;
+ char* m_end;
+ AllocationList m_pools;
+};
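
A minimal usage sketch for ExecutablePool (the sizes here are made up for illustration): create() reserves an initial chunk via systemAlloc(), alloc() bump-allocates word-aligned pieces out of it, and poolAllocate() grabs fresh pages once the reservation is exhausted.

    RefPtr<ExecutablePool> pool = ExecutablePool::create(16 * 1024); // hypothetical size
    void* a = pool->alloc(100); // rounded up to 104 on a 64-bit target
    void* b = pool->alloc(64);  // carved from the same reservation, directly after 'a'
    // systemRelease() returns all backing pages when the last RefPtr is dropped.
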
+
+class ExecutableAllocator {
+ enum ProtectionSeting { Writable, Executable };
+
+public:
+ static size_t pageSize;
+ ExecutableAllocator()
+ {
+ if (!pageSize)
+ intializePageSize();
+ m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+ }
+
+ PassRefPtr<ExecutablePool> poolForSize(size_t n)
+ {
+ // Try to fit in the existing small allocator
+ if (n < m_smallAllocationPool->available())
+ return m_smallAllocationPool;
+
+        // If the request is large, we just provide an unshared allocator
+ if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
+ return ExecutablePool::create(n);
+
+ // Create a new allocator
+ RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+
+ // If the new allocator will result in more free space than in
+ // the current small allocator, then we will use it instead
+ if ((pool->available() - n) > m_smallAllocationPool->available())
+ m_smallAllocationPool = pool;
+ return pool.release();
+ }
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ static void makeWritable(void* start, size_t size)
+ {
+ reprotectRegion(start, size, Writable);
+ }
+
+ static void makeExecutable(void* start, size_t size)
+ {
+ reprotectRegion(start, size, Executable);
+ }
+#else
+ static void makeWritable(void*, size_t) {}
+ static void makeExecutable(void*, size_t) {}
+#endif
+
+
+#if PLATFORM(X86) || PLATFORM(X86_64)
+ static void cacheFlush(void*, size_t)
+ {
+ }
+#elif PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE)
+ static void cacheFlush(void* code, size_t size)
+ {
+ sys_dcache_flush(code, size);
+ sys_icache_invalidate(code, size);
+ }
+#elif PLATFORM(ARM)
+ static void cacheFlush(void* code, size_t size)
+ {
+ #if COMPILER(GCC) && (GCC_VERSION >= 30406)
+ __clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(code) + size);
+ #else
+ const int syscall = 0xf0002;
+ __asm __volatile (
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, %2\n"
+ "mov r2, #0x0\n"
+ "swi 0x00000000\n"
+ :
+ : "r" (code), "r" (reinterpret_cast<char*>(code) + size), "r" (syscall)
+ : "r0", "r1", "r7");
+ #endif // COMPILER(GCC) && (GCC_VERSION >= 30406)
+ }
+#endif
+
+private:
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ static void reprotectRegion(void*, size_t, ProtectionSeting);
+#endif
+
+ RefPtr<ExecutablePool> m_smallAllocationPool;
+ static void intializePageSize();
+};
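
A hedged sketch of how a client is expected to drive this allocator (emitCode() is a hypothetical stand-in for whatever writes the machine code): under ENABLE(ASSEMBLER_WX_EXCLUSIVE) every write to generated code must be bracketed by makeWritable()/makeExecutable(); in other configurations those calls are no-ops because the pages stay RWX throughout. cacheFlush() only does real work on ARM.

    ExecutableAllocator allocator;
    RefPtr<ExecutablePool> pool = allocator.poolForSize(codeSize);
    void* dst = pool->alloc(codeSize);
    ExecutableAllocator::makeWritable(dst, codeSize);   // RW under W^X builds
    emitCode(dst, codeSize);                            // hypothetical code emission
    ExecutableAllocator::makeExecutable(dst, codeSize); // flip back to RX
    ExecutableAllocator::cacheFlush(dst, codeSize);     // no-op on x86/x86-64
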
+
+inline ExecutablePool::ExecutablePool(size_t n)
+{
+ size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
+ Allocation mem = systemAlloc(allocSize);
+ m_pools.append(mem);
+ m_freePtr = mem.pages;
+ if (!m_freePtr)
+ CRASH(); // Failed to allocate
+ m_end = m_freePtr + allocSize;
+}
+
+inline void* ExecutablePool::poolAllocate(size_t n)
+{
+ size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
+
+ Allocation result = systemAlloc(allocSize);
+ if (!result.pages)
+ CRASH(); // Failed to allocate
+
+ ASSERT(m_end >= m_freePtr);
+ if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
+ // Replace allocation pool
+ m_freePtr = result.pages + n;
+ m_end = result.pages + allocSize;
+ }
+
+ m_pools.append(result);
+ return result.pages;
+}
+
+}
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // !defined(ExecutableAllocator_h)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
new file mode 100644
index 0000000..7682b9c
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#include <errno.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(MAC) && PLATFORM(X86_64)
+
+#include "TCSpinLock.h"
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <wtf/AVLTree.h>
+#include <wtf/VMTags.h>
+
+using namespace WTF;
+
+namespace JSC {
+
+#define TWO_GB (2u * 1024u * 1024u * 1024u)
+#define SIXTEEN_MB (16u * 1024u * 1024u)
+
+// FreeListEntry describes a free chunk of memory, stored in the freeList.
+struct FreeListEntry {
+ FreeListEntry(void* pointer, size_t size)
+ : pointer(pointer)
+ , size(size)
+ , nextEntry(0)
+ , less(0)
+ , greater(0)
+ , balanceFactor(0)
+ {
+ }
+
+ // All entries of the same size share a single entry
+ // in the AVLTree, and are linked together in a linked
+ // list, using nextEntry.
+ void* pointer;
+ size_t size;
+ FreeListEntry* nextEntry;
+
+ // These fields are used by AVLTree.
+ FreeListEntry* less;
+ FreeListEntry* greater;
+ int balanceFactor;
+};
+
+// Abstractor class for use in AVLTree.
+// Nodes in the AVLTree are of type FreeListEntry, keyed on
+// (and thus sorted by) their size.
+struct AVLTreeAbstractorForFreeList {
+ typedef FreeListEntry* handle;
+ typedef int32_t size;
+ typedef size_t key;
+
+ handle get_less(handle h) { return h->less; }
+ void set_less(handle h, handle lh) { h->less = lh; }
+ handle get_greater(handle h) { return h->greater; }
+ void set_greater(handle h, handle gh) { h->greater = gh; }
+ int get_balance_factor(handle h) { return h->balanceFactor; }
+ void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }
+
+ static handle null() { return 0; }
+
+ int compare_key_key(key va, key vb) { return va - vb; }
+ int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); }
+ int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); }
+};
+
+// Used to reverse sort an array of FreeListEntry pointers.
+static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
+{
+ FreeListEntry* left = *(FreeListEntry**)leftPtr;
+ FreeListEntry* right = *(FreeListEntry**)rightPtr;
+
+ return (intptr_t)(right->pointer) - (intptr_t)(left->pointer);
+}
+
+// Used to reverse sort an array of pointers.
+static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr)
+{
+ void* left = *(void**)leftPtr;
+ void* right = *(void**)rightPtr;
+
+ return (intptr_t)right - (intptr_t)left;
+}
+
+class FixedVMPoolAllocator
+{
+ // The free list is stored in a sorted tree.
+ typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;
+
+    // Use madvise as appropriate to prevent freed pages from being spilled,
+ // and to attempt to ensure that used memory is reported correctly.
+#if HAVE(MADV_FREE_REUSE)
+ void release(void* position, size_t size)
+ {
+ while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+ }
+
+ void reuse(void* position, size_t size)
+ {
+ while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+ }
+#elif HAVE(MADV_DONTNEED)
+ void release(void* position, size_t size)
+ {
+ while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+ }
+
+ void reuse(void*, size_t) {}
+#else
+ void release(void*, size_t) {}
+ void reuse(void*, size_t) {}
+#endif
+
+    // All additions to the free list should go through this method, rather than
+    // calling insert directly, to avoid multiple entries being added with the
+    // same key. All nodes being added should be singletons; they should not
+    // already be part of a chain.
+ void addToFreeList(FreeListEntry* entry)
+ {
+ ASSERT(!entry->nextEntry);
+
+ if (entry->size == m_commonSize) {
+ m_commonSizedAllocations.append(entry->pointer);
+ delete entry;
+ } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {
+            // m_freeList already contains an entry for this size - insert this node into the chain.
+ entry->nextEntry = entryInFreeList->nextEntry;
+ entryInFreeList->nextEntry = entry;
+ } else
+ m_freeList.insert(entry);
+ }
+
+    // We do not attempt to coalesce on addition, which may lead to fragmentation;
+    // instead we periodically perform a sweep to try to coalesce neighboring
+ // entries in m_freeList. Presently this is triggered at the point 16MB
+ // of memory has been released.
+ void coalesceFreeSpace()
+ {
+ Vector<FreeListEntry*> freeListEntries;
+ SizeSortedFreeTree::Iterator iter;
+ iter.start_iter_least(m_freeList);
+
+ // Empty m_freeList into a Vector.
+ for (FreeListEntry* entry; (entry = *iter); ++iter) {
+ // Each entry in m_freeList might correspond to multiple
+ // free chunks of memory (of the same size). Walk the chain
+            // (which is, of course, likely to be only one entry long!) adding
+            // each entry to the Vector (and resetting the next-in-chain
+            // pointer to separate each node out).
+ FreeListEntry* next;
+ do {
+ next = entry->nextEntry;
+ entry->nextEntry = 0;
+ freeListEntries.append(entry);
+ } while ((entry = next));
+ }
+ // All entries are now in the Vector; purge the tree.
+ m_freeList.purge();
+
+ // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.
+ // We reverse-sort so that we can logically work forwards through memory,
+ // whilst popping items off the end of the Vectors using last() and removeLast().
+ qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
+ qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);
+
+ // The entries from m_commonSizedAllocations that cannot be
+ // coalesced into larger chunks will be temporarily stored here.
+ Vector<void*> newCommonSizedAllocations;
+
+ // Keep processing so long as entries remain in either of the vectors.
+ while (freeListEntries.size() || m_commonSizedAllocations.size()) {
+ // We're going to try to find a FreeListEntry node that we can coalesce onto.
+ FreeListEntry* coalescionEntry = 0;
+
+ // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?
+ if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
+ // Pop an item from the m_commonSizedAllocations vector - this is the lowest
+ // addressed free chunk. Find out the begin and end addresses of the memory chunk.
+ void* begin = m_commonSizedAllocations.last();
+ void* end = (void*)((intptr_t)begin + m_commonSize);
+ m_commonSizedAllocations.removeLast();
+
+ // Try to find another free chunk abutting onto the end of the one we have already found.
+ if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
+ // There is an existing FreeListEntry for the next chunk of memory!
+                    // We can reuse this. Pop it off the end of m_freeList.
+ coalescionEntry = freeListEntries.last();
+ freeListEntries.removeLast();
+ // Update the existing node to include the common-sized chunk that we also found.
+ coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
+ coalescionEntry->size += m_commonSize;
+ } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
+ // There is a second common-sized chunk that can be coalesced.
+ // Allocate a new node.
+ m_commonSizedAllocations.removeLast();
+ coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
+ } else {
+ // Nope - this poor little guy is all on his own. :-(
+                // Add him into the newCommonSizedAllocations vector for now; we're
+ // going to end up adding him back into the m_commonSizedAllocations
+ // list when we're done.
+ newCommonSizedAllocations.append(begin);
+ continue;
+ }
+ } else {
+ ASSERT(freeListEntries.size());
+ ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
+ // The lowest addressed item is from m_freeList; pop it from the Vector.
+ coalescionEntry = freeListEntries.last();
+ freeListEntries.removeLast();
+ }
+
+            // Right, we have a FreeListEntry; we just need to check if there is anything else
+ // to coalesce onto the end.
+ ASSERT(coalescionEntry);
+ while (true) {
+ // Calculate the end address of the chunk we have found so far.
+            void* end = (void*)((intptr_t)coalescionEntry->pointer + coalescionEntry->size);
+
+ // Is there another chunk adjacent to the one we already have?
+ if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
+                // Yes - another FreeListEntry - pop it from the list.
+                FreeListEntry* coalescee = freeListEntries.last();
+                freeListEntries.removeLast();
+                // Add its size onto our existing node.
+ coalescionEntry->size += coalescee->size;
+ delete coalescee;
+ } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
+ // We can coalesce the next common-sized chunk.
+ m_commonSizedAllocations.removeLast();
+ coalescionEntry->size += m_commonSize;
+ } else
+ break; // Nope, nothing to be added - stop here.
+ }
+
+ // We've coalesced everything we can onto the current chunk.
+ // Add it back into m_freeList.
+ addToFreeList(coalescionEntry);
+ }
+
+ // All chunks of free memory larger than m_commonSize should be
+ // back in m_freeList by now. All that remains to be done is to
+    // copy the contents of the newCommonSizedAllocations back into
+ // the m_commonSizedAllocations Vector.
+ ASSERT(m_commonSizedAllocations.size() == 0);
+ m_commonSizedAllocations.append(newCommonSizedAllocations);
+ }
+
+public:
+
+ FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
+ : m_commonSize(commonSize)
+ , m_countFreedSinceLastCoalesce(0)
+ , m_totalHeapSize(totalHeapSize)
+ {
+ // Cook up an address to allocate at, using the following recipe:
+ // 17 bits of zero, stay in userspace kids.
+ // 26 bits of randomness for ASLR.
+ // 21 bits of zero, at least stay aligned within one level of the pagetables.
+ //
+ // But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
+        // for now instead of 2^26 bits of ASLR let's stick with 25 bits of randomization plus
+        // 2^24, which should put us somewhere in the middle of userspace (in the address range
+ // 0x200000000000 .. 0x5fffffffffff).
+ intptr_t randomLocation = arc4random() & ((1 << 25) - 1);
+ randomLocation += (1 << 24);
+ randomLocation <<= 21;
+ m_base = mmap(reinterpret_cast<void*>(randomLocation), m_totalHeapSize, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
+        if (m_base == MAP_FAILED) // mmap reports failure with MAP_FAILED, not a null pointer
+            CRASH();
+
+ // For simplicity, we keep all memory in m_freeList in a 'released' state.
+ // This means that we can simply reuse all memory when allocating, without
+        // worrying about its previous state, and also makes coalescing m_freeList
+ // simpler since we need not worry about the possibility of coalescing released
+ // chunks with non-released ones.
+ release(m_base, m_totalHeapSize);
+ m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
+ }
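
Working the address recipe through (a sanity check on the constants, not new code): after the addition, randomLocation lies in [2^24, 2^25 + 2^24 - 1], and the 21-bit shift scales that to:

    // lowest base:  (1 << 24) << 21                    == 0x200000000000
    // highest base: ((1 << 25) - 1 + (1 << 24)) << 21  == 0x5fffffe00000

which matches the stated range 0x200000000000 .. 0x5fffffffffff, with the low 21 bits of the base zero so it stays aligned within one level of the page tables.
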
+
+ void* alloc(size_t size)
+ {
+ void* result;
+
+ // Freed allocations of the common size are not stored back into the main
+ // m_freeList, but are instead stored in a separate vector. If the request
+ // is for a common sized allocation, check this list.
+ if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
+ result = m_commonSizedAllocations.last();
+ m_commonSizedAllocations.removeLast();
+ } else {
+            // Search m_freeList for a suitably sized chunk to allocate memory from.
+ FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
+
+ // This would be bad news.
+ if (!entry) {
+                // Errk! Let's take a last-ditch, desperation attempt at defragmentation...
+ coalesceFreeSpace();
+ // Did that free up a large enough chunk?
+ entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
+ // No?... *BOOM!*
+ if (!entry)
+ CRASH();
+ }
+ ASSERT(entry->size != m_commonSize);
+
+ // Remove the entry from m_freeList. But! -
+ // Each entry in the tree may represent a chain of multiple chunks of the
+            // same size, and we only want to remove one of them. So, if this entry
+ // does have a chain, just remove the first-but-one item from the chain.
+ if (FreeListEntry* next = entry->nextEntry) {
+ // We're going to leave 'entry' in the tree; remove 'next' from its chain.
+ entry->nextEntry = next->nextEntry;
+ next->nextEntry = 0;
+ entry = next;
+ } else
+ m_freeList.remove(entry->size);
+
+            // Whoo! We have a result!
+ ASSERT(entry->size >= size);
+ result = entry->pointer;
+
+            // If the allocation exactly fits the chunk we found in the
+            // m_freeList, then the FreeListEntry node is no longer needed.
+ if (entry->size == size)
+ delete entry;
+ else {
+ // There is memory left over, and it is not of the common size.
+ // We can reuse the existing FreeListEntry node to add this back
+ // into m_freeList.
+ entry->pointer = (void*)((intptr_t)entry->pointer + size);
+ entry->size -= size;
+ addToFreeList(entry);
+ }
+ }
+
+ // Call reuse to report to the operating system that this memory is in use.
+ ASSERT(isWithinVMPool(result, size));
+ reuse(result, size);
+ return result;
+ }
+
+ void free(void* pointer, size_t size)
+ {
+ // Call release to report to the operating system that this
+ // memory is no longer in use, and need not be paged out.
+ ASSERT(isWithinVMPool(pointer, size));
+ release(pointer, size);
+
+ // Common-sized allocations are stored in the m_commonSizedAllocations
+ // vector; all other freed chunks are added to m_freeList.
+ if (size == m_commonSize)
+ m_commonSizedAllocations.append(pointer);
+ else
+ addToFreeList(new FreeListEntry(pointer, size));
+
+        // Do some housekeeping. Every time a total of 16MB
+        // of allocations has been freed, sweep m_freeList,
+ // coalescing any neighboring fragments.
+ m_countFreedSinceLastCoalesce += size;
+ if (m_countFreedSinceLastCoalesce >= SIXTEEN_MB) {
+ m_countFreedSinceLastCoalesce = 0;
+ coalesceFreeSpace();
+ }
+ }
+
+private:
+
+#ifndef NDEBUG
+ bool isWithinVMPool(void* pointer, size_t size)
+ {
+ return pointer >= m_base && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_base) + m_totalHeapSize);
+ }
+#endif
+
+ // Freed space from the most common sized allocations will be held in this list, ...
+ const size_t m_commonSize;
+ Vector<void*> m_commonSizedAllocations;
+
+ // ... and all other freed allocations are held in m_freeList.
+ SizeSortedFreeTree m_freeList;
+
+ // This is used for housekeeping, to trigger defragmentation of the freed lists.
+ size_t m_countFreedSinceLastCoalesce;
+
+ void* m_base;
+ size_t m_totalHeapSize;
+};
+
+void ExecutableAllocator::intializePageSize()
+{
+ ExecutableAllocator::pageSize = getpagesize();
+}
+
+static FixedVMPoolAllocator* allocator = 0;
+static SpinLock spinlock = SPINLOCK_INITIALIZER;
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+{
+ SpinLockHolder lock_holder(&spinlock);
+
+ if (!allocator)
+ allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, TWO_GB);
+ ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
+ return alloc;
+}
+
+void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation)
+{
+ SpinLockHolder lock_holder(&spinlock);
+
+ ASSERT(allocator);
+ allocator->free(allocation.pages, allocation.size);
+}
+
+}
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(MAC) && PLATFORM(X86_64)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
new file mode 100644
index 0000000..4bd5a2c
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(ASSEMBLER)
+
+#include <sys/mman.h>
+#include <unistd.h>
+#include <wtf/VMTags.h>
+
+namespace JSC {
+
+#if !(PLATFORM(MAC) && PLATFORM(X86_64))
+
+void ExecutableAllocator::intializePageSize()
+{
+ ExecutableAllocator::pageSize = getpagesize();
+}
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
+{
+ ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(mmap(NULL, n, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0)), n };
+ return alloc;
+}
+
+void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
+{
+ int result = munmap(alloc.pages, alloc.size);
+ ASSERT_UNUSED(result, !result);
+}
+
+#endif // !(PLATFORM(MAC) && PLATFORM(X86_64))
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSeting setting)
+{
+ if (!pageSize)
+ intializePageSize();
+
+ // Calculate the start of the page containing this region,
+ // and account for this extra memory within size.
+ intptr_t startPtr = reinterpret_cast<intptr_t>(start);
+ intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
+ void* pageStart = reinterpret_cast<void*>(pageStartPtr);
+ size += (startPtr - pageStartPtr);
+
+ // Round size up
+ size += (pageSize - 1);
+ size &= ~(pageSize - 1);
+
+ mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
+}
+#endif
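
A worked example of the page rounding above, assuming pageSize == 4096: reprotecting 100 bytes at an unaligned address expands to the containing page before mprotect() is called.

    // start = 0x10000f80, size = 100
    // pageStart = 0x10000f80 & ~0xfff = 0x10000000
    // size += 0xf80                   -> 4068
    // size rounded up to page size    -> 4096
    // => mprotect(0x10000000, 4096, ...) covers the whole region
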
+
+}
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
new file mode 100644
index 0000000..a9ba7d0
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(ASSEMBLER)
+
+#include "windows.h"
+
+namespace JSC {
+
+void ExecutableAllocator::intializePageSize()
+{
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ ExecutableAllocator::pageSize = system_info.dwPageSize;
+}
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
+{
+ ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(VirtualAlloc(0, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE)), n};
+ return alloc;
+}
+
+void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
+{
+ VirtualFree(alloc.pages, 0, MEM_RELEASE);
+}
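
(Passing 0 as the size is required when using MEM_RELEASE: VirtualFree then releases the entire region originally reserved by VirtualAlloc, so no size needs to be tracked on this path.)
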
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
+#endif
+
+}
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
new file mode 100644
index 0000000..a0e462b
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
@@ -0,0 +1,942 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+// This probably does not belong here; adding here for now as a quick Windows build fix.
+#if ENABLE(ASSEMBLER) && PLATFORM(X86) && !PLATFORM(MAC)
+#include "MacroAssembler.h"
+JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
+}
+
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
+}
+
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
+}
+
+JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
+ : m_interpreter(globalData->interpreter)
+ , m_globalData(globalData)
+ , m_codeBlock(codeBlock)
+ , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
+ , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
+ , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
+ , m_bytecodeIndex((unsigned)-1)
+ , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
+ , m_jumpTargetsPosition(0)
+{
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(src1, regT0, src2, regT1);
+
+ // Jump to a slow case if either operand is a number, or if both are JSCell*s.
+ move(regT0, regT2);
+ orPtr(regT1, regT2);
+ addSlowCase(emitJumpIfJSCell(regT2));
+ addSlowCase(emitJumpIfImmediateNumber(regT2));
+
+ if (type == OpStrictEq)
+ set32(Equal, regT1, regT0, regT0);
+ else
+ set32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emitTimeoutCheck()
+{
+ Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
+ JITStubCall(this, JITStubs::cti_timeout_check).call(timeoutCheckRegister);
+ skipTimeout.link(this);
+
+ killLastResultRegister();
+}
+
+
+#define NEXT_OPCODE(name) \
+ m_bytecodeIndex += OPCODE_LENGTH(name); \
+ break;
+
+#define DEFINE_BINARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, JITStubs::cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_UNARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, JITStubs::cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_OP(name) \
+ case name: { \
+ emit_##name(currentInstruction); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_SLOWCASE_OP(name) \
+ case name: { \
+ emitSlow_##name(currentInstruction, iter); \
+ NEXT_OPCODE(name); \
+ }
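
For reference, a sketch of what a single DEFINE_OP case in the dispatch switch below expands to, using op_add as the example:

    case op_add: {
        emit_op_add(currentInstruction);
        m_bytecodeIndex += OPCODE_LENGTH(op_add);
        break;
    }
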
+
+void JIT::privateCompileMainPass()
+{
+ Instruction* instructionsBegin = m_codeBlock->instructions().begin();
+ unsigned instructionCount = m_codeBlock->instructions().size();
+
+ m_propertyAccessInstructionIndex = 0;
+ m_globalResolveInfoIndex = 0;
+ m_callLinkInfoIndex = 0;
+
+ for (m_bytecodeIndex = 0; m_bytecodeIndex < instructionCount; ) {
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
+ ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeIndex);
+
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_bytecodeIndex > 0) // Avoid the overhead of sampling op_enter twice.
+ sampleInstruction(currentInstruction);
+#endif
+
+ if (m_labels[m_bytecodeIndex].isUsed())
+ killLastResultRegister();
+
+ m_labels[m_bytecodeIndex] = label();
+
+ switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+ DEFINE_BINARY_OP(op_del_by_val)
+ DEFINE_BINARY_OP(op_div)
+ DEFINE_BINARY_OP(op_in)
+ DEFINE_BINARY_OP(op_less)
+ DEFINE_BINARY_OP(op_lesseq)
+ DEFINE_BINARY_OP(op_urshift)
+ DEFINE_UNARY_OP(op_get_pnames)
+ DEFINE_UNARY_OP(op_is_boolean)
+ DEFINE_UNARY_OP(op_is_function)
+ DEFINE_UNARY_OP(op_is_number)
+ DEFINE_UNARY_OP(op_is_object)
+ DEFINE_UNARY_OP(op_is_string)
+ DEFINE_UNARY_OP(op_is_undefined)
+ DEFINE_UNARY_OP(op_negate)
+ DEFINE_UNARY_OP(op_typeof)
+
+ DEFINE_OP(op_add)
+ DEFINE_OP(op_bitand)
+ DEFINE_OP(op_bitnot)
+ DEFINE_OP(op_bitor)
+ DEFINE_OP(op_bitxor)
+ DEFINE_OP(op_call)
+ DEFINE_OP(op_call_eval)
+ DEFINE_OP(op_call_varargs)
+ DEFINE_OP(op_catch)
+ DEFINE_OP(op_construct)
+ DEFINE_OP(op_construct_verify)
+ DEFINE_OP(op_convert_this)
+ DEFINE_OP(op_init_arguments)
+ DEFINE_OP(op_create_arguments)
+ DEFINE_OP(op_debug)
+ DEFINE_OP(op_del_by_id)
+ DEFINE_OP(op_end)
+ DEFINE_OP(op_enter)
+ DEFINE_OP(op_enter_with_activation)
+ DEFINE_OP(op_eq)
+ DEFINE_OP(op_eq_null)
+ DEFINE_OP(op_get_by_id)
+ DEFINE_OP(op_get_by_val)
+ DEFINE_OP(op_get_global_var)
+ DEFINE_OP(op_get_scoped_var)
+ DEFINE_OP(op_instanceof)
+ DEFINE_OP(op_jeq_null)
+ DEFINE_OP(op_jfalse)
+ DEFINE_OP(op_jmp)
+ DEFINE_OP(op_jmp_scopes)
+ DEFINE_OP(op_jneq_null)
+ DEFINE_OP(op_jneq_ptr)
+ DEFINE_OP(op_jnless)
+ DEFINE_OP(op_jnlesseq)
+ DEFINE_OP(op_jsr)
+ DEFINE_OP(op_jtrue)
+ DEFINE_OP(op_load_varargs)
+ DEFINE_OP(op_loop)
+ DEFINE_OP(op_loop_if_less)
+ DEFINE_OP(op_loop_if_lesseq)
+ DEFINE_OP(op_loop_if_true)
+ DEFINE_OP(op_lshift)
+ DEFINE_OP(op_method_check)
+ DEFINE_OP(op_mod)
+ DEFINE_OP(op_mov)
+ DEFINE_OP(op_mul)
+ DEFINE_OP(op_neq)
+ DEFINE_OP(op_neq_null)
+ DEFINE_OP(op_new_array)
+ DEFINE_OP(op_new_error)
+ DEFINE_OP(op_new_func)
+ DEFINE_OP(op_new_func_exp)
+ DEFINE_OP(op_new_object)
+ DEFINE_OP(op_new_regexp)
+ DEFINE_OP(op_next_pname)
+ DEFINE_OP(op_not)
+ DEFINE_OP(op_nstricteq)
+ DEFINE_OP(op_pop_scope)
+ DEFINE_OP(op_post_dec)
+ DEFINE_OP(op_post_inc)
+ DEFINE_OP(op_pre_dec)
+ DEFINE_OP(op_pre_inc)
+ DEFINE_OP(op_profile_did_call)
+ DEFINE_OP(op_profile_will_call)
+ DEFINE_OP(op_push_new_scope)
+ DEFINE_OP(op_push_scope)
+ DEFINE_OP(op_put_by_id)
+ DEFINE_OP(op_put_by_index)
+ DEFINE_OP(op_put_by_val)
+ DEFINE_OP(op_put_getter)
+ DEFINE_OP(op_put_global_var)
+ DEFINE_OP(op_put_scoped_var)
+ DEFINE_OP(op_put_setter)
+ DEFINE_OP(op_resolve)
+ DEFINE_OP(op_resolve_base)
+ DEFINE_OP(op_resolve_func)
+ DEFINE_OP(op_resolve_global)
+ DEFINE_OP(op_resolve_skip)
+ DEFINE_OP(op_resolve_with_base)
+ DEFINE_OP(op_ret)
+ DEFINE_OP(op_rshift)
+ DEFINE_OP(op_sret)
+ DEFINE_OP(op_strcat)
+ DEFINE_OP(op_stricteq)
+ DEFINE_OP(op_sub)
+ DEFINE_OP(op_switch_char)
+ DEFINE_OP(op_switch_imm)
+ DEFINE_OP(op_switch_string)
+ DEFINE_OP(op_tear_off_activation)
+ DEFINE_OP(op_tear_off_arguments)
+ DEFINE_OP(op_throw)
+ DEFINE_OP(op_to_jsnumber)
+ DEFINE_OP(op_to_primitive)
+
+ case op_get_array_length:
+ case op_get_by_id_chain:
+ case op_get_by_id_generic:
+ case op_get_by_id_proto:
+ case op_get_by_id_proto_list:
+ case op_get_by_id_self:
+ case op_get_by_id_self_list:
+ case op_get_string_length:
+ case op_put_by_id_generic:
+ case op_put_by_id_replace:
+ case op_put_by_id_transition:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
+ ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+
+#ifndef NDEBUG
+ // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeIndex = (unsigned)-1;
+#endif
+}
+
+
+void JIT::privateCompileLinkPass()
+{
+ unsigned jmpTableCount = m_jmpTable.size();
+ for (unsigned i = 0; i < jmpTableCount; ++i)
+ m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeIndex], this);
+ m_jmpTable.clear();
+}
+
+void JIT::privateCompileSlowCases()
+{
+ Instruction* instructionsBegin = m_codeBlock->instructions().begin();
+
+ m_propertyAccessInstructionIndex = 0;
+ m_callLinkInfoIndex = 0;
+
+ for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
+ // FIXME: enable peephole optimizations for slow cases when applicable
+ killLastResultRegister();
+
+ m_bytecodeIndex = iter->to;
+#ifndef NDEBUG
+ unsigned firstTo = m_bytecodeIndex;
+#endif
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
+
+ switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+ DEFINE_SLOWCASE_OP(op_add)
+ DEFINE_SLOWCASE_OP(op_bitand)
+ DEFINE_SLOWCASE_OP(op_bitnot)
+ DEFINE_SLOWCASE_OP(op_bitor)
+ DEFINE_SLOWCASE_OP(op_bitxor)
+ DEFINE_SLOWCASE_OP(op_call)
+ DEFINE_SLOWCASE_OP(op_call_eval)
+ DEFINE_SLOWCASE_OP(op_call_varargs)
+ DEFINE_SLOWCASE_OP(op_construct)
+ DEFINE_SLOWCASE_OP(op_construct_verify)
+ DEFINE_SLOWCASE_OP(op_convert_this)
+ DEFINE_SLOWCASE_OP(op_eq)
+ DEFINE_SLOWCASE_OP(op_get_by_id)
+ DEFINE_SLOWCASE_OP(op_get_by_val)
+ DEFINE_SLOWCASE_OP(op_instanceof)
+ DEFINE_SLOWCASE_OP(op_jfalse)
+ DEFINE_SLOWCASE_OP(op_jnless)
+ DEFINE_SLOWCASE_OP(op_jnlesseq)
+ DEFINE_SLOWCASE_OP(op_jtrue)
+ DEFINE_SLOWCASE_OP(op_loop_if_less)
+ DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
+ DEFINE_SLOWCASE_OP(op_loop_if_true)
+ DEFINE_SLOWCASE_OP(op_lshift)
+ DEFINE_SLOWCASE_OP(op_mod)
+ DEFINE_SLOWCASE_OP(op_mul)
+ DEFINE_SLOWCASE_OP(op_method_check)
+ DEFINE_SLOWCASE_OP(op_neq)
+ DEFINE_SLOWCASE_OP(op_not)
+ DEFINE_SLOWCASE_OP(op_nstricteq)
+ DEFINE_SLOWCASE_OP(op_post_dec)
+ DEFINE_SLOWCASE_OP(op_post_inc)
+ DEFINE_SLOWCASE_OP(op_pre_dec)
+ DEFINE_SLOWCASE_OP(op_pre_inc)
+ DEFINE_SLOWCASE_OP(op_put_by_id)
+ DEFINE_SLOWCASE_OP(op_put_by_val)
+ DEFINE_SLOWCASE_OP(op_rshift)
+ DEFINE_SLOWCASE_OP(op_stricteq)
+ DEFINE_SLOWCASE_OP(op_sub)
+ DEFINE_SLOWCASE_OP(op_to_jsnumber)
+ DEFINE_SLOWCASE_OP(op_to_primitive)
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+        ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
+ ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
+
+ emitJumpSlowToHot(jump(), 0);
+ }
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
+#endif
+ ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+
+#ifndef NDEBUG
+ // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeIndex = (unsigned)-1;
+#endif
+}
+
+void JIT::privateCompile()
+{
+ sampleCodeBlock(m_codeBlock);
+#if ENABLE(OPCODE_SAMPLING)
+ sampleInstruction(m_codeBlock->instructions().begin());
+#endif
+
+ // Could use a pop_m, but would need to offset the following instruction if so.
+ preserveReturnAddressAfterCall(regT2);
+ emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
+
+ Jump slowRegisterFileCheck;
+ Label afterRegisterFileCheck;
+ if (m_codeBlock->codeType() == FunctionCode) {
+ // In the case of a fast linked call, we do not set this up in the caller.
+ emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+
+ peek(regT0, OBJECT_OFFSETOF(JITStackFrame, registerFile) / sizeof (void*));
+ addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
+
+ slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, OBJECT_OFFSETOF(RegisterFile, m_end)));
+ afterRegisterFileCheck = label();
+ }
+
+ privateCompileMainPass();
+ privateCompileLinkPass();
+ privateCompileSlowCases();
+
+ if (m_codeBlock->codeType() == FunctionCode) {
+ slowRegisterFileCheck.link(this);
+ m_bytecodeIndex = 0;
+ JITStubCall(this, JITStubs::cti_register_file_check).call();
+#ifndef NDEBUG
+ m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+#endif
+ jump(afterRegisterFileCheck);
+ }
+
+ ASSERT(m_jmpTable.isEmpty());
+
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+
+ // Translate vPC offsets into addresses in JIT generated code, for switch tables.
+ for (unsigned i = 0; i < m_switches.size(); ++i) {
+ SwitchRecord record = m_switches[i];
+ unsigned bytecodeIndex = record.bytecodeIndex;
+
+ if (record.type != SwitchRecord::String) {
+ ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
+ ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
+
+ record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]);
+
+ for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
+ unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
+ record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
+ }
+ } else {
+ ASSERT(record.type == SwitchRecord::String);
+
+ record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]);
+
+ StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
+ for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
+ unsigned offset = it->second.branchOffset;
+ it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ }
+ }
+ }
+
+ for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
+ HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
+ handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
+ }
+
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+
+ if (m_codeBlock->hasExceptionInfo()) {
+ m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
+ m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeIndex(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeIndex));
+ }
+
+ // Link absolute addresses for jsr
+ for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
+ patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
+ StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
+ info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
+ info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
+ }
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
+ CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+ info.ownerCodeBlock = m_codeBlock;
+ info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
+ info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
+ info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
+ }
+#endif
+ unsigned methodCallCount = m_methodCallCompilationInfo.size();
+ m_codeBlock->addMethodCallLinkInfos(methodCallCount);
+ for (unsigned i = 0; i < methodCallCount; ++i) {
+ MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
+ info.structureLabel = patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare);
+ info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
+ }
+
+ m_codeBlock->setJITCode(patchBuffer.finalizeCode());
+}
+
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
+{
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ // (1) The first function provides fast property access for array length
+ Label arrayLengthBegin = align();
+
+ // Check eax is an array
+ Jump array_failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump array_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
+ load32(Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT0);
+
+ Jump array_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
+
+    // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign extend here.
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+
+ ret();
+
+ // (2) The second function provides fast property access for string length
+ Label stringLengthBegin = align();
+
+ // Check eax is a string
+ Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+
+    // Checks out okay! - get the length from the UString.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT0);
+ load32(Address(regT0, OBJECT_OFFSETOF(UString::Rep, len)), regT0);
+
+ Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
+
+    // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign extend here.
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+
+ ret();
+#endif
+
+ // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+ COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
+
+ Label virtualCallPreLinkBegin = align();
+
+ // Load the callee CodeBlock* into eax
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
+ Jump hasCodeBlock1 = branchTestPtr(NonZero, regT0);
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction1 = call();
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ hasCodeBlock1.link(this);
+
+ Jump isNativeFunc1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 2);
+ emitPutJITStubArg(regT0, 4);
+ restoreArgumentReference();
+ Call callArityCheck1 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ arityCheckOkay1.link(this);
+ isNativeFunc1.link(this);
+
+ compileOpCallInitializeCallFrame();
+
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 2);
+ restoreArgumentReference();
+ Call callDontLazyLinkCall = call();
+ emitGetJITStubArg(1, regT2);
+ restoreReturnAddressBeforeReturn(regT3);
+
+ jump(regT0);
+
+ Label virtualCallLinkBegin = align();
+
+ // Load the callee CodeBlock* into eax
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
+ Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction2 = call();
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ hasCodeBlock2.link(this);
+
+ Jump isNativeFunc2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 2);
+ emitPutJITStubArg(regT0, 4);
+ restoreArgumentReference();
+ Call callArityCheck2 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ arityCheckOkay2.link(this);
+ isNativeFunc2.link(this);
+
+ compileOpCallInitializeCallFrame();
+
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 2);
+ restoreArgumentReference();
+ Call callLazyLinkCall = call();
+ restoreReturnAddressBeforeReturn(regT3);
+
+ jump(regT0);
+
+ Label virtualCallBegin = align();
+
+ // Load the callee CodeBlock* into regT0 (eax)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
+ Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction3 = call();
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
+ hasCodeBlock3.link(this);
+
+ Jump isNativeFunc3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 2);
+ emitPutJITStubArg(regT0, 4);
+ restoreArgumentReference();
+ Call callArityCheck3 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(1, regT2);
+ emitGetJITStubArg(3, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
+ arityCheckOkay3.link(this);
+ isNativeFunc3.link(this);
+
+ // load ctiCode from the new codeBlock.
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
+
+ compileOpCallInitializeCallFrame();
+ jump(regT0);
+
+
+ Label nativeCallThunk = align();
+ preserveReturnAddressAfterCall(regT0);
+ emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Store the return address in the callframe header
+
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+
+#if PLATFORM(X86_64)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86::ecx);
+
+ // Allocate stack space for our arglist
+ subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+ COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_be_16byte_aligned);
+
+ // Set up arguments
+ subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount
+
+ // Push argcount
+ storePtr(X86::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in edx
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
+ mul32(Imm32(sizeof(Register)), X86::ecx, X86::ecx);
+ subPtr(X86::ecx, X86::edx);
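+ // edx now points to the first argument slot: the callframe header start minus argCount (excluding 'this') Register-sized slots.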
+
+ // push pointer to arguments
+ storePtr(X86::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // The ArgList is passed by reference; it lives at the current stack pointer.
+ move(stackPointerRegister, X86::ecx);
+
+ // edx currently points to the first argument, edx-sizeof(Register) points to 'this'
+ loadPtr(Address(X86::edx, -(int32_t)sizeof(Register)), X86::edx);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::esi);
+
+ move(callFrameRegister, X86::edi);
+
+ call(Address(X86::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+#elif PLATFORM(X86)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+ /* We have two structs that we use to describe the stack frame we set up for our
+ * call to native code. NativeCallFrameStructure describes how we set up the stack
+ * in advance of the call. NativeFunctionCalleeSignature describes the call frame
+ * as the native code expects it. We need both because we use the fastcall calling
+ * convention, which results in the callee popping its arguments off the stack, but
+ * not the rest of the call frame, so we need a reliable way to increment the
+ * stack pointer by the right amount after the call.
+ */
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in EDX
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ JSValue result;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#else
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in ECX
+ // JSObject* callee; // passed in EDX
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#endif
+ const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
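+ // Round the frame size up to a multiple of 16 bytes (e.g. a 20-byte structure gets a 32-byte frame) to keep the stack 16-byte aligned across the call.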
+ // Allocate system stack frame
+ subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // The ArgList is passed by reference; it lives in the frame at offset OBJECT_OFFSETOF(NativeCallFrameStructure, args) from the stack pointer.
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
+
+ // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
+ loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
+
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+ // The JSValue result is returned through a hidden pointer; plant the address of the result slot in ecx.
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
+
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
+ storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
+
+ // Plant callframe
+ move(callFrameRegister, X86::edx);
+
+ call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ // JSValue is a non-POD type; the callee returns a pointer to the result slot in eax, so load the value through it.
+ loadPtr(Address(X86::eax), X86::eax);
+#else
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx);
+
+ // Plant callframe
+ move(callFrameRegister, X86::ecx);
+ call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
+#endif
+
+ // We've put a few temporaries on the stack in addition to the actual arguments
+ // so pull them off now
+ addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
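+ // (The fastcall callee already popped sizeof(NativeFunctionCalleeSignature) bytes of arguments, so only the remainder of the frame is reclaimed here.)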
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ breakpoint();
+#endif
+
+ // Check for an exception
+ loadPtr(&(globalData->exception), regT2);
+ Jump exceptionHandler = branchTestPtr(NonZero, regT2);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+
+ // Handle an exception
+ exceptionHandler.link(this);
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
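+ // Rewrite the callFrame slot in the JITStackFrame argument area so the throw trampoline observes the caller's frame.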
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ Call array_failureCases1Call = makeTailRecursiveCall(array_failureCases1);
+ Call array_failureCases2Call = makeTailRecursiveCall(array_failureCases2);
+ Call array_failureCases3Call = makeTailRecursiveCall(array_failureCases3);
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+#endif
+
+ // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ patchBuffer.link(array_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
+ patchBuffer.link(array_failureCases2Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
+ patchBuffer.link(array_failureCases3Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
+#endif
+ patchBuffer.link(callArityCheck1, FunctionPtr(JITStubs::cti_op_call_arityCheck));
+ patchBuffer.link(callArityCheck2, FunctionPtr(JITStubs::cti_op_call_arityCheck));
+ patchBuffer.link(callArityCheck3, FunctionPtr(JITStubs::cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction1, FunctionPtr(JITStubs::cti_op_call_JSFunction));
+ patchBuffer.link(callJSFunction2, FunctionPtr(JITStubs::cti_op_call_JSFunction));
+ patchBuffer.link(callJSFunction3, FunctionPtr(JITStubs::cti_op_call_JSFunction));
+ patchBuffer.link(callDontLazyLinkCall, FunctionPtr(JITStubs::cti_vm_dontLazyLinkCall));
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(JITStubs::cti_vm_lazyLinkCall));
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ *executablePool = finalCode.m_executablePool;
+
+ *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin);
+ *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
+ *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
+ *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ *ctiArrayLengthTrampoline = trampolineAt(finalCode, arrayLengthBegin);
+ *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
+#else
+ UNUSED_PARAM(ctiArrayLengthTrampoline);
+ UNUSED_PARAM(ctiStringLengthTrampoline);
+#endif
+}
+
+void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
+{
+ loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst);
+ loadPtr(Address(dst, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), dst);
+ loadPtr(Address(dst, index * sizeof(Register)), dst);
+}
+
+void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index)
+{
+ loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), variableObject);
+ loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), variableObject);
+ storePtr(src, Address(variableObject, index * sizeof(Register)));
+}
+
+void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
+{
+ // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
+ // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
+ // match). Reset the check so it no longer matches.
+ RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock);
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue()));
+}
+
+void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
+{
+ ASSERT(calleeCodeBlock);
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ // Currently we only link calls with the exact number of arguments.
+ // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
+ if (callerArgCount == calleeCodeBlock->m_numParameters || calleeCodeBlock->codeType() == NativeCode) {
+ ASSERT(!callLinkInfo->isLinked());
+
+ if (calleeCodeBlock)
+ calleeCodeBlock->addCaller(callLinkInfo);
+
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
+ repatchBuffer.relink(callLinkInfo->hotPathOther, code.addressForCall());
+ }
+
+ // patch the call so we do not continue to try to link.
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h
new file mode 100644
index 0000000..ceffe59
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h
@@ -0,0 +1,728 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JIT_h
+#define JIT_h
+
+#include <wtf/Platform.h>
+
+// OBJECT_OFFSETOF: Like the C++ offsetof macro, but you can use it with classes.
+// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
+// NULL can cause compiler problems, especially in cases of multiple inheritance.
+#define OBJECT_OFFSETOF(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
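+// For example, OBJECT_OFFSETOF(JSString, m_value) yields the byte offset of m_value within JSString; the standard offsetof is formally undefined for such non-POD classes.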
+
+#if ENABLE(JIT)
+
+// We've run into some problems where changing the size of the class JIT leads to
+ // performance fluctuations. Try forcing alignment in an attempt to stabilize this.
+#if COMPILER(GCC)
+#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
+#else
+#define JIT_CLASS_ALIGNMENT
+#endif
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "JITCode.h"
+#include "JITStubs.h"
+#include "Opcode.h"
+#include "RegisterFile.h"
+#include "MacroAssembler.h"
+#include "Profiler.h"
+#include <bytecode/SamplingTool.h>
+#include <wtf/AlwaysInline.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+ class CodeBlock;
+ class JIT;
+ class JSPropertyNameIterator;
+ class Interpreter;
+ class Register;
+ class RegisterFile;
+ class ScopeChainNode;
+ class SimpleJumpTable;
+ class StringJumpTable;
+ class StructureChain;
+
+ struct CallLinkInfo;
+ struct Instruction;
+ struct OperandTypes;
+ struct PolymorphicAccessStructureList;
+ struct StructureStubInfo;
+
+ struct CallRecord {
+ MacroAssembler::Call from;
+ unsigned bytecodeIndex;
+ void* to;
+
+ CallRecord()
+ {
+ }
+
+ CallRecord(MacroAssembler::Call from, unsigned bytecodeIndex, void* to = 0)
+ : from(from)
+ , bytecodeIndex(bytecodeIndex)
+ , to(to)
+ {
+ }
+ };
+
+ struct JumpTable {
+ MacroAssembler::Jump from;
+ unsigned toBytecodeIndex;
+
+ JumpTable(MacroAssembler::Jump f, unsigned t)
+ : from(f)
+ , toBytecodeIndex(t)
+ {
+ }
+ };
+
+ struct SlowCaseEntry {
+ MacroAssembler::Jump from;
+ unsigned to;
+ unsigned hint;
+
+ SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0)
+ : from(f)
+ , to(t)
+ , hint(h)
+ {
+ }
+ };
+
+ struct SwitchRecord {
+ enum Type {
+ Immediate,
+ Character,
+ String
+ };
+
+ Type type;
+
+ union {
+ SimpleJumpTable* simpleJumpTable;
+ StringJumpTable* stringJumpTable;
+ } jumpTable;
+
+ unsigned bytecodeIndex;
+ unsigned defaultOffset;
+
+ SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset, Type type)
+ : type(type)
+ , bytecodeIndex(bytecodeIndex)
+ , defaultOffset(defaultOffset)
+ {
+ this->jumpTable.simpleJumpTable = jumpTable;
+ }
+
+ SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset)
+ : type(String)
+ , bytecodeIndex(bytecodeIndex)
+ , defaultOffset(defaultOffset)
+ {
+ this->jumpTable.stringJumpTable = jumpTable;
+ }
+ };
+
+ struct PropertyStubCompilationInfo {
+ MacroAssembler::Call callReturnLocation;
+ MacroAssembler::Label hotPathBegin;
+ };
+
+ struct StructureStubCompilationInfo {
+ MacroAssembler::DataLabelPtr hotPathBegin;
+ MacroAssembler::Call hotPathOther;
+ MacroAssembler::Call callReturnLocation;
+ };
+
+ struct MethodCallCompilationInfo {
+ MethodCallCompilationInfo(unsigned propertyAccessIndex)
+ : propertyAccessIndex(propertyAccessIndex)
+ {
+ }
+
+ MacroAssembler::DataLabelPtr structureToCompare;
+ unsigned propertyAccessIndex;
+ };
+
+ // Near calls can only be patched to other JIT code; regular calls can be patched to JIT code or relinked to stub functions.
+ void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
+
+ class JIT : private MacroAssembler {
+ friend class JITStubCall;
+ friend class CallEvalJITStub;
+
+ using MacroAssembler::Jump;
+ using MacroAssembler::JumpList;
+ using MacroAssembler::Label;
+
+ // NOTES:
+ //
+ // regT0 has two special meanings. The return value from a stub
+ // call will always be in regT0, and by default (unless
+ // a register is specified) emitPutVirtualRegister() will store
+ // the value from regT0.
+ //
+ // regT3 is required to be callee-preserved.
+ //
+ // tempRegister2 has no such dependencies. It is important that
+ // on x86/x86-64 it is ecx for performance reasons, since the
+ // MacroAssembler will need to plant register swaps if it is not -
+ // however the code will still function correctly.
+#if PLATFORM(X86_64)
+ static const RegisterID returnValueRegister = X86::eax;
+ static const RegisterID cachedResultRegister = X86::eax;
+ static const RegisterID firstArgumentRegister = X86::edi;
+
+ static const RegisterID timeoutCheckRegister = X86::r12;
+ static const RegisterID callFrameRegister = X86::r13;
+ static const RegisterID tagTypeNumberRegister = X86::r14;
+ static const RegisterID tagMaskRegister = X86::r15;
+
+ static const RegisterID regT0 = X86::eax;
+ static const RegisterID regT1 = X86::edx;
+ static const RegisterID regT2 = X86::ecx;
+ static const RegisterID regT3 = X86::ebx;
+
+ static const FPRegisterID fpRegT0 = X86::xmm0;
+ static const FPRegisterID fpRegT1 = X86::xmm1;
+ static const FPRegisterID fpRegT2 = X86::xmm2;
+#elif PLATFORM(X86)
+ static const RegisterID returnValueRegister = X86::eax;
+ static const RegisterID cachedResultRegister = X86::eax;
+ // On x86 we always use fastcall conventions - but on
+ // OS X it might make more sense to just use regparm.
+ static const RegisterID firstArgumentRegister = X86::ecx;
+
+ static const RegisterID timeoutCheckRegister = X86::esi;
+ static const RegisterID callFrameRegister = X86::edi;
+
+ static const RegisterID regT0 = X86::eax;
+ static const RegisterID regT1 = X86::edx;
+ static const RegisterID regT2 = X86::ecx;
+ static const RegisterID regT3 = X86::ebx;
+
+ static const FPRegisterID fpRegT0 = X86::xmm0;
+ static const FPRegisterID fpRegT1 = X86::xmm1;
+ static const FPRegisterID fpRegT2 = X86::xmm2;
+#elif PLATFORM_ARM_ARCH(7)
+ static const RegisterID returnValueRegister = ARM::r0;
+ static const RegisterID cachedResultRegister = ARM::r0;
+ static const RegisterID firstArgumentRegister = ARM::r0;
+
+ static const RegisterID regT0 = ARM::r0;
+ static const RegisterID regT1 = ARM::r1;
+ static const RegisterID regT2 = ARM::r2;
+ static const RegisterID regT3 = ARM::r4;
+
+ static const RegisterID callFrameRegister = ARM::r5;
+ static const RegisterID timeoutCheckRegister = ARM::r6;
+
+ static const FPRegisterID fpRegT0 = ARM::d0;
+ static const FPRegisterID fpRegT1 = ARM::d1;
+ static const FPRegisterID fpRegT2 = ARM::d2;
+#else
+ #error "JIT not supported on this platform."
+#endif
+
+ static const int patchGetByIdDefaultStructure = -1;
+ // Magic number - the initial offset must not be representable as a signed 8-bit value, or the X86Assembler
+ // will compress the displacement, and we may not be able to fit a patched offset.
+ static const int patchGetByIdDefaultOffset = 256;
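+ // (x86 encodes displacements in [-128, 127] as a single byte; starting at 256 forces the 32-bit form, leaving room to repatch in any offset later.)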
+
+#if PLATFORM(X86_64)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdExternalLoad = 20;
+ static const int patchLengthPutByIdExternalLoad = 4;
+ static const int patchOffsetPutByIdPropertyMapOffset = 31;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdExternalLoad = 20;
+ static const int patchLengthGetByIdExternalLoad = 4;
+ static const int patchOffsetGetByIdPropertyMapOffset = 31;
+ static const int patchOffsetGetByIdPutResult = 31;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 66;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 44;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 9;
+
+ static const int patchOffsetMethodCheckProtoObj = 20;
+ static const int patchOffsetMethodCheckProtoStruct = 30;
+ static const int patchOffsetMethodCheckPutFunction = 50;
+#elif PLATFORM(X86)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdExternalLoad = 13;
+ static const int patchLengthPutByIdExternalLoad = 3;
+ static const int patchOffsetPutByIdPropertyMapOffset = 22;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 7;
+ static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdExternalLoad = 13;
+ static const int patchLengthGetByIdExternalLoad = 3;
+ static const int patchOffsetGetByIdPropertyMapOffset = 22;
+ static const int patchOffsetGetByIdPutResult = 22;
+#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 31;
+#elif ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 33;
+#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 21;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 23;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 6;
+
+ static const int patchOffsetMethodCheckProtoObj = 11;
+ static const int patchOffsetMethodCheckProtoStruct = 18;
+ static const int patchOffsetMethodCheckPutFunction = 29;
+#elif PLATFORM_ARM_ARCH(7)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdExternalLoad = 20;
+ static const int patchLengthPutByIdExternalLoad = 12;
+ static const int patchOffsetPutByIdPropertyMapOffset = 40;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdExternalLoad = 20;
+ static const int patchLengthGetByIdExternalLoad = 12;
+ static const int patchOffsetGetByIdPropertyMapOffset = 40;
+ static const int patchOffsetGetByIdPutResult = 44;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 28;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 10;
+
+ static const int patchOffsetMethodCheckProtoObj = 18;
+ static const int patchOffsetMethodCheckProtoStruct = 28;
+ static const int patchOffsetMethodCheckPutFunction = 46;
+#endif
+
+ public:
+ static void compile(JSGlobalData* globalData, CodeBlock* codeBlock)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompile();
+ }
+
+ static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame);
+ }
+
+ static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, cachedOffset);
+ }
+ static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, cachedOffset, callFrame);
+ }
+ static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, cachedOffset, callFrame);
+ }
+
+ static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame);
+ }
+
+ static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
+ }
+
+ static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
+ {
+ JIT jit(globalData);
+ jit.privateCompileCTIMachineTrampolines(executablePool, globalData, ctiArrayLengthTrampoline, ctiStringLengthTrampoline, ctiVirtualCallPreLink, ctiVirtualCallLink, ctiVirtualCall, ctiNativeCallThunk);
+ }
+
+ static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
+ static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
+ static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*);
+
+ static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ return jit.privateCompilePatchGetArrayLength(returnAddress);
+ }
+
+ static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount, JSGlobalData*);
+ static void unlinkCall(CallLinkInfo*);
+
+ private:
+ struct JSRInfo {
+ DataLabelPtr storeLocation;
+ Label target;
+
+ JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
+ : storeLocation(storeLocation)
+ , target(targetLocation)
+ {
+ }
+ };
+
+ JIT(JSGlobalData*, CodeBlock* = 0);
+
+ void privateCompileMainPass();
+ void privateCompileLinkPass();
+ void privateCompileSlowCases();
+ void privateCompile();
+ void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
+ void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
+ void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame);
+ void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress);
+
+ void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk);
+ void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
+
+ void addSlowCase(Jump);
+ void addJump(Jump, int);
+ void emitJumpSlowToHot(Jump, int);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex, bool isMethodCheck = false);
+#endif
+ void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
+ void compileOpCallVarargs(Instruction* instruction);
+ void compileOpCallInitializeCallFrame();
+ void compileOpCallSetupArgs(Instruction*);
+ void compileOpCallVarargsSetupArgs(Instruction*);
+ void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
+ void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
+ void compileOpConstructSetupArgs(Instruction*);
+ enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
+ void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
+
+ void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset);
+ void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
+
+ // Arithmetic Ops
+
+ void emit_op_add(Instruction*);
+ void emit_op_sub(Instruction*);
+ void emit_op_mul(Instruction*);
+ void emit_op_mod(Instruction*);
+ void emit_op_bitand(Instruction*);
+ void emit_op_lshift(Instruction*);
+ void emit_op_rshift(Instruction*);
+ void emit_op_jnless(Instruction*);
+ void emit_op_jnlesseq(Instruction*);
+ void emit_op_pre_inc(Instruction*);
+ void emit_op_pre_dec(Instruction*);
+ void emit_op_post_inc(Instruction*);
+ void emit_op_post_dec(Instruction*);
+ void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
+ void emit_op_get_by_val(Instruction*);
+ void emit_op_put_by_val(Instruction*);
+ void emit_op_put_by_index(Instruction*);
+ void emit_op_put_getter(Instruction*);
+ void emit_op_put_setter(Instruction*);
+ void emit_op_del_by_id(Instruction*);
+
+ void emit_op_mov(Instruction*);
+ void emit_op_end(Instruction*);
+ void emit_op_jmp(Instruction*);
+ void emit_op_loop(Instruction*);
+ void emit_op_loop_if_less(Instruction*);
+ void emit_op_loop_if_lesseq(Instruction*);
+ void emit_op_new_object(Instruction*);
+ void emit_op_put_by_id(Instruction*);
+ void emit_op_get_by_id(Instruction*);
+ void emit_op_instanceof(Instruction*);
+ void emit_op_new_func(Instruction*);
+ void emit_op_call(Instruction*);
+ void emit_op_call_eval(Instruction*);
+ void emit_op_method_check(Instruction*);
+ void emit_op_load_varargs(Instruction*);
+ void emit_op_call_varargs(Instruction*);
+ void emit_op_construct(Instruction*);
+ void emit_op_get_global_var(Instruction*);
+ void emit_op_put_global_var(Instruction*);
+ void emit_op_get_scoped_var(Instruction*);
+ void emit_op_put_scoped_var(Instruction*);
+ void emit_op_tear_off_activation(Instruction*);
+ void emit_op_tear_off_arguments(Instruction*);
+ void emit_op_ret(Instruction*);
+ void emit_op_new_array(Instruction*);
+ void emit_op_resolve(Instruction*);
+ void emit_op_construct_verify(Instruction*);
+ void emit_op_to_primitive(Instruction*);
+ void emit_op_strcat(Instruction*);
+ void emit_op_resolve_func(Instruction*);
+ void emit_op_loop_if_true(Instruction*);
+ void emit_op_resolve_base(Instruction*);
+ void emit_op_resolve_skip(Instruction*);
+ void emit_op_resolve_global(Instruction*);
+ void emit_op_not(Instruction*);
+ void emit_op_jfalse(Instruction*);
+ void emit_op_jeq_null(Instruction*);
+ void emit_op_jneq_null(Instruction*);
+ void emit_op_jneq_ptr(Instruction*);
+ void emit_op_unexpected_load(Instruction*);
+ void emit_op_jsr(Instruction*);
+ void emit_op_sret(Instruction*);
+ void emit_op_eq(Instruction*);
+ void emit_op_bitnot(Instruction*);
+ void emit_op_resolve_with_base(Instruction*);
+ void emit_op_new_func_exp(Instruction*);
+ void emit_op_jtrue(Instruction*);
+ void emit_op_neq(Instruction*);
+ void emit_op_bitxor(Instruction*);
+ void emit_op_new_regexp(Instruction*);
+ void emit_op_bitor(Instruction*);
+ void emit_op_throw(Instruction*);
+ void emit_op_next_pname(Instruction*);
+ void emit_op_push_scope(Instruction*);
+ void emit_op_pop_scope(Instruction*);
+ void emit_op_stricteq(Instruction*);
+ void emit_op_nstricteq(Instruction*);
+ void emit_op_to_jsnumber(Instruction*);
+ void emit_op_push_new_scope(Instruction*);
+ void emit_op_catch(Instruction*);
+ void emit_op_jmp_scopes(Instruction*);
+ void emit_op_switch_imm(Instruction*);
+ void emit_op_switch_char(Instruction*);
+ void emit_op_switch_string(Instruction*);
+ void emit_op_new_error(Instruction*);
+ void emit_op_debug(Instruction*);
+ void emit_op_eq_null(Instruction*);
+ void emit_op_neq_null(Instruction*);
+ void emit_op_enter(Instruction*);
+ void emit_op_enter_with_activation(Instruction*);
+ void emit_op_init_arguments(Instruction*);
+ void emit_op_create_arguments(Instruction*);
+ void emit_op_convert_this(Instruction*);
+ void emit_op_profile_will_call(Instruction*);
+ void emit_op_profile_did_call(Instruction*);
+
+ void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_construct_verify(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
+#if ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+ void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+ void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+#endif
+
+ void emitGetVirtualRegister(int src, RegisterID dst);
+ void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
+ void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
+
+ void emitPutJITStubArg(RegisterID src, unsigned argumentNumber);
+ void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch);
+ void emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber);
+ void emitPutJITStubArgConstant(void* value, unsigned argumentNumber);
+ void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
+
+ void emitInitRegister(unsigned dst);
+
+ void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
+ void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
+ void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+
+ JSValue getConstantOperand(unsigned src);
+ int32_t getConstantOperandImmediateInt(unsigned src);
+ bool isOperandConstantImmediateInt(unsigned src);
+
+ Jump emitJumpIfJSCell(RegisterID);
+ Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfJSCell(RegisterID);
+ Jump emitJumpIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ JIT::Jump emitJumpIfImmediateNumber(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
+#else
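+ // Without ALTERNATE_JSIMMEDIATE the only immediate numbers are integers (doubles live on the heap), so the integer checks suffice.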
+ JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfImmediateInteger(reg);
+ }
+
+ JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfNotImmediateInteger(reg);
+ }
+#endif
+
+ Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+ {
+ return iter++->from;
+ }
+ void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+ {
+ iter->from.link(this);
+ ++iter;
+ }
+ void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg);
+
+ JIT::Jump emitJumpIfImmediateInteger(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
+ void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+
+ Jump checkStructure(RegisterID reg, Structure* structure);
+
+#if !USE(ALTERNATE_JSIMMEDIATE)
+ void emitFastArithDeTagImmediate(RegisterID);
+ Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
+#endif
+ void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
+ void emitFastArithImmToInt(RegisterID);
+ void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
+
+ void emitTagAsBoolImmediate(RegisterID reg);
+
+ void restoreArgumentReference();
+ void restoreArgumentReferenceForTrampoline();
+
+ Call emitNakedCall(CodePtr function = CodePtr());
+ void preserveReturnAddressAfterCall(RegisterID);
+ void restoreReturnAddressBeforeReturn(RegisterID);
+ void restoreReturnAddressBeforeReturn(Address);
+
+ void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
+ void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);
+
+ void emitTimeoutCheck();
+#ifndef NDEBUG
+ void printBytecodeOperandTypes(unsigned src1, unsigned src2);
+#endif
+
+ void killLastResultRegister();
+
+
+#if ENABLE(SAMPLING_FLAGS)
+ void setSamplingFlag(int32_t);
+ void clearSamplingFlag(int32_t);
+#endif
+
+#if ENABLE(SAMPLING_COUNTERS)
+ void emitCount(AbstractSamplingCounter&, uint32_t = 1);
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+ void sampleInstruction(Instruction*, bool = false);
+#endif
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+ void sampleCodeBlock(CodeBlock*);
+#else
+ void sampleCodeBlock(CodeBlock*) {}
+#endif
+
+ Interpreter* m_interpreter;
+ JSGlobalData* m_globalData;
+ CodeBlock* m_codeBlock;
+
+ Vector<CallRecord> m_calls;
+ Vector<Label> m_labels;
+ Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
+ Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
+ Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo;
+ Vector<JumpTable> m_jmpTable;
+
+ unsigned m_bytecodeIndex;
+ Vector<JSRInfo> m_jsrSites;
+ Vector<SlowCaseEntry> m_slowCases;
+ Vector<SwitchRecord> m_switches;
+
+ int m_lastResultBytecodeRegister;
+ unsigned m_jumpTargetsPosition;
+
+ unsigned m_propertyAccessInstructionIndex;
+ unsigned m_globalResolveInfoIndex;
+ unsigned m_callLinkInfoIndex;
+ } JIT_CLASS_ALIGNMENT;
+
+}
+
+#endif // ENABLE(JIT)
+
+#endif // JIT_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp
new file mode 100644
index 0000000..15808e2
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp
@@ -0,0 +1,1378 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ // FIXME: would we be better off using 'emitJumpSlowCaseIfNotImmediateIntegers'? We *probably* ought to be consistent.
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ emitFastArithImmToInt(regT0);
+ emitFastArithImmToInt(regT2);
+#if !PLATFORM(X86)
+ // Mask with 0x1f as per ecma-262 11.7.2 step 7.
+ // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
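+ // For example, in JS (1 << 33) === 2, because only the low five bits of the shift count (33 & 0x1f == 1) take effect.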
+ and32(Imm32(0x1f), regT2);
+#endif
+ lshift32(regT2, regT0);
+#if !USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(branchAdd32(Overflow, regT0, regT0));
+ signExtend32ToPtr(regT0, regT0);
+#endif
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ UNUSED_PARAM(op1);
+ UNUSED_PARAM(op2);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+#else
+ // If we are limited to 32-bit immediates there is a third slow case, which requires the operands to be reloaded.
+ Jump notImm1 = getSlowCase(iter);
+ Jump notImm2 = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ notImm1.link(this);
+ notImm2.link(this);
+#endif
+ JITStubCall stubCall(this, JITStubs::cti_op_lshift);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ stubCall.call(result);
+}
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ // isOperandConstantImmediateInt(op2) => 1 SlowCase
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ // Mask with 0x1f as per ecma-262 11.7.2 step 7.
+#if USE(ALTERNATE_JSIMMEDIATE)
+ rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
+#else
+ rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
+#endif
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ if (supportsFloatingPointTruncate()) {
+ Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ // supportsFloatingPoint() && USE(ALTERNATE_JSIMMEDIATE) => 3 SlowCases
+ addSlowCase(emitJumpIfNotImmediateNumber(regT0));
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
+#else
+ // supportsFloatingPoint() && !USE(ALTERNATE_JSIMMEDIATE) => 5 SlowCases (of which 1 IfNotJSCell)
+ emitJumpSlowCaseIfNotJSCell(regT0, op1);
+ addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ addSlowCase(branchAdd32(Overflow, regT0, regT0));
+#endif
+ lhsIsInt.link(this);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ } else {
+ // !supportsFloatingPoint() => 2 SlowCases
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ }
+ emitFastArithImmToInt(regT2);
+#if !PLATFORM(X86)
+ // Mask with 0x1f as per ecma-262 11.7.2 step 7.
+ // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
+ and32(Imm32(0x1f), regT2);
+#endif
+#if USE(ALTERNATE_JSIMMEDIATE)
+ rshift32(regT2, regT0);
+#else
+ rshiftPtr(regT2, regT0);
+#endif
+ }
+#if USE(ALTERNATE_JSIMMEDIATE)
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+#else
+ orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
+#endif
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, JITStubs::cti_op_rshift);
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ } else {
+ if (supportsFloatingPointTruncate()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+#else
+ linkSlowCaseIfNotJSCell(iter, op1);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+#endif
+ // We're reloading op1 to regT0 as we can no longer guarantee that
+ // we have not munged the operand. It may have already been shifted
+ // correctly, but it still will not have been tagged.
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(regT2);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ }
+ }
+
+ stubCall.call(result);
+}
+
+void JIT::emit_op_jnless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the fast path:
+ // - int immediate to constant int immediate
+ // - constant int immediate to int immediate
+ // - int immediate to int immediate
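+ // Note that op_jnless branches when 'less' is false, so the inline paths test the inverted comparison (e.g. GreaterThanOrEqual).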
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
+#else
+ int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
+#endif
+ addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
+ }
+}
+
+void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the slow path:
+ // - floating-point number to constant int immediate
+ // - constant int immediate to floating-point number
+ // - floating-point number to floating-point number.
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1 = emitJumpIfNotJSCell(regT0);
+
+ Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+#endif
+
+ int32_t op2imm = getConstantOperand(op2).getInt32Fast();
+
+ move(Imm32(op2imm), regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1.link(this);
+ fail2.link(this);
+#endif
+ }
+
+ JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail1 = emitJumpIfNotJSCell(regT1);
+
+ Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
+#endif
+
+ int32_t op1imm = getConstantOperand(op1).getInt32Fast();
+
+ move(Imm32(op1imm), regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail1.link(this);
+ fail2.link(this);
+#endif
+ }
+
+ JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+
+ } else {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT0, fpRegT0);
+ movePtrToDouble(regT1, fpRegT1);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1 = emitJumpIfNotJSCell(regT0);
+
+ Jump fail2;
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail2 = emitJumpIfNotJSCell(regT1);
+
+ Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
+ Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
+#endif
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+ fail2.link(this);
+ fail3.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1.link(this);
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail2.link(this);
+ fail3.link(this);
+ fail4.link(this);
+#endif
+ }
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ }
+}
+
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the fast path:
+ // - int immediate to constant int immediate
+ // - constant int immediate to int immediate
+ // - int immediate to int immediate
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target + 3);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
+#else
+ int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
+#endif
+ addJump(branch32(LessThan, regT1, Imm32(op1imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ addJump(branch32(GreaterThan, regT0, regT1), target + 3);
+ }
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the slow path:
+ // - floating-point number to constant int immediate
+ // - constant int immediate to floating-point number
+ // - floating-point number to floating-point number.
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1 = emitJumpIfNotJSCell(regT0);
+
+ Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+#endif
+
+            int32_t op2imm = getConstantOperand(op2).getInt32Fast();
+
+ move(Imm32(op2imm), regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1.link(this);
+ fail2.link(this);
+#endif
+ }
+
+ JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail1 = emitJumpIfNotJSCell(regT1);
+
+ Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
+#endif
+
+            int32_t op1imm = getConstantOperand(op1).getInt32Fast();
+
+ move(Imm32(op1imm), regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail1.link(this);
+ fail2.link(this);
+#endif
+ }
+
+ JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+
+ } else {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT0, fpRegT0);
+ movePtrToDouble(regT1, fpRegT1);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1 = emitJumpIfNotJSCell(regT0);
+
+ Jump fail2;
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail2 = emitJumpIfNotJSCell(regT1);
+
+ Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
+ Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
+#endif
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+ fail2.link(this);
+ fail3.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1.link(this);
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail2.link(this);
+ fail3.link(this);
+ fail4.link(this);
+#endif
+ }
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ }
+}
+
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t imm = getConstantOperandImmediateInt(op1);
+ andPtr(Imm32(imm), regT0);
+ if (imm >= 0)
+ emitFastArithIntToImmNoCheck(regT0, regT0);
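+        // Why the tag fix-up is conditional: on x86-64 the 32-bit immediate is
+        // sign-extended, so a negative imm keeps the TagTypeNumber bits set,
+        // while a non-negative imm clears them and the result must be re-tagged.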
+#else
+ andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
+#endif
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t imm = getConstantOperandImmediateInt(op2);
+ andPtr(Imm32(imm), regT0);
+ if (imm >= 0)
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+#else
+ andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
+#endif
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ andPtr(regT1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ }
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ if (isOperandConstantImmediateInt(op1)) {
+ JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT0);
+ stubCall.call(result);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ } else {
+ JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call(result);
+ }
+}
+
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ move(regT0, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT1);
+#else
+ addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
+ signExtend32ToPtr(regT1, regT1);
+#endif
+ emitPutVirtualRegister(srcDst, regT1);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_post_inc);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(result);
+}
+
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ move(regT0, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(branchSub32(Zero, Imm32(1), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT1);
+#else
+ addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
+ signExtend32ToPtr(regT1, regT1);
+#endif
+ emitPutVirtualRegister(srcDst, regT1);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_post_dec);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(result);
+}
+
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+#else
+ addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
+ signExtend32ToPtr(regT0, regT0);
+#endif
+ emitPutVirtualRegister(srcDst);
+}
+
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(srcDst, regT0);
+ notImm.link(this);
+ JITStubCall stubCall(this, JITStubs::cti_op_pre_inc);
+ stubCall.addArgument(regT0);
+ stubCall.call(srcDst);
+}
+
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(branchSub32(Zero, Imm32(1), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+#else
+ addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
+ signExtend32ToPtr(regT0, regT0);
+#endif
+ emitPutVirtualRegister(srcDst);
+}
+
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(srcDst, regT0);
+ notImm.link(this);
+ JITStubCall stubCall(this, JITStubs::cti_op_pre_dec);
+ stubCall.addArgument(regT0);
+ stubCall.call(srcDst);
+}
+
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
+#if PLATFORM(X86) || PLATFORM(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
+ emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
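+    // Note: both encodings below guard the division - idivl raises a fault on
+    // a zero divisor, so a zero in ecx is routed to the slow case first.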
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
+ m_assembler.cdq();
+ m_assembler.idivl_r(X86::ecx);
+#else
+ emitFastArithDeTagImmediate(X86::eax);
+ addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
+ m_assembler.cdq();
+ m_assembler.idivl_r(X86::ecx);
+ signExtend32ToPtr(X86::edx, X86::edx);
+#endif
+ emitFastArithReTagImmediate(X86::edx, X86::eax);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+#else
+ Jump notImm1 = getSlowCase(iter);
+ Jump notImm2 = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitFastArithReTagImmediate(X86::ecx, X86::ecx);
+ notImm1.link(this);
+ notImm2.link(this);
+#endif
+ JITStubCall stubCall(this, JITStubs::cti_op_mod);
+ stubCall.addArgument(X86::eax);
+ stubCall.addArgument(X86::ecx);
+ stubCall.call(result);
+}
+
+#else // PLATFORM(X86) || PLATFORM(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, JITStubs::cti_op_mod);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+}
+
+void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+#endif // PLATFORM(X86) || PLATFORM(X86_64)
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
+
+#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+}
+
+void JIT::emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, JITStubs::cti_op_mul);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+}
+
+void JIT::emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, JITStubs::cti_op_sub);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+}
+
+void JIT::emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+#elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+/* ------------------------------ BEGIN: USE(ALTERNATE_JSIMMEDIATE) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+
+void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
+{
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ if (opcodeID == op_add)
+ addSlowCase(branchAdd32(Overflow, regT1, regT0));
+ else if (opcodeID == op_sub)
+ addSlowCase(branchSub32(Overflow, regT1, regT0));
+ else {
+ ASSERT(opcodeID == op_mul);
+ addSlowCase(branchMul32(Overflow, regT1, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+ }
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+}
+
+void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned, OperandTypes types)
+{
+ // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
+ COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
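+    // A sketch of the arithmetic, assuming the usual 64-bit encoding constants
+    // TagTypeNumber == 0xffff000000000000 and DoubleEncodeOffset == 1 << 48:
+    //   0xffff000000000000 + 0x0001000000000000 == 0 (mod 2^64)
+    // so the subPtr(tagTypeNumberRegister, ...) below re-encodes a double
+    // exactly as adding DoubleEncodeOffset would.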
+
+ Jump notImm1 = getSlowCase(iter);
+ Jump notImm2 = getSlowCase(iter);
+
+ linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
+ if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
+ linkSlowCase(iter);
+ emitGetVirtualRegister(op1, regT0);
+
+ Label stubFunctionCall(this);
+ JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(result);
+ Jump end = jump();
+
+    // If we get here, regT0 (eax) is not an int32; regT1 (edx) is not yet checked.
+ notImm1.link(this);
+ if (!types.first().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT1);
+ Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
+ convertInt32ToDouble(regT1, fpRegT2);
+ Jump op2wasInteger = jump();
+
+    // If we get here, regT0 (eax) IS an int32, but regT1 (edx) is not.
+ notImm2.link(this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ convertInt32ToDouble(regT0, fpRegT1);
+ op2isDouble.link(this);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT2);
+ op2wasInteger.link(this);
+
+ if (opcodeID == op_add)
+ addDouble(fpRegT2, fpRegT1);
+ else if (opcodeID == op_sub)
+ subDouble(fpRegT2, fpRegT1);
+ else {
+ ASSERT(opcodeID == op_mul);
+ mulDouble(fpRegT2, fpRegT1);
+ }
+ moveDoubleToPtr(fpRegT1, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+ emitPutVirtualRegister(result, regT0);
+
+ end.link(this);
+}
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else
+ compileBinaryArithOp(op_add, result, op1, op2, types);
+
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op1) || isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ } else
+ compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
+}
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ // For now, only plant a fast int case if the constant operand is greater than zero.
+ int32_t value;
+ if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ emitFastArithReTagImmediate(regT0, regT0);
+ } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ emitFastArithReTagImmediate(regT0, regT0);
+ } else
+ compileBinaryArithOp(op_mul, result, op1, op2, types);
+
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
+ || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+        // There is an extra slow case for (op1 * -N) or (-N * op2): the other operand must be checked for 0, since that product should be -0.
+ JITStubCall stubCall(this, JITStubs::cti_op_mul);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ } else
+ compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
+}
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ compileBinaryArithOp(op_sub, result, op1, op2, types);
+
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_ARITHMETIC) && !USE(ALTERNATE_JSIMMEDIATE) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+
+void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
+{
+ Structure* numberStructure = m_globalData->numberStructure.get();
+ Jump wasJSNumberCell1;
+ Jump wasJSNumberCell2;
+
+ emitGetVirtualRegisters(src1, regT0, src2, regT1);
+
+ if (types.second().isReusable() && supportsFloatingPoint()) {
+ ASSERT(types.second().mightBeNumber());
+
+ // Check op2 is a number
+ Jump op2imm = emitJumpIfImmediateInteger(regT1);
+ if (!types.second().definitelyIsNumber()) {
+ emitJumpSlowCaseIfNotJSCell(regT1, src2);
+ addSlowCase(checkStructure(regT1, numberStructure));
+ }
+
+ // (1) In this case src2 is a reusable number cell.
+ // Slow case if src1 is not a number type.
+ Jump op1imm = emitJumpIfImmediateInteger(regT0);
+ if (!types.first().definitelyIsNumber()) {
+ emitJumpSlowCaseIfNotJSCell(regT0, src1);
+ addSlowCase(checkStructure(regT0, numberStructure));
+ }
+
+ // (1a) if we get here, src1 is also a number cell
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ Jump loadedDouble = jump();
+ // (1b) if we get here, src1 is an immediate
+ op1imm.link(this);
+ emitFastArithImmToInt(regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+ // (1c)
+ loadedDouble.link(this);
+ if (opcodeID == op_add)
+ addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ else if (opcodeID == op_sub)
+ subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ else {
+ ASSERT(opcodeID == op_mul);
+ mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ }
+
+ // Store the result to the JSNumberCell and jump.
+ storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
+ move(regT1, regT0);
+ emitPutVirtualRegister(dst);
+ wasJSNumberCell2 = jump();
+
+ // (2) This handles cases where src2 is an immediate number.
+        // Two slow cases - either src1 isn't an immediate, or the arithmetic overflows.
+ op2imm.link(this);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ } else if (types.first().isReusable() && supportsFloatingPoint()) {
+ ASSERT(types.first().mightBeNumber());
+
+ // Check op1 is a number
+ Jump op1imm = emitJumpIfImmediateInteger(regT0);
+ if (!types.first().definitelyIsNumber()) {
+ emitJumpSlowCaseIfNotJSCell(regT0, src1);
+ addSlowCase(checkStructure(regT0, numberStructure));
+ }
+
+ // (1) In this case src1 is a reusable number cell.
+ // Slow case if src2 is not a number type.
+ Jump op2imm = emitJumpIfImmediateInteger(regT1);
+ if (!types.second().definitelyIsNumber()) {
+ emitJumpSlowCaseIfNotJSCell(regT1, src2);
+ addSlowCase(checkStructure(regT1, numberStructure));
+ }
+
+ // (1a) if we get here, src2 is also a number cell
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
+ Jump loadedDouble = jump();
+ // (1b) if we get here, src2 is an immediate
+ op2imm.link(this);
+ emitFastArithImmToInt(regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ // (1c)
+ loadedDouble.link(this);
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ if (opcodeID == op_add)
+ addDouble(fpRegT1, fpRegT0);
+ else if (opcodeID == op_sub)
+ subDouble(fpRegT1, fpRegT0);
+ else {
+ ASSERT(opcodeID == op_mul);
+ mulDouble(fpRegT1, fpRegT0);
+ }
+
+ // Store the result to the JSNumberCell and jump.
+ storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
+ emitPutVirtualRegister(dst);
+ wasJSNumberCell1 = jump();
+
+ // (2) This handles cases where src1 is an immediate number.
+        // Two slow cases - either src2 isn't an immediate, or the arithmetic overflows.
+ op1imm.link(this);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ } else
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+
+ if (opcodeID == op_add) {
+ emitFastArithDeTagImmediate(regT0);
+ addSlowCase(branchAdd32(Overflow, regT1, regT0));
+ } else if (opcodeID == op_sub) {
+ addSlowCase(branchSub32(Overflow, regT1, regT0));
+ signExtend32ToPtr(regT0, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ } else {
+ ASSERT(opcodeID == op_mul);
+        // Convert eax & edx from JSImmediates to ints, and check if either is zero.
+ emitFastArithImmToInt(regT1);
+ Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
+ Jump op2NonZero = branchTest32(NonZero, regT1);
+ op1Zero.link(this);
+        // If either input is zero, add the two together and check if the result is < 0.
+        // If it is, we have a problem: for N < 0, (N * 0) == -0, which is not representable as a JSImmediate.
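+        // Worked example (JS semantics): (-2) * 0 must yield -0, observable as
+        // 1 / ((-2) * 0) === -Infinity; here (-2) + 0 == -2 trips the Signed
+        // check below, so the pair correctly falls through to the slow case.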
+ move(regT0, regT2);
+ addSlowCase(branchAdd32(Signed, regT1, regT2));
+ // Skip the above check if neither input is zero
+ op2NonZero.link(this);
+ addSlowCase(branchMul32(Overflow, regT1, regT0));
+ signExtend32ToPtr(regT0, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ }
+ emitPutVirtualRegister(dst);
+
+ if (types.second().isReusable() && supportsFloatingPoint())
+ wasJSNumberCell2.link(this);
+ else if (types.first().isReusable() && supportsFloatingPoint())
+ wasJSNumberCell1.link(this);
+}
+
+void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
+{
+ linkSlowCase(iter);
+ if (types.second().isReusable() && supportsFloatingPoint()) {
+ if (!types.first().definitelyIsNumber()) {
+ linkSlowCaseIfNotJSCell(iter, src1);
+ linkSlowCase(iter);
+ }
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCaseIfNotJSCell(iter, src2);
+ linkSlowCase(iter);
+ }
+ } else if (types.first().isReusable() && supportsFloatingPoint()) {
+ if (!types.first().definitelyIsNumber()) {
+ linkSlowCaseIfNotJSCell(iter, src1);
+ linkSlowCase(iter);
+ }
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCaseIfNotJSCell(iter, src2);
+ linkSlowCase(iter);
+ }
+ }
+ linkSlowCase(iter);
+
+ // additional entry point to handle -0 cases.
+ if (opcodeID == op_mul)
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
+ stubCall.addArgument(src1, regT2);
+ stubCall.addArgument(src2, regT2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
+ signExtend32ToPtr(regT0, regT0);
+ emitPutVirtualRegister(result);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
+ signExtend32ToPtr(regT0, regT0);
+ emitPutVirtualRegister(result);
+ } else {
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ if (types.first().mightBeNumber() && types.second().mightBeNumber())
+            compileBinaryArithOp(op_add, result, op1, op2, types);
+ else {
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ }
+ }
+}
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op1)) {
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
+ notImm.link(this);
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT0);
+ stubCall.call(result);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
+ notImm.link(this);
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ } else {
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
+ compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
+ }
+}
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ // For now, only plant a fast int case if the constant operand is greater than zero.
+ int32_t value;
+ if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitFastArithDeTagImmediate(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ signExtend32ToPtr(regT0, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(result);
+ } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitFastArithDeTagImmediate(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ signExtend32ToPtr(regT0, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(result);
+ } else
+ compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
+ || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+        // There is an extra slow case for (op1 * -N) or (-N * op2): the other operand must be checked for 0, since that product should be -0.
+ JITStubCall stubCall(this, JITStubs::cti_op_mul);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ } else
+ compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
+}
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
+}
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
+}
+
+#endif // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp
new file mode 100644
index 0000000..abdb5ed
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::compileOpCallInitializeCallFrame()
+{
+ store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+
+ storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
+ storePtr(regT2, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
+ storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
+}
+
+void JIT::compileOpCallSetupArgs(Instruction* instruction)
+{
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ // ecx holds func
+ emitPutJITStubArg(regT2, 1);
+ emitPutJITStubArgConstant(argCount, 3);
+ emitPutJITStubArgConstant(registerOffset, 2);
+}
+
+void JIT::compileOpCallVarargsSetupArgs(Instruction* instruction)
+{
+ int registerOffset = instruction[4].u.operand;
+
+ // ecx holds func
+ emitPutJITStubArg(regT2, 1);
+ emitPutJITStubArg(regT1, 3);
+ addPtr(Imm32(registerOffset), regT1, regT0);
+ emitPutJITStubArg(regT0, 2);
+}
+
+void JIT::compileOpConstructSetupArgs(Instruction* instruction)
+{
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+ int proto = instruction[5].u.operand;
+ int thisRegister = instruction[6].u.operand;
+
+ // ecx holds func
+ emitPutJITStubArg(regT2, 1);
+ emitPutJITStubArgConstant(registerOffset, 2);
+ emitPutJITStubArgConstant(argCount, 3);
+ emitPutJITStubArgFromVirtualRegister(proto, 4, regT0);
+ emitPutJITStubArgConstant(thisRegister, 5);
+}
+
+void JIT::compileOpCallVarargs(Instruction* instruction)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCountRegister = instruction[3].u.operand;
+
+ emitGetVirtualRegister(argCountRegister, regT1);
+ emitGetVirtualRegister(callee, regT2);
+ compileOpCallVarargsSetupArgs(instruction);
+
+ // Check for JSFunctions.
+ emitJumpSlowCaseIfNotJSCell(regT2);
+ addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
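+    // At this point regT0 already holds registerOffset + argCount (computed in
+    // compileOpCallVarargsSetupArgs), so the sequence below amounts to:
+    //   newFrame = oldFrame + (registerOffset + argCount) * sizeof(Register)
+    //   newFrame[RegisterFile::CallerFrame] = oldFrame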
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
+ addPtr(Imm32((int32_t)offset), regT0, regT3);
+ addPtr(callFrameRegister, regT3);
+ storePtr(callFrameRegister, regT3);
+ addPtr(regT0, callFrameRegister);
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+
+ // Put the return value in dst. In the interpreter, op_ret does this.
+ emitPutVirtualRegister(dst);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = instruction[1].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_call_NotJSFunction);
+ stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+#if !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ // Handle eval
+ Jump wasEval;
+ if (opcodeID == op_call_eval) {
+ CallEvalJITStub(this, instruction).call();
+ wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
+ }
+
+ emitGetVirtualRegister(callee, regT2);
+ // The arguments have been set up on the hot path for op_call_eval
+ if (opcodeID == op_call)
+ compileOpCallSetupArgs(instruction);
+ else if (opcodeID == op_construct)
+ compileOpConstructSetupArgs(instruction);
+
+ // Check for JSFunctions.
+ emitJumpSlowCaseIfNotJSCell(regT2);
+ addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // First, in the case of a construct, allocate the new object.
+ if (opcodeID == op_construct) {
+ JITStubCall(this, JITStubs::cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ emitGetVirtualRegister(callee, regT2);
+ }
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
+
+ // Put the return value in dst. In the interpreter, op_ret does this.
+ emitPutVirtualRegister(dst);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
+{
+ int dst = instruction[1].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, opcodeID == op_construct ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction);
+ stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ // Handle eval
+ Jump wasEval;
+ if (opcodeID == op_call_eval) {
+ CallEvalJITStub(this, instruction).call();
+ wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
+ }
+
+    // This plants a check for a cached JSFunction value, so that a fast link to the callee can be established.
+    // It deliberately leaves the callee in ecx, which is used when setting up the stack frame below.
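+    // When the call is later linked, the pointer compared against below is
+    // patched from the null JSValue to the cached JSFunction, so repeat calls
+    // with the same callee fall straight through this check.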
+ emitGetVirtualRegister(callee, regT2);
+ DataLabelPtr addressOfLinkedFunctionCheck;
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT2, addressOfLinkedFunctionCheck, ImmPtr(JSValue::encode(JSValue())));
+ addSlowCase(jumpToSlow);
+ ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+
+    // The following is the fast case, used only when a callee can be linked.
+
+ // In the case of OpConstruct, call out to a cti_ function to create the new object.
+ if (opcodeID == op_construct) {
+ int proto = instruction[5].u.operand;
+ int thisRegister = instruction[6].u.operand;
+
+ emitPutJITStubArg(regT2, 1);
+ emitPutJITStubArgFromVirtualRegister(proto, 4, regT0);
+ JITStubCall stubCall(this, JITStubs::cti_op_construct_JSConstruct);
+ stubCall.call(thisRegister);
+ emitGetVirtualRegister(callee, regT2);
+ }
+
+ // Fast version of stack frame initialization, directly relative to edi.
+    // Note that this does not set up RegisterFile::CodeBlock, which is set in the callee.
+ storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
+ storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+ store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
+ storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
+ storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
+
+ // Call to the callee
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
+
+ // Put the return value in dst. In the interpreter, op_ret does this.
+ emitPutVirtualRegister(dst);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ linkSlowCase(iter);
+
+ // The arguments have been set up on the hot path for op_call_eval
+ if (opcodeID == op_call)
+ compileOpCallSetupArgs(instruction);
+ else if (opcodeID == op_construct)
+ compileOpConstructSetupArgs(instruction);
+
+ // Fast check for JS function.
+ Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT2);
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr));
+
+ // First, in the case of a construct, allocate the new object.
+ if (opcodeID == op_construct) {
+ JITStubCall(this, JITStubs::cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ emitGetVirtualRegister(callee, regT2);
+ }
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallPreLink());
+
+ // Put the return value in dst.
+ emitPutVirtualRegister(dst);
+ sampleCodeBlock(m_codeBlock);
+
+    // op_call and op_call_eval must be the same length; otherwise the
+    // slow-to-hot jump below would need an extra case.
+    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+
+ // Done! - return back to the hot path.
+ if (opcodeID == op_construct)
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
+ else
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
+
+ // This handles host functions
+ callLinkFailNotObject.link(this);
+ callLinkFailNotJSFunction.link(this);
+ JITStubCall(this, opcodeID == op_construct ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction).call();
+
+ emitPutVirtualRegister(dst);
+ sampleCodeBlock(m_codeBlock);
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_CALL)
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCode.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCode.h
new file mode 100644
index 0000000..b502c8a
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCode.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITCode_h
+#define JITCode_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#include "CallFrame.h"
+#include "JSValue.h"
+#include "MacroAssemblerCodeRef.h"
+#include "Profiler.h"
+
+namespace JSC {
+
+ class JSGlobalData;
+ class RegisterFile;
+
+ class JITCode {
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
+ public:
+ JITCode()
+ {
+ }
+
+ JITCode(const CodeRef ref)
+ : m_ref(ref)
+ {
+ }
+
+ bool operator !() const
+ {
+ return !m_ref.m_code.executableAddress();
+ }
+
+ CodePtr addressForCall()
+ {
+ return m_ref.m_code;
+ }
+
+ // This function returns the offset in bytes of 'pointerIntoCode' into
+ // this block of code. The pointer provided must be a pointer into this
+        // block of code. It is ASSERTed that no code block is more than 4GB in size.
+ unsigned offsetOf(void* pointerIntoCode)
+ {
+ intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.m_code.executableAddress());
+ ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
+ return static_cast<unsigned>(result);
+ }
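+
+        // Hypothetical usage sketch (not part of this patch): a caller might
+        // record "unsigned off = code.offsetOf(returnAddress);" and later
+        // recover the absolute location as static_cast<char*>(code.start()) + off.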
+
+ // Execute the code!
+ inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValue* exception)
+ {
+ return JSValue::decode(ctiTrampoline(
+#if PLATFORM(X86_64)
+ 0, 0, 0, 0, 0, 0,
+#endif
+ m_ref.m_code.executableAddress(), registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
+ }
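+
+        // On X86_64 the six zero arguments occupy the register-passed parameter
+        // slots (assuming a register-based calling convention), so the real
+        // arguments land on the stack where the trampoline expects to find them.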
+
+ void* start()
+ {
+ return m_ref.m_code.dataLocation();
+ }
+
+ size_t size()
+ {
+ ASSERT(m_ref.m_code.executableAddress());
+ return m_ref.m_size;
+ }
+
+ ExecutablePool* getExecutablePool()
+ {
+ return m_ref.m_executablePool.get();
+ }
+
+        // Host functions are a bit special; they have an m_code pointer but do
+        // not individually ref the executable pool containing the trampoline.
+ static JITCode HostFunction(CodePtr code)
+ {
+ return JITCode(code.dataLocation(), 0, 0);
+ }
+
+ private:
+ JITCode(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
+ : m_ref(code, executablePool, size)
+ {
+ }
+
+ CodeRef m_ref;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITCode_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h
new file mode 100644
index 0000000..f03d635
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h
@@ -0,0 +1,475 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITInlineMethods_h
+#define JITInlineMethods_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+ALWAYS_INLINE void JIT::killLastResultRegister()
+{
+ m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
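+    // max() is not a valid register index, so it acts as a "nothing cached" sentinel.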
+}
+
+// Loads an argument from the stack-frame register array into a hardware register.
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue value = m_codeBlock->getConstant(src);
+ move(ImmPtr(JSValue::encode(value)), dst);
+ killLastResultRegister();
+ return;
+ }
+
+ if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
+ bool atJumpTarget = false;
+ while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
+ if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
+ atJumpTarget = true;
+ ++m_jumpTargetsPosition;
+ }
+
+ if (!atJumpTarget) {
+ // The argument we want is already stored in eax
+ if (dst != cachedResultRegister)
+ move(cachedResultRegister, dst);
+ killLastResultRegister();
+ return;
+ }
+ }
+
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
+ killLastResultRegister();
+}
+
+ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
+{
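+    // If src2 holds the cached result, load it first: loading src1 kills the
+    // cached-register tracking and would force src2 to be reloaded from memory.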
+ if (src2 == m_lastResultBytecodeRegister) {
+ emitGetVirtualRegister(src2, dst2);
+ emitGetVirtualRegister(src1, dst1);
+ } else {
+ emitGetVirtualRegister(src1, dst1);
+ emitGetVirtualRegister(src2, dst2);
+ }
+}
+
+// Puts an argument onto the stack, as an argument to a context-threaded function.
+ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
+{
+ poke(src, argumentNumber);
+}
+
+ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
+{
+ poke(Imm32(value), argumentNumber);
+}
+
+ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
+{
+ poke(ImmPtr(value), argumentNumber);
+}
+
+ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
+{
+ peek(dst, argumentNumber);
+}
+
+ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
+{
+ ASSERT(m_codeBlock->isConstantRegisterIndex(src));
+ return m_codeBlock->getConstant(src);
+}
+
+ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
+{
+ return getConstantOperand(src).getInt32Fast();
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32Fast();
+}
+
+// Puts an argument from the SF (stack frame) register array onto the stack, as an argument to a context-threaded function.
+ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
+{
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue value = m_codeBlock->getConstant(src);
+ emitPutJITStubArgConstant(JSValue::encode(value), argumentNumber);
+ } else {
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
+ emitPutJITStubArg(scratch, argumentNumber);
+ }
+
+ killLastResultRegister();
+}
+
+ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+{
+ storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
+}
+
+ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
+{
+ storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
+}
+
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ loadPtr(Address(from, entry * sizeof(Register)), to);
+ killLastResultRegister();
+}
+
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ load32(Address(from, entry * sizeof(Register)), to);
+ killLastResultRegister();
+}
+
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
+{
+ storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
+ m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
+ // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
+}
+
+ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+{
+ storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
+ // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
+}
+
+ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
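+    // Emit a near call with an as-yet unlinked target, and record it together
+    // with the eventual callee so the link phase can patch in the real address.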
+ Call nakedCall = nearCall();
+ m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
+ return nakedCall;
+}
+
+#if PLATFORM(X86) || PLATFORM(X86_64)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ pop(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ push(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ push(address);
+}
+
+#elif PLATFORM_ARM_ARCH(7)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ move(linkRegister, reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ move(reg, linkRegister);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ loadPtr(address, linkRegister);
+}
+
+#endif
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+ALWAYS_INLINE void JIT::restoreArgumentReference()
+{
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+}
+ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
+#else
+ALWAYS_INLINE void JIT::restoreArgumentReference()
+{
+ move(stackPointerRegister, firstArgumentRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+}
+ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
+{
+#if PLATFORM(X86)
+ // Within a trampoline the return address will be on the stack at this point.
+ addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
+#elif PLATFORM_ARM_ARCH(7)
+ move(stackPointerRegister, firstArgumentRegister);
+#endif
+ // In the trampoline on x86-64, the first argument register is not overwritten.
+}
+#endif
+
+ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
+{
+ return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ return branchTestPtr(Zero, reg, tagMaskRegister);
+#else
+ return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
+#endif
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+{
+ move(reg1, scratch);
+ orPtr(reg2, scratch);
+ return emitJumpIfJSCell(scratch);
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
+{
+ addSlowCase(emitJumpIfJSCell(reg));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ return branchTestPtr(NonZero, reg, tagMaskRegister);
+#else
+ return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
+{
+ addSlowCase(emitJumpIfNotJSCell(reg));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
+{
+ if (!m_codeBlock->isKnownNotImmediate(vReg))
+ emitJumpSlowCaseIfNotJSCell(reg);
+}
+
+ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
+{
+ if (!m_codeBlock->isKnownNotImmediate(vReg))
+ linkSlowCase(iter);
+}
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
+{
+ return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
+}
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
+{
+ return branchTestPtr(Zero, reg, tagTypeNumberRegister);
+}
+#endif
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
+#else
+ return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
+#endif
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ return branchPtr(Below, reg, tagTypeNumberRegister);
+#else
+ return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
+#endif
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+{
+ move(reg1, scratch);
+ andPtr(reg2, scratch);
+ return emitJumpIfNotImmediateInteger(scratch);
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
+{
+ addSlowCase(emitJumpIfNotImmediateInteger(reg));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+{
+ addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
+}
+
+#if !USE(ALTERNATE_JSIMMEDIATE)
+ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
+{
+ subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
+{
+ return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
+}
+#endif
+
+ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ emitFastArithIntToImmNoCheck(src, dest);
+#else
+ if (src != dest)
+ move(src, dest);
+ addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ UNUSED_PARAM(reg);
+#else
+ rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift), reg);
+#endif
+}
+
+// operand is int32_t, must have been zero-extended if register is 64-bit.
+ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ if (src != dest)
+ move(src, dest);
+ orPtr(tagTypeNumberRegister, dest);
+#else
+ signExtend32ToPtr(src, dest);
+ addPtr(dest, dest);
+ emitFastArithReTagImmediate(dest, dest);
+#endif
+}
+
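+// A worked example of the tagging below, assuming the usual JSImmediate bool
+// encoding: set32 leaves 0 or 1 in the register; shifting up by
+// ExtendedPayloadShift and or'ing in FullTagTypeBool turns 0 into the
+// immediate for 'false' and 1 into the immediate for 'true'.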
+ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
+{
+ lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
+ or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
+}
+
+ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
+}
+
+ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
+}
+
+#if ENABLE(SAMPLING_FLAGS)
+ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
+}
+
+ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
+}
+#endif
+
+#if ENABLE(SAMPLING_COUNTERS)
+ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
+{
+#if PLATFORM(X86_64) // Or any other 64-bit platform.
+ addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
+#elif PLATFORM(X86) // Or any other little-endian 32-bit platform.
+ intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
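+    // Compose a 64-bit increment from 32-bit ops: add the count to the low
+    // word, then propagate the carry into the high word at &m_counter + 4.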
+ add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
+ addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
+#else
+#error "SAMPLING_COUNTERS not implemented on this platform."
+#endif
+}
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+#if PLATFORM(X86_64)
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86::ecx);
+ storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
+}
+#endif
+#endif
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+#if PLATFORM(X86_64)
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86::ecx);
+ storePtr(ImmPtr(codeBlock), X86::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
+}
+#endif
+#endif
+}
+
+#endif // ENABLE(JIT)
+
+#endif
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp
new file mode 100644
index 0000000..da541c5
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSCell.h"
+
+namespace JSC {
+
+#define RECORD_JUMP_TARGET(targetOffset) \
+ do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ if (dst == m_lastResultBytecodeRegister)
+ killLastResultRegister();
+ } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
+        // If either src or dst is the cached register, go through the
+        // get/put register helpers to make sure we track this correctly.
+ emitGetVirtualRegister(src, regT0);
+ emitPutVirtualRegister(dst);
+ } else {
+ // Perform the copy via regT1; do not disturb any mapping in regT0.
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
+ storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
+ }
+}
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, JITStubs::cti_op_end).call();
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target + 1);
+ RECORD_JUMP_TARGET(target + 1);
+}
+
+void JIT::emit_op_loop(Instruction* currentInstruction)
+{
+ emitTimeoutCheck();
+
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target + 1);
+}
+
+void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
+{
+ emitTimeoutCheck();
+
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
+#else
+ int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
+#endif
+ addJump(branch32(GreaterThan, regT0, Imm32(op1imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ addJump(branch32(LessThan, regT0, regT1), target + 3);
+ }
+}
+
+void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+{
+ emitTimeoutCheck();
+
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3);
+ }
+}
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+ JITStubCall(this, JITStubs::cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+ // Load the operands (baseVal, proto, and value respectively) into registers.
+ // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT2);
+
+ // Check that baseVal & proto are cells.
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ emitJumpSlowCaseIfNotJSCell(regT1);
+
+ // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
+
+ // If value is not an Object, return false.
+ Jump valueIsImmediate = emitJumpIfNotJSCell(regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
+
+ // Check proto is object.
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+
+ // Optimistically load the result true, and start looping.
+ // Initially, regT1 still contains proto and regT2 still contains value.
+ // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+ move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);
+ Label loop(this);
+
+ // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
+ // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
+ Jump isInstance = branchPtr(Equal, regT2, regT1);
+ branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop);
+
+ // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+ valueIsImmediate.link(this);
+ valueIsNotObject.link(this);
+ move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);
+
+ // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+ isInstance.link(this);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_new_func);
+ stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_load_varargs(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_load_varargs);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCallVarargs(currentInstruction);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+ JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
+ move(ImmPtr(globalObject), regT0);
+ emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
+ JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
+ move(ImmPtr(globalObject), regT0);
+ emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+ while (skip--)
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
+ emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+ while (skip--)
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
+
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+ emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_tear_off_activation);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction*)
+{
+ JITStubCall(this, JITStubs::cti_op_tear_off_arguments).call();
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+#ifdef QT_BUILD_SCRIPT_LIB
+ JITStubCall stubCall(this, JITStubs::cti_op_debug_return);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+#endif
+ // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, JITStubs::cti_op_ret_scopeChain).call();
+
+ ASSERT(callFrameRegister != regT1);
+ ASSERT(regT1 != returnValueRegister);
+ ASSERT(returnValueRegister != callFrameRegister);
+
+    // Return the result in the return-value register (%eax on x86).
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_new_array);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_construct_verify(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+
+ Jump isImm = emitJumpIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ isImm.link(this);
+
+ if (dst != src)
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_strcat);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_func(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve_func);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+{
+ emitTimeoutCheck();
+
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
+ addJump(emitJumpIfImmediateInteger(regT0), target + 2);
+
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
+ addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
+
+ isZero.link(this);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve_skip);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction)
+{
+ // Fast case
+ void* globalObject = currentInstruction[2].u.jsCell;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+ void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
+ void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
+
+ // Check Structure of global object
+ move(ImmPtr(globalObject), regT0);
+ loadPtr(structureAddress, regT1);
+ Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); // Structures don't match
+
+ // Load cached property
+ // Assume that the global object always uses external storage.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT0);
+ load32(offsetAddr, regT1);
+ loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ Jump end = jump();
+
+ // Slow case
+ noMatch.link(this);
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve_global);
+ stubCall.addArgument(ImmPtr(globalObject));
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.call(currentInstruction[1].u.operand);
+ end.link(this);
+}
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
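+    // Boolean immediates differ only in the bool payload bit. After xor'ing
+    // out the bool tag, anything other than that payload bit means the value
+    // was not a boolean, so take the slow case; the final xor below flips the
+    // payload and restores the tag in one operation.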
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+ addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
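+    // Fast cases: the immediate integer 0 and boolean false take the jump; any
+    // other immediate integer and boolean true fall through; everything else
+    // drops to the slow case.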
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target + 2);
+ Jump isNonZero = emitJumpIfImmediateInteger(regT0);
+
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target + 2);
+ addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
+
+ isNonZero.link(this);
+ RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);
+
+ wasNotImmediate.link(this);
+ RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);
+
+ wasNotImmediate.link(this);
+ RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ JSCell* ptr = currentInstruction[2].u.jsCell;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target + 3);
+
+ RECORD_JUMP_TARGET(target + 3);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
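+    // Store a patchable placeholder in the return-address register slot; the
+    // JSRInfo recorded below pairs it with the label after the jump, so the
+    // linker can patch in the machine address that op_sret jumps back through.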
+ DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target + 2);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+ killLastResultRegister();
+ RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+ killLastResultRegister();
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ set32(Equal, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ not32(regT0);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+#else
+ xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
+#endif
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve_with_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_new_func_exp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
+ addJump(emitJumpIfImmediateInteger(regT0), target + 2);
+
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
+ addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
+
+ isZero.link(this);
+ RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ set32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ xorPtr(regT1, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_new_regexp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ orPtr(regT1, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_throw);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.call();
+ ASSERT(regT0 == returnValueRegister);
+#ifndef NDEBUG
+    // cti_op_throw always changes its return address, so
+    // this point in the code should never be reached.
+ breakpoint();
+#endif
+}
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_next_pname);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.call();
+ Jump endOfIter = branchTestPtr(Zero, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ addJump(jump(), currentInstruction[3].u.operand + 3);
+ endOfIter.link(this);
+}
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_push_scope);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ JITStubCall(this, JITStubs::cti_op_pop_scope).call();
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+ int srcVReg = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(srcVReg, regT0);
+
+ Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
+
+ wasImmediate.link(this);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_push_new_scope);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+ killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
+ peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+#ifdef QT_BUILD_SCRIPT_LIB
+ JITStubCall stubCall(this, JITStubs::cti_op_debug_catch);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+#endif
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_jmp_scopes);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+ addJump(jump(), currentInstruction[2].u.operand + 2);
+ RECORD_JUMP_TARGET(currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, JITStubs::cti_op_switch_imm);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, JITStubs::cti_op_switch_char);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+
+ JITStubCall stubCall(this, JITStubs::cti_op_switch_string);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_new_error(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_new_error);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[3].u.operand))));
+ stubCall.addArgument(Imm32(m_bytecodeIndex));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_debug);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[4].u.operand));
+ stubCall.call();
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src1, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
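+    // For cells, equality with null holds only for objects that masquerade as
+    // undefined; for immediates, masking off the undefined tag bit folds
+    // undefined onto null so a single compare against FullTagTypeNull suffices.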
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ setTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+
+ wasNotImmediate.link(this);
+
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src1, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ setTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+
+ wasNotImmediate.link(this);
+
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_enter(Instruction*)
+{
+ // Even though CTI doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ size_t count = m_codeBlock->m_numVars;
+ for (size_t j = 0; j < count; ++j)
+ emitInitRegister(j);
+}
+
+void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
+{
+ // Even though CTI doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ size_t count = m_codeBlock->m_numVars;
+ for (size_t j = 0; j < count; ++j)
+ emitInitRegister(j);
+
+ JITStubCall(this, JITStubs::cti_op_push_activation).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_create_arguments(Instruction*)
+{
+ Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, JITStubs::cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, JITStubs::cti_op_create_arguments).call();
+ argsCreated.link(this);
+}
+
+void JIT::emit_op_init_arguments(Instruction*)
+{
+ storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ addSlowCase(branchTest32(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+ peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+
+ JITStubCall stubCall(this, JITStubs::cti_op_profile_will_call);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+ peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+
+ JITStubCall stubCall(this, JITStubs::cti_op_profile_did_call);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+
+// Slow cases
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_convert_this);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, JITStubs::cti_op_to_primitive);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    // The slow case that handles accesses to arrays (below) may jump back up to here.
+ Label beginGetByValSlow(this);
+
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitFastArithIntToImmNoCheck(regT1, regT1);
+
+ notImm.link(this);
+ JITStubCall stubCall(this, JITStubs::cti_op_get_by_val);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+
+    // This is the slow case that handles accesses to arrays above the fast cut-off.
+ // First, check if this is an access to the vector
+ linkSlowCase(iter);
+ branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), beginGetByValSlow);
+
+    // OK, we missed the fast region, but the index is still within the vector. Get the value.
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT2);
+ // Check whether the value loaded is zero; if so we need to return undefined.
+ branchTestPtr(Zero, regT2, beginGetByValSlow);
+ move(regT2, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+}
+
+void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ }
+}
+
+void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_loop_if_lesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_loop_if_lesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ }
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    // Normal slow cases - either the subscript is not an immediate int, or the base is an array.
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitFastArithIntToImmNoCheck(regT1, regT1);
+
+ notImm.link(this); {
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_val);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
+ }
+
+ // slow cases for immediate int accesses to arrays
+ linkSlowCase(iter);
+ linkSlowCase(iter); {
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_val_array);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+ }
+}
+
+void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+ JITStubCall stubCall(this, JITStubs::cti_op_not);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand + 2); // inverted!
+}
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_bitnot);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_bitxor);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_bitor);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_eq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_neq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_stricteq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_nstricteq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_instanceof);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.addArgument(currentInstruction[4].u.operand, regT2);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallVarargsSlowCase(currentInstruction, iter);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, JITStubs::cti_op_to_jsnumber);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp
new file mode 100644
index 0000000..c1e5c29
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -0,0 +1,839 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+ // We check the value as if it were a uint32 against m_fastAccessCutoff - which will always fail if the
+ // number was negative, since m_fastAccessCutoff is always less than INT_MAX (the total allocation
+ // size is always less than 4GB). As such, zero-extending will have been correct (and extending the value
+ // to 64 bits is necessary since it's used in the address calculation). We zero-extend rather than sign-
+ // extend since it makes it easier to re-tag the value in the slow case.
+ zeroExtend32ToPtr(regT1, regT1);
+#else
+ emitFastArithImmToInt(regT1);
+#endif
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+
+ // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
+
+ // Get the value from the vector
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
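+// A sketch (not emitted code) of the fast path above, once the tag and vptr
+// checks have passed:
+//   ArrayStorage* storage = array->m_storage;
+//   if (static_cast<uint32_t>(index) < array->m_fastAccessCutoff)
+//       result = storage->m_vector[index]; // the BaseIndex load: storage + index * sizeof(JSValue)
+//   else
+//       goto slowCase;
+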
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ // See comment in op_get_by_val.
+ zeroExtend32ToPtr(regT1, regT1);
+#else
+ emitFastArithImmToInt(regT1);
+#endif
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+
+ // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ Jump inFastVector = branch32(Below, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
+ // No; oh well, check if the access is within the vector - if so, we may still be okay.
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
+
+ // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
+ // FIXME: should be able to handle an initial write to the array; increment the number of items in the array, and potentially update the fast access cutoff.
+ addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
+
+ // All good - put the value into the array.
+ inFastVector.link(this);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+ storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+}
+
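+// Sketch of the store logic above: an index below m_fastAccessCutoff stores
+// straight into the vector; an index within m_vectorLength stays on the fast
+// path only if the slot is already non-empty (so the slow case's length
+// bookkeeping is never skipped); anything else takes the slow case.
+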
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_index);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_put_getter);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_put_setter);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_del_by_id);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+
+#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
+#endif
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+ JITStubCall stubCall(this, JITStubs::cti_op_get_by_id_generic);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.call(resultVReg);
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ unsigned valueVReg = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
+
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_generic);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(regT1);
+ stubCall.call();
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+ Jump notCell = emitJumpIfNotJSCell(regT0);
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+
+ // This will be repatched to move the cached function into the register as an immediate, without doing a load.
+ DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+ Jump match = jump();
+
+ ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
+ ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
+ ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
+
+ // Link the failure cases here.
+ notCell.link(this);
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+ // Do a regular(ish) get_by_id (the slow case will be linked to
+ // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
+
+ match.link(this);
+ emitPutVirtualRegister(resultVReg);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+}
+
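+// Roughly, once patchMethodCallProto has filled the cache in, the inline code
+// above behaves like this sketch:
+//   if (isCell(base) && base->m_structure == cachedStructure
+//           && cachedProto->m_structure == cachedProtoStructure)
+//       result = cachedFunction; // planted as an immediate - no property load
+//   else
+//       result = /* regular get_by_id path */;
+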
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, m_propertyAccessInstructionIndex++, true);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+
+#endif
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+ compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
+ emitPutVirtualRegister(resultVReg);
+}
+
+void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
+{
+ // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+ // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+ // to the array-length / prototype access trampolines), and finally we also record the property-map access offset
+ // as a label to jump back to if one of these trampolines finds a match.
+
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+
+ DataLabelPtr structureToCompare;
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ addSlowCase(structureCheck);
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
+ ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
+
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
+
+ DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
+
+ Label putResult(this);
+ ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
+}
+
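+// Note that the hot path above is a fixed-shape template: the Structure
+// immediate, the external-storage load and the displacement all start out as
+// deliberately bogus values (patchGetByIdDefaultStructure and
+// patchGetByIdDefaultOffset) and only become meaningful once patchGetByIdSelf
+// or one of the stub compilers below repatches them.
+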
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, m_propertyAccessInstructionIndex++, false);
+}
+
+void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex, bool isMethodCheck)
+{
+ // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
+ // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+ // of the call (which we can use to look up the patch information), but should an array-length or
+ // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+ // the distance from the call to the head of the slow case.
+
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCase(iter);
+
+#ifndef NDEBUG
+ Label coldPathBegin(this);
+#endif
+ JITStubCall stubCall(this, isMethodCheck ? JITStubs::cti_op_get_by_id_method_check : JITStubs::cti_op_get_by_id);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ Call call = stubCall.call(resultVReg);
+
+ ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ unsigned valueVReg = currentInstruction[3].u.operand;
+
+ unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
+
+ // In order to be able to patch both the Structure and the object offset, we store a single pointer,
+ // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and we
+ // generate code such that the Structure & offset are always at the same distance from it.
+
+ emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
+
+ // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+
+ // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
+ DataLabelPtr structureToCompare;
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+
+ // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
+
+ DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+
+ unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
+
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_id);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(regT1);
+ Call call = stubCall.call();
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+}
+
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset * sizeof(JSValue);
+ if (structure->isUsingInlineStorage())
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ storePtr(value, Address(base, offset));
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset * sizeof(JSValue);
+ if (structure->isUsingInlineStorage())
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ loadPtr(Address(base, offset), result);
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
+{
+ if (base->isUsingInlineStorage())
+ loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
+ else {
+ PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
+ loadPtr(static_cast<void*>(protoPropertyStorage), temp);
+ loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
+ }
+}
+
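+// A worked example of the offset computation above, assuming 8-byte JSValues
+// (the USE(ALTERNATE_JSIMMEDIATE) case): cachedOffset 3 on an inline-storage
+// object accesses base + OBJECT_OFFSETOF(JSObject, m_inlineStorage) + 24,
+// while with external storage we first load m_externalStorage and then access
+// storage + 24.
+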
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
+{
+ JumpList failureCases;
+ // Check eax is an object of the right Structure.
+ failureCases.append(emitJumpIfNotJSCell(regT0));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
+ JumpList successCases;
+
+ // ecx = baseObject->structure()
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ // check that the base is in fact an object
+ failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
+
+ // ecx = baseObject->structure()->prototype()
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
+
+ // Walk the prototype chain, checking that each Structure has not changed.
+ for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
+ // null check the prototype
+ successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull()))));
+
+ // Check the structure id
+ failureCases.append(branchPtr(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(it->get())));
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
+ }
+
+ successCases.link(this);
+
+ Call callTarget;
+
+ // emit a call only if storage realloc is needed
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (willNeedStorageRealloc) {
+ // This trampoline was called like a JIT stub; before we can call again we need to
+ // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_transition_realloc);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+ stubCall.addArgument(regT1); // This argument is not used in the stub; we set it up on the stack so that it can be restored, below.
+ stubCall.call(regT0);
+ emitGetJITStubArg(4, regT1);
+
+ restoreReturnAddressBeforeReturn(regT3);
+ }
+
+ // Assumes m_refCount can be decremented easily; the refcount decrement is safe, as the
+ // CodeBlock should ensure oldStructure->m_refCount > 0.
+ sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
+ add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
+ storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
+
+ // write the value
+ compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
+
+ ret();
+
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ patchBuffer.link(failureCall, FunctionPtr(JITStubs::cti_op_put_by_id_fail));
+
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(JITStubs::cti_op_put_by_id_transition_realloc));
+ }
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+}
+
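+// In effect, the transition stub generated above performs:
+//   if (object->m_structure != oldStructure) goto fail;  // plus prototype chain checks
+//   if (capacity changed) cti_op_put_by_id_transition_realloc(...);
+//   object->m_structure = newStructure;                  // with refcount fixup
+//   storage[cachedOffset] = value; return;
+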
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct
+ if (structure->isUsingInlineStorage())
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
+
+ // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
+}
+
+void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ ASSERT(!methodCallLinkInfo.cachedStructure);
+ methodCallLinkInfo.cachedStructure = structure;
+ structure->ref();
+
+ Structure* prototypeStructure = proto->structure();
+ ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
+ methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
+ prototypeStructure->ref();
+
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+}
+
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ // Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct
+ if (structure->isUsingInlineStorage())
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
+
+ // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
+}
+
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+{
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+ // Check eax is an array
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+
+ Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
+
+ emitFastArithIntToImmNoCheck(regT2, regT0);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+ // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
+}
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
+ // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ // Check eax is an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure has not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ // Checks out okay! - getDirectOffset
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+ // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_proto_list));
+}
+
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+{
+ Jump failureCase = checkStructure(regT0, structure);
+ compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ if (!lastProtoBegin)
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+
+ patchBuffer.link(failureCase, lastProtoBegin);
+
+ // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ structure->ref();
+ polymorphicStructures->list[currentIndex].set(entryLabel, structure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
+{
+ // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
+ // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ // Check eax is an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure has not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ // Checks out okay! - getDirectOffset
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ patchBuffer.link(failureCases1, lastProtoBegin);
+ patchBuffer.link(failureCases2, lastProtoBegin);
+
+ // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ structure->ref();
+ prototypeStructure->ref();
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
+{
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+ // Check eax is an object of the right Structure.
+ Jump baseObjectCheck = checkStructure(regT0, structure);
+ bucketsOfFail.append(baseObjectCheck);
+
+ Structure* currStructure = structure;
+ RefPtr<Structure>* chainEntries = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = chainEntries[i].get();
+
+ // Check the prototype object's Structure has not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#else
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+ }
+ ASSERT(protoObject);
+
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+
+ patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+ // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ // Track the stub we have created so that it will be deleted later.
+ structure->ref();
+ chain->ref();
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+ // Check eax is an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ RefPtr<Structure>* chainEntries = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = chainEntries[i].get();
+
+ // Check the prototype object's Structure has not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#else
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+ }
+ ASSERT(protoObject);
+
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+
+ // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_proto_list));
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h
new file mode 100644
index 0000000..bc07178
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubCall_h
+#define JITStubCall_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+ class JITStubCall {
+ public:
+ JITStubCall(JIT* jit, JSObject* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Value)
+ , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ {
+ }
+
+ JITStubCall(JIT* jit, JSPropertyNameIterator* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Value)
+ , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ {
+ }
+
+ JITStubCall(JIT* jit, void* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Value)
+ , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ {
+ }
+
+ JITStubCall(JIT* jit, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Value)
+ , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ {
+ }
+
+ JITStubCall(JIT* jit, void (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Void)
+ , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ {
+ }
+
+ // Arguments are added first to last.
+
+ void addArgument(JIT::Imm32 argument)
+ {
+ m_jit->poke(argument, m_argumentIndex);
+ ++m_argumentIndex;
+ }
+
+ void addArgument(JIT::ImmPtr argument)
+ {
+ m_jit->poke(argument, m_argumentIndex);
+ ++m_argumentIndex;
+ }
+
+ void addArgument(JIT::RegisterID argument)
+ {
+ m_jit->poke(argument, m_argumentIndex);
+ ++m_argumentIndex;
+ }
+
+ void addArgument(unsigned src, JIT::RegisterID scratchRegister) // src is a virtual register.
+ {
+ if (m_jit->m_codeBlock->isConstantRegisterIndex(src))
+ addArgument(JIT::ImmPtr(JSValue::encode(m_jit->m_codeBlock->getConstant(src))));
+ else {
+ m_jit->loadPtr(JIT::Address(JIT::callFrameRegister, src * sizeof(Register)), scratchRegister);
+ addArgument(scratchRegister);
+ }
+ m_jit->killLastResultRegister();
+ }
+
+ JIT::Call call()
+ {
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_jit->m_bytecodeIndex != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, true);
+#endif
+
+ m_jit->restoreArgumentReference();
+ JIT::Call call = m_jit->call();
+ m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeIndex, m_stub));
+
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_jit->m_bytecodeIndex != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, false);
+#endif
+
+ m_jit->killLastResultRegister();
+ return call;
+ }
+
+ JIT::Call call(unsigned dst) // dst is a virtual register.
+ {
+ ASSERT(m_returnType == Value);
+ JIT::Call call = this->call();
+ m_jit->emitPutVirtualRegister(dst);
+ return call;
+ }
+
+ JIT::Call call(JIT::RegisterID dst)
+ {
+ ASSERT(m_returnType == Value);
+ JIT::Call call = this->call();
+ if (dst != JIT::returnValueRegister)
+ m_jit->move(JIT::returnValueRegister, dst);
+ return call;
+ }
+
+ private:
+ JIT* m_jit;
+ void* m_stub;
+ enum { Value, Void } m_returnType;
+ size_t m_argumentIndex;
+ };
+
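+ // Typical usage from an emit function (see e.g. emitSlow_op_bitor in
+ // JITOpcodes.cpp) - a sketch, with cti_op_bitor standing in for any
+ // value-returning stub:
+ //   JITStubCall stubCall(this, JITStubs::cti_op_bitor);
+ //   stubCall.addArgument(regT0);
+ //   stubCall.addArgument(regT1);
+ //   stubCall.call(dst); // dst is the virtual register receiving the result
+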
+ class CallEvalJITStub : public JITStubCall {
+ public:
+ CallEvalJITStub(JIT* jit, Instruction* instruction)
+ : JITStubCall(jit, JITStubs::cti_op_call_eval)
+ {
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ addArgument(callee, JIT::regT2);
+ addArgument(JIT::Imm32(registerOffset));
+ addArgument(JIT::Imm32(argCount));
+ }
+ };
+}
+
+#endif // ENABLE(JIT)
+
+#endif // JITStubCall_h
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp
new file mode 100644
index 0000000..0a5eb07
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp
@@ -0,0 +1,2801 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITStubs.h"
+
+#if ENABLE(JIT)
+
+#include "Arguments.h"
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "Collector.h"
+#include "Debugger.h"
+#include "ExceptionHelpers.h"
+#include "GlobalEvalFunction.h"
+#include "JIT.h"
+#include "JSActivation.h"
+#include "JSArray.h"
+#include "JSByteArray.h"
+#include "JSFunction.h"
+#include "JSNotAnObject.h"
+#include "JSPropertyNameIterator.h"
+#include "JSStaticScopeObject.h"
+#include "JSString.h"
+#include "ObjectPrototype.h"
+#include "Operations.h"
+#include "Parser.h"
+#include "Profiler.h"
+#include "RegExpObject.h"
+#include "RegExpPrototype.h"
+#include "Register.h"
+#include "SamplingTool.h"
+#include <stdio.h>
+
+#ifdef QT_BUILD_SCRIPT_LIB
+#include "bridge/qscriptobject_p.h"
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+
+#if PLATFORM(DARWIN) || PLATFORM(WIN_OS)
+#define SYMBOL_STRING(name) "_" #name
+#else
+#define SYMBOL_STRING(name) #name
+#endif
+
+#if PLATFORM(DARWIN)
+ // Mach-O platform
+#define HIDE_SYMBOL(name) ".private_extern _" #name
+#elif PLATFORM(AIX)
+ // IBM's own file format
+#define HIDE_SYMBOL(name) ".lglobl " #name
+#elif PLATFORM(LINUX) || PLATFORM(FREEBSD) || PLATFORM(OPENBSD) || PLATFORM(SOLARIS) || (PLATFORM(HPUX) && PLATFORM(IA64)) || PLATFORM(SYMBIAN) || PLATFORM(NETBSD)
+ // ELF platform
+#define HIDE_SYMBOL(name) ".hidden " #name
+#else
+#define HIDE_SYMBOL(name)
+#endif
+
+#if COMPILER(GCC) && PLATFORM(X86)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushl %ebp" "\n"
+ "movl %esp, %ebp" "\n"
+ "pushl %esi" "\n"
+ "pushl %edi" "\n"
+ "pushl %ebx" "\n"
+ "subl $0x1c, %esp" "\n"
+ "movl $512, %esi" "\n"
+ "movl 0x38(%esp), %edi" "\n"
+ "call *0x30(%esp)" "\n"
+ "addl $0x1c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
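+// Arithmetic behind the 0x30/0x38 offsets above: on entry the first argument
+// ('code') is at [esp + 0x04] and the CallFrame argument at [esp + 0x0c]; the
+// four register pushes (0x10 bytes) plus the 0x1c-byte stack allocation move
+// them to 0x30 and 0x38 respectively, matching the COMPILE_ASSERTs above.
+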
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
+ "movl %esp, %ecx" "\n"
+#endif
+ "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "addl $0x1c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addl $0x1c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(GCC) && PLATFORM(X86_64)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
+#endif
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushq %rbp" "\n"
+ "movq %rsp, %rbp" "\n"
+ "pushq %r12" "\n"
+ "pushq %r13" "\n"
+ "pushq %r14" "\n"
+ "pushq %r15" "\n"
+ "pushq %rbx" "\n"
+ "subq $0x48, %rsp" "\n"
+ "movq $512, %r12" "\n"
+ "movq $0xFFFF000000000000, %r14" "\n"
+ "movq $0xFFFF000000000002, %r15" "\n"
+ "movq 0x90(%rsp), %r13" "\n"
+ "call *0x80(%rsp)" "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
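+// The constants planted above appear to be: r12 = 512, the initial timeout
+// check counter (as %esi is on x86); r13 = the CallFrame argument (offset
+// 0x90, per the COMPILE_ASSERT); and r14/r15 = the JSImmediate tag constants
+// kept pinned in registers for fast tag checks on x86-64.
+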
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "movq %rsp, %rdi" "\n"
+ "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(GCC) && PLATFORM_ARM_ARCH(7)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
+#endif
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "sub sp, sp, #0x3c" "\n"
+ "str lr, [sp, #0x20]" "\n"
+ "str r4, [sp, #0x24]" "\n"
+ "str r5, [sp, #0x28]" "\n"
+ "str r6, [sp, #0x2c]" "\n"
+ "str r1, [sp, #0x30]" "\n"
+ "str r2, [sp, #0x34]" "\n"
+ "str r3, [sp, #0x38]" "\n"
+ "cpy r5, r2" "\n"
+ "mov r6, #512" "\n"
+ "blx r0" "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "cpy r0, sp" "\n"
+ "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+".thumb" "\n"
+".thumb_func " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+#elif COMPILER(MSVC)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
+#endif
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+extern "C" {
+
+ __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*)
+ {
+ __asm {
+ push ebp;
+ mov ebp, esp;
+ push esi;
+ push edi;
+ push ebx;
+ sub esp, 0x1c;
+ mov esi, 512;
+ mov ecx, esp;
+ mov edi, [esp + 0x38];
+ call [esp + 0x30];
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiVMThrowTrampoline()
+ {
+ __asm {
+ mov ecx, esp;
+ call JITStubs::cti_vm_throw;
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiOpThrowNotCaught()
+ {
+ __asm {
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+}
+
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+ #define CTI_SAMPLER stackFrame.globalData->interpreter->sampler()
+#else
+ #define CTI_SAMPLER 0
+#endif
+
+JITThunks::JITThunks(JSGlobalData* globalData)
+{
+ JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiArrayLengthTrampoline, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallPreLink, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk);
+
+#if PLATFORM_ARM_ARCH(7)
+ // Unfortunately the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
+ // and the OBJECT_OFFSETOF macro does not appear constant enough for it to be happy with its use in COMPILE_ASSERT
+ // macros.
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == 0x20);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == 0x24);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == 0x28);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == 0x2c);
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == 0x30);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == 0x34);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, exception) == 0x38);
+ // The fifth argument is the first item already on the stack.
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == 0x3c);
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == 0x1C);
+#endif
+}
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
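+// An informal overview of tryCachePutByID: after a put_by_id's first slow-path execution,
+// replace-style writes are patched inline against the base's current Structure, writes that
+// add a new property compile a transition stub, and anything uncacheable (dictionaries,
+// proxied bases, uncacheable prototype chains) is repatched to cti_op_put_by_id_generic.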
+NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot)
+{
+ // The interpreter checks for recursion here; I do not believe this can occur in CTI.
+
+ if (!baseValue.isCell())
+ return;
+
+ // Uncacheable: give up.
+ if (!slot.isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ return;
+ }
+
+ JSCell* baseCell = asCell(baseValue);
+ Structure* structure = baseCell->structure();
+
+ if (structure->isDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ return;
+ }
+
+ // If baseCell != base, then baseCell must be a proxy for another object.
+ if (baseCell != slot.base()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ return;
+ }
+
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(returnAddress);
+
+ // Cache hit: Specialize instruction and ref Structures.
+
+ // Structure transition, cache transition info
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ StructureChain* prototypeChain = structure->prototypeChain(callFrame);
+ if (!prototypeChain->isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ return;
+ }
+ stubInfo->initPutByIdTransition(structure->previousID(), structure, prototypeChain);
+ JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress);
+ return;
+ }
+
+ stubInfo->initPutByIdReplace(structure);
+
+ JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
+}
+
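+// Similarly, tryCacheGetByID escalates a get_by_id site through progressively more general
+// forms: array/string length patching first, then self, proto, or chain caching against
+// specific Structures, and finally cti_op_get_by_id_generic once the access proves
+// uncacheable.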
+NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot)
+{
+ // FIXME: Write a test that proves we need to check for recursion here just
+ // like the interpreter does, then add a check for recursion.
+
+ // FIXME: Cache property access for immediates.
+ if (!baseValue.isCell()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ return;
+ }
+
+ JSGlobalData* globalData = &callFrame->globalData();
+
+ if (isJSArray(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
+ JIT::compilePatchGetArrayLength(callFrame->scopeChain()->globalData, codeBlock, returnAddress);
+ return;
+ }
+
+ if (isJSString(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
+ // The tradeoff of compiling a patched inline string length access routine does not seem
+ // to pay off, so we currently only do this for arrays.
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs.ctiStringLengthTrampoline());
+ return;
+ }
+
+ // Uncacheable: give up.
+ if (!slot.isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ return;
+ }
+
+ JSCell* baseCell = asCell(baseValue);
+ Structure* structure = baseCell->structure();
+
+ if (structure->isDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ return;
+ }
+
+ // In the interpreter the last structure is trapped here; in CTI we use the
+ // *_second method to achieve a similar (but not quite the same) effect.
+
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(returnAddress);
+
+ // Cache hit: Specialize instruction and ref Structures.
+
+ if (slot.slotBase() == baseValue) {
+ // Set this up so derefStructures can do its job.
+ stubInfo->initGetByIdSelf(structure);
+
+ JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
+ return;
+ }
+
+ if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
+ ASSERT(slot.slotBase().isObject());
+
+ JSObject* slotBaseObject = asObject(slot.slotBase());
+
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary())
+ slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure()));
+
+ stubInfo->initGetByIdProto(structure, slotBaseObject->structure());
+
+ JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), slot.cachedOffset(), returnAddress);
+ return;
+ }
+
+ size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot);
+ if (!count) {
+ stubInfo->opcodeID = op_get_by_id_generic;
+ return;
+ }
+
+ StructureChain* prototypeChain = structure->prototypeChain(callFrame);
+ if (!prototypeChain->isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ return;
+ }
+ stubInfo->initGetByIdChain(structure, prototypeChain);
+ JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, slot.cachedOffset(), returnAddress);
+}
+
+#endif
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#define SETUP_VA_LISTL_ARGS va_list vl_args; va_start(vl_args, args)
+#else
+#define SETUP_VA_LISTL_ARGS
+#endif
+
+#ifndef NDEBUG
+
+extern "C" {
+
+static void jscGeneratedNativeCode()
+{
+ // When executing a JIT stub function (which might do an allocation), we hack the return address
+ // to pretend to be executing this function, to keep stack logging tools from blowing out
+ // memory.
+}
+
+}
+
+struct StackHack {
+ ALWAYS_INLINE StackHack(JITStackFrame& stackFrame)
+ : stackFrame(stackFrame)
+ , savedReturnAddress(*stackFrame.returnAddressSlot())
+ {
+ *stackFrame.returnAddressSlot() = ReturnAddressPtr(FunctionPtr(jscGeneratedNativeCode));
+ }
+
+ ALWAYS_INLINE ~StackHack()
+ {
+ *stackFrame.returnAddressSlot() = savedReturnAddress;
+ }
+
+ JITStackFrame& stackFrame;
+ ReturnAddressPtr savedReturnAddress;
+};
+
+#define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
+#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = ReturnAddressPtr(returnAddress)
+#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress
+
+#else
+
+#define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS)
+#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = ReturnAddressPtr(returnAddress)
+#define STUB_RETURN_ADDRESS *stackFrame.returnAddressSlot()
+
+#endif
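+
+// A usage sketch (see, e.g., cti_op_put_by_id below): STUB_INIT_STACK_FRAME materializes the
+// JITStackFrame view of the stack (and, in debug builds, swaps in jscGeneratedNativeCode via
+// StackHack), while STUB_RETURN_ADDRESS always yields the real JIT call site - which is what
+// gets handed to ctiPatchCallByReturnAddress for repatching.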
+
+// The reason this is not inlined is to avoid having to do a PIC branch
+// to get the address of the ctiVMThrowTrampoline function. It's also
+// good to keep the code size down by leaving as much of the exception
+// handling code out of line as possible.
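+// Mechanically, a stub "throws" by recording the exception in globalData and overwriting its
+// own return-address slot, so that returning from the stub lands in ctiVMThrowTrampoline
+// rather than back at the patched call site.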
+static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
+{
+ ASSERT(globalData->exception);
+ globalData->exceptionLocation = exceptionLocation;
+ returnAddressSlot = ReturnAddressPtr(FunctionPtr(ctiVMThrowTrampoline));
+}
+
+static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
+{
+ globalData->exception = createStackOverflowError(callFrame);
+ returnToThrowTrampoline(globalData, exceptionLocation, returnAddressSlot);
+}
+
+#define VM_THROW_EXCEPTION() \
+ do { \
+ VM_THROW_EXCEPTION_AT_END(); \
+ return 0; \
+ } while (0)
+#define VM_THROW_EXCEPTION_AT_END() \
+ returnToThrowTrampoline(stackFrame.globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS)
+
+#define CHECK_FOR_EXCEPTION() \
+ do { \
+ if (UNLIKELY(stackFrame.globalData->exception != JSValue())) \
+ VM_THROW_EXCEPTION(); \
+ } while (0)
+#define CHECK_FOR_EXCEPTION_AT_END() \
+ do { \
+ if (UNLIKELY(stackFrame.globalData->exception != JSValue())) \
+ VM_THROW_EXCEPTION_AT_END(); \
+ } while (0)
+#define CHECK_FOR_EXCEPTION_VOID() \
+ do { \
+ if (UNLIKELY(stackFrame.globalData->exception != JSValue())) { \
+ VM_THROW_EXCEPTION_AT_END(); \
+ return; \
+ } \
+ } while (0)
+
+namespace JITStubs {
+
+#if PLATFORM_ARM_ARCH(7)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm volatile ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ HIDE_SYMBOL(cti_##op) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "str lr, [sp, #0x1c]" "\n" \
+ "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "ldr lr, [sp, #0x1c]" "\n" \
+ "bx lr" "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \
+
+#else
+#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
+#endif
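+
+// To illustrate (an informal expansion, not generated code): on most platforms
+// DEFINE_STUB_FUNCTION(EncodedJSValue, op_add) declares simply
+//
+//     EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION) { ... }
+//
+// whereas on ARMv7 it emits a Thumb thunk named cti_op_add that spills lr into the
+// thunkReturnAddress slot (sp + 0x1c, matching the ASSERT in JITThunks::JITThunks), calls
+// the C function JITStubThunked_op_add, restores lr, and returns - keeping each stub's
+// return address observable at a fixed stack offset.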
+
+DEFINE_STUB_FUNCTION(JSObject*, op_convert_this)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v1 = stackFrame.args[0].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSObject* result = v1.toThisObject(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(void, op_end)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ScopeChainNode* scopeChain = stackFrame.callFrame->scopeChain();
+ ASSERT(scopeChain->refCount > 1);
+ scopeChain->deref();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v1 = stackFrame.args[0].jsValue();
+ JSValue v2 = stackFrame.args[1].jsValue();
+
+ double left;
+ double right = 0.0;
+
+ bool rightIsNumber = v2.getNumber(right);
+ if (rightIsNumber && v1.getNumber(left))
+ return JSValue::encode(jsNumber(stackFrame.globalData, left + right));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool leftIsString = v1.isString();
+ if (leftIsString && v2.isString()) {
+ RefPtr<UString::Rep> value = concatenate(asString(v1)->value().rep(), asString(v2)->value().rep());
+ if (UNLIKELY(!value)) {
+ throwOutOfMemoryError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+
+ return JSValue::encode(jsString(stackFrame.globalData, value.release()));
+ }
+
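+ // Note: the non-short-circuiting '&' below looks like a typo but appears deliberate - both
+ // operands are already-computed bools, so a bitwise AND avoids an extra branch.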
+ if (rightIsNumber & leftIsString) {
+ RefPtr<UString::Rep> value = v2.isInt32Fast() ?
+ concatenate(asString(v1)->value().rep(), v2.getInt32Fast()) :
+ concatenate(asString(v1)->value().rep(), right);
+
+ if (UNLIKELY(!value)) {
+ throwOutOfMemoryError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ return JSValue::encode(jsString(stackFrame.globalData, value.release()));
+ }
+
+ // All other cases are pretty uncommon
+ JSValue result = jsAddSlowCase(callFrame, v1, v2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_inc)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, v.toNumber(callFrame) + 1);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, timeout_check)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSGlobalData* globalData = stackFrame.globalData;
+ TimeoutChecker* timeoutChecker = globalData->timeoutChecker;
+
+ if (timeoutChecker->didTimeOut(stackFrame.callFrame)) {
+ globalData->exception = createInterruptedExecutionException(globalData);
+ VM_THROW_EXCEPTION_AT_END();
+ }
+
+ return timeoutChecker->ticksUntilNextCheck();
+}
+
+DEFINE_STUB_FUNCTION(void, register_file_check)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ if (LIKELY(stackFrame.registerFile->grow(&stackFrame.callFrame->registers()[stackFrame.callFrame->codeBlock()->m_numCalleeRegisters])))
+ return;
+
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ CallFrame* oldCallFrame = stackFrame.callFrame->callerFrame();
+ stackFrame.callFrame = oldCallFrame;
+ throwStackOverflowError(oldCallFrame, stackFrame.globalData, ReturnAddressPtr(oldCallFrame->returnPC()), STUB_RETURN_ADDRESS);
+}
+
+DEFINE_STUB_FUNCTION(int, op_loop_if_less)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLess(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(int, op_loop_if_lesseq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLessEq(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_object)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return constructEmptyObject(stackFrame.callFrame);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ PutPropertySlot slot;
+ stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot;
+ stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_id_second));
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_second)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ PutPropertySlot slot;
+ stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
+ JITThunks::tryCachePutByID(stackFrame.callFrame, stackFrame.callFrame->codeBlock(), STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot);
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot;
+ stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_put_by_id_transition_realloc)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ int32_t oldSize = stackFrame.args[1].int32();
+ int32_t newSize = stackFrame.args[2].int32();
+
+ ASSERT(baseValue.isObject());
+ asObject(baseValue)->allocatePropertyStorage(oldSize, newSize);
+
+ return JSValue::encode(baseValue);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_second));
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_method_check_second));
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_second)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ CHECK_FOR_EXCEPTION();
+
+ // If we successfully got something, then the base from which it is being accessed must
+ // be an object. (This assertion ensures the asObject() call below is safe; it comes after
+ // an isCacheable() check.)
+ ASSERT(!slot.isCacheable() || slot.slotBase().isObject());
+
+ // Check that:
+ // * We're dealing with a JSCell,
+ // * the property is cacheable,
+ // * it's not a dictionary, and
+ // * there is a function cached.
+ Structure* structure;
+ JSCell* specific;
+ JSObject* slotBaseObject;
+ if (baseValue.isCell()
+ && slot.isCacheable()
+ && !(structure = asCell(baseValue)->structure())->isDictionary()
+ && (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
+ && specific
+ ) {
+
+ JSFunction* callee = (JSFunction*)specific;
+
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary())
+ slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure()));
+
+ // The result fetched should always be the callee!
+ ASSERT(result == JSValue(callee));
+ MethodCallLinkInfo& methodCallLinkInfo = callFrame->codeBlock()->getMethodCallLinkInfo(STUB_RETURN_ADDRESS);
+
+ // Check to see if the function is on the object's prototype. Patch up the code to optimize.
+ if (slot.slotBase() == structure->prototypeForLookup(callFrame))
+ JIT::patchMethodCallProto(callFrame->codeBlock(), methodCallLinkInfo, callee, structure, slotBaseObject);
+ // Check to see if the function is on the object itself.
+ // Since we generate the method-check to check both the structure and a prototype-structure (since this
+ // is the common case), we have a problem - we need to patch the prototype structure check to do something
+ // useful. We could try to nop it out altogether, but that's a little messy, so let's do something
+ // simpler for now: perform the check against a special object on the global object that is used only
+ // for this purpose. That object is in no way exposed, so the check will always pass.
+ else if (slot.slotBase() == baseValue)
+ JIT::patchMethodCallProto(callFrame->codeBlock(), methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject()->methodCallDummy());
+
+ // For now let any other case be cached as a normal get_by_id.
+ }
+
+ // Revert the get_by_id op back to being a regular get_by_id - allow it to cache like normal, if it needs to.
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_second)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ JITThunks::tryCacheGetByID(callFrame, callFrame->codeBlock(), STUB_RETURN_ADDRESS, baseValue, ident, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ CHECK_FOR_EXCEPTION();
+
+ if (baseValue.isCell()
+ && slot.isCacheable()
+ && !asCell(baseValue)->structure()->isDictionary()
+ && slot.slotBase() == baseValue) {
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+
+ ASSERT(slot.slotBase().isObject());
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex = 1;
+
+ if (stubInfo->opcodeID == op_get_by_id_self) {
+ ASSERT(!stubInfo->stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList(CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure);
+ stubInfo->initGetByIdSelfList(polymorphicStructureList, 2);
+ } else {
+ polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
+ listIndex = stubInfo->u.getByIdSelfList.listSize;
+ stubInfo->u.getByIdSelfList.listSize++;
+ }
+
+ JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, asCell(baseValue)->structure(), slot.cachedOffset());
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ } else
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ return JSValue::encode(result);
+}
+
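+// Helper for the polymorphic stubs below: converts a monomorphic proto/chain stub into a
+// PolymorphicAccessStructureList (or extends an existing list), returning the next slot
+// index to fill. Callers repatch the site to a *_full or generic stub once the list reaches
+// POLYMORPHIC_LIST_CACHE_SIZE entries.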
+static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(StructureStubInfo* stubInfo, int& listIndex)
+{
+ PolymorphicAccessStructureList* prototypeStructureList = 0;
+ listIndex = 1;
+
+ switch (stubInfo->opcodeID) {
+ case op_get_by_id_proto:
+ prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure, stubInfo->u.getByIdProto.prototypeStructure);
+ stubInfo->stubRoutine = CodeLocationLabel();
+ stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
+ break;
+ case op_get_by_id_chain:
+ prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain);
+ stubInfo->stubRoutine = CodeLocationLabel();
+ stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
+ break;
+ case op_get_by_id_proto_list:
+ prototypeStructureList = stubInfo->u.getByIdProtoList.structureList;
+ listIndex = stubInfo->u.getByIdProtoList.listSize;
+ stubInfo->u.getByIdProtoList.listSize++;
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE);
+ return prototypeStructureList;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION();
+
+ if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) {
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ return JSValue::encode(result);
+ }
+
+ Structure* structure = asCell(baseValue)->structure();
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+
+ ASSERT(slot.slotBase().isObject());
+ JSObject* slotBaseObject = asObject(slot.slotBase());
+
+ if (slot.slotBase() == baseValue)
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ else if (slot.slotBase() == asCell(baseValue)->structure()->prototypeForLookup(callFrame)) {
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary())
+ slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure()));
+
+ int listIndex;
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
+
+ JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), slot.cachedOffset());
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ } else if (size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot)) {
+ StructureChain* protoChain = structure->prototypeChain(callFrame);
+ if (!protoChain->isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ return JSValue::encode(result);
+ }
+
+ int listIndex;
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
+ JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, slot.cachedOffset());
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ } else
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list_full)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_array_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_string_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+#endif
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue value = stackFrame.args[0].jsValue();
+ JSValue baseVal = stackFrame.args[1].jsValue();
+ JSValue proto = stackFrame.args[2].jsValue();
+
+ // At least one of these checks must have failed to get to the slow case.
+ ASSERT(!value.isCell() || !baseVal.isCell() || !proto.isCell()
+ || !value.isObject() || !baseVal.isObject() || !proto.isObject()
+ || (asObject(baseVal)->structure()->typeInfo().flags() & (ImplementsHasInstance | OverridesHasInstance)) != ImplementsHasInstance);
+
+
+ // ECMA-262 15.3.5.3:
+ // Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e. is not a function).
+ TypeInfo typeInfo(UnspecifiedType, 0);
+ if (!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance()) {
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+ }
+ ASSERT(typeInfo.type() != UnspecifiedType);
+
+ if (!typeInfo.overridesHasInstance()) {
+ if (!value.isObject())
+ return JSValue::encode(jsBoolean(false));
+
+ if (!proto.isObject()) {
+ throwError(callFrame, TypeError, "instanceof called on an object with an invalid prototype property.");
+ VM_THROW_EXCEPTION();
+ }
+ }
+
+ JSValue result = jsBoolean(asObject(baseVal)->hasInstance(callFrame, value, proto));
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_id)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSObject* baseObj = stackFrame.args[0].jsValue().toObject(callFrame);
+
+ JSValue result = jsBoolean(baseObj->deleteProperty(callFrame, stackFrame.args[1].identifier()));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_mul)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ double left;
+ double right;
+ if (src1.getNumber(left) && src2.getNumber(right))
+ return JSValue::encode(jsNumber(stackFrame.globalData, left * right));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, src1.toNumber(callFrame) * src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_func)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return stackFrame.args[0].funcDeclNode()->makeFunction(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
+}
+
+DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+#ifndef NDEBUG
+ CallData callData;
+ ASSERT(stackFrame.args[0].jsValue().getCallData(callData) == CallTypeJS);
+#endif
+
+ JSFunction* function = asFunction(stackFrame.args[0].jsValue());
+ ASSERT(!function->isHostFunction());
+ FunctionBodyNode* body = function->body();
+ ScopeChainNode* callDataScopeChain = function->scope().node();
+ body->jitCode(callDataScopeChain);
+
+ return &(body->generatedBytecode());
+}
+
+DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* newCodeBlock = stackFrame.args[3].codeBlock();
+ ASSERT(newCodeBlock->codeType() != NativeCode);
+ int argCount = stackFrame.args[2].int32();
+
+ ASSERT(argCount != newCodeBlock->m_numParameters);
+
+ CallFrame* oldCallFrame = callFrame->callerFrame();
+
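+ // A summary of the two cases below: if more arguments were passed than the callee declares,
+ // the frame is moved up and the declared parameters are re-copied into the slots the callee
+ // expects, leaving the extras addressable below; if too few were passed, the register file
+ // is grown and the missing parameter slots are filled with jsUndefined().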
+ if (argCount > newCodeBlock->m_numParameters) {
+ size_t numParameters = newCodeBlock->m_numParameters;
+ Register* r = callFrame->registers() + numParameters;
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - numParameters - argCount;
+ for (size_t i = 0; i < numParameters; ++i)
+ argv[i + argCount] = argv[i];
+
+ callFrame = CallFrame::create(r);
+ callFrame->setCallerFrame(oldCallFrame);
+ } else {
+ size_t omittedArgCount = newCodeBlock->m_numParameters - argCount;
+ Register* r = callFrame->registers() + omittedArgCount;
+ Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
+ if (!stackFrame.registerFile->grow(newEnd)) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ stackFrame.callFrame = oldCallFrame;
+ throwStackOverflowError(oldCallFrame, stackFrame.globalData, stackFrame.args[1].returnAddress(), STUB_RETURN_ADDRESS);
+ RETURN_POINTER_PAIR(0, 0);
+ }
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount;
+ for (size_t i = 0; i < omittedArgCount; ++i)
+ argv[i] = jsUndefined();
+
+ callFrame = CallFrame::create(r);
+ callFrame->setCallerFrame(oldCallFrame);
+ }
+
+ RETURN_POINTER_PAIR(newCodeBlock, callFrame);
+}
+
+DEFINE_STUB_FUNCTION(void*, vm_dontLazyLinkCall)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSGlobalData* globalData = stackFrame.globalData;
+ JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
+
+ ctiPatchNearCallByReturnAddress(stackFrame.callFrame->callerFrame()->codeBlock(), stackFrame.args[1].returnAddress(), globalData->jitStubs.ctiVirtualCallLink());
+
+ return callee->body()->generatedJITCode().addressForCall().executableAddress();
+}
+
+DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
+ JITCode& jitCode = callee->body()->generatedJITCode();
+
+ CodeBlock* codeBlock = 0;
+ if (!callee->isHostFunction())
+ codeBlock = &callee->body()->bytecode(callee->scope().node());
+ else
+ codeBlock = &callee->body()->generatedBytecode();
+
+ CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(stackFrame.args[1].returnAddress());
+ JIT::linkCall(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, jitCode, callLinkInfo, stackFrame.args[2].int32(), stackFrame.globalData);
+
+ return jitCode.addressForCall().executableAddress();
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSActivation* activation = new (stackFrame.globalData) JSActivation(stackFrame.callFrame, static_cast<FunctionBodyNode*>(stackFrame.callFrame->codeBlock()->ownerNode()));
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->copy()->push(activation));
+ return activation;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue funcVal = stackFrame.args[0].jsValue();
+
+ CallData callData;
+ CallType callType = funcVal.getCallData(callData);
+
+ ASSERT(callType != CallTypeJS);
+
+ if (callType == CallTypeHost) {
+ int registerOffset = stackFrame.args[1].int32();
+ int argCount = stackFrame.args[2].int32();
+ CallFrame* previousCallFrame = stackFrame.callFrame;
+ CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset);
+
+ callFrame->init(0, static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()), previousCallFrame->scopeChain(), previousCallFrame, 0, argCount, 0);
+ stackFrame.callFrame = callFrame;
+
+ Register* argv = stackFrame.callFrame->registers() - RegisterFile::CallFrameHeaderSize - argCount;
+ ArgList argList(argv + 1, argCount - 1);
+
+ JSValue returnValue;
+ {
+ SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
+
+ // FIXME: All host methods should be calling toThisObject, but this is not presently the case.
+ JSValue thisValue = argv[0].jsValue();
+ if (thisValue == jsNull())
+ thisValue = callFrame->globalThisValue();
+
+ returnValue = callData.native.function(callFrame, asObject(funcVal), thisValue, argList);
+ }
+ stackFrame.callFrame = previousCallFrame;
+ CHECK_FOR_EXCEPTION();
+
+ return JSValue::encode(returnValue);
+ }
+
+ ASSERT(callType == CallTypeNone);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createNotAFunctionError(stackFrame.callFrame, funcVal, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(void, op_create_arguments)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame);
+ stackFrame.callFrame->setCalleeArguments(arguments);
+ stackFrame.callFrame[RegisterFile::ArgumentsRegister] = arguments;
+}
+
+DEFINE_STUB_FUNCTION(void, op_create_arguments_no_params)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame, Arguments::NoParameters);
+ stackFrame.callFrame->setCalleeArguments(arguments);
+ stackFrame.callFrame[RegisterFile::ArgumentsRegister] = arguments;
+}
+
+DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
+ asActivation(stackFrame.args[0].jsValue())->copyRegisters(stackFrame.callFrame->optionalCalleeArguments());
+}
+
+DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(stackFrame.callFrame->codeBlock()->usesArguments() && !stackFrame.callFrame->codeBlock()->needsFullScopeChain());
+ if (stackFrame.callFrame->optionalCalleeArguments())
+ stackFrame.callFrame->optionalCalleeArguments()->copyRegisters();
+}
+
+DEFINE_STUB_FUNCTION(void, op_profile_will_call)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(*stackFrame.enabledProfilerReference);
+ (*stackFrame.enabledProfilerReference)->willExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
+}
+
+DEFINE_STUB_FUNCTION(void, op_profile_did_call)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(*stackFrame.enabledProfilerReference);
+ (*stackFrame.enabledProfilerReference)->didExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
+}
+
+DEFINE_STUB_FUNCTION(void, op_ret_scopeChain)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
+ stackFrame.callFrame->scopeChain()->deref();
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_array)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ArgList argList(&stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32());
+ return constructArray(stackFrame.callFrame, argList);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+ ASSERT(iter != end);
+
+ Identifier& ident = stackFrame.args[0].identifier();
+ do {
+ JSObject* o = *iter;
+ PropertySlot slot(o);
+ if (o->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+ } while (++iter != end);
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_construct_JSConstruct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSFunction* constructor = asFunction(stackFrame.args[0].jsValue());
+ if (constructor->isHostFunction()) {
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createNotAConstructorError(callFrame, constructor, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+ }
+
+#ifndef NDEBUG
+ ConstructData constructData;
+ ASSERT(constructor->getConstructData(constructData) == ConstructTypeJS);
+#endif
+
+ Structure* structure;
+ if (stackFrame.args[3].jsValue().isObject())
+ structure = asObject(stackFrame.args[3].jsValue())->inheritorID();
+ else
+ structure = constructor->scope().node()->globalObject()->emptyObjectStructure();
+#ifdef QT_BUILD_SCRIPT_LIB
+ return new (stackFrame.globalData) QScriptObject(structure);
+#else
+ return new (stackFrame.globalData) JSObject(structure);
+#endif
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue constrVal = stackFrame.args[0].jsValue();
+ int argCount = stackFrame.args[2].int32();
+ int thisRegister = stackFrame.args[4].int32();
+
+ ConstructData constructData;
+ ConstructType constructType = constrVal.getConstructData(constructData);
+
+ if (constructType == ConstructTypeHost) {
+ ArgList argList(callFrame->registers() + thisRegister + 1, argCount - 1);
+
+ JSValue returnValue;
+ {
+ SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
+ returnValue = constructData.native.function(callFrame, asObject(constrVal), argList);
+ }
+ CHECK_FOR_EXCEPTION();
+
+ return JSValue::encode(returnValue);
+ }
+
+ ASSERT(constructType == ConstructTypeNone);
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createNotAConstructorError(callFrame, constrVal, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ JSValue result;
+
+ if (LIKELY(subscript.isUInt32Fast())) {
+ uint32_t i = subscript.getUInt32Fast();
+ if (isJSArray(globalData, baseValue)) {
+ JSArray* jsArray = asArray(baseValue);
+ if (jsArray->canGetIndex(i))
+ result = jsArray->getIndex(i);
+ else
+ result = jsArray->JSArray::get(callFrame, i);
+ } else if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) {
+ // This is a string-index access: repatch the call so future accesses go straight to the string-specialized stub.
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
+ result = asString(baseValue)->getIndex(stackFrame.globalData, i);
+ } else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array));
+ return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
+ } else
+ result = baseValue.get(callFrame, i);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ result = baseValue.get(callFrame, property);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ JSValue result;
+
+ if (LIKELY(subscript.isUInt32Fast())) {
+ uint32_t i = subscript.getUInt32Fast();
+ if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i))
+ result = asString(baseValue)->getIndex(stackFrame.globalData, i);
+ else {
+ result = baseValue.get(callFrame, i);
+ if (!isJSString(globalData, baseValue))
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
+ }
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ result = baseValue.get(callFrame, property);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ JSValue result;
+
+ if (LIKELY(subscript.isUInt32Fast())) {
+ uint32_t i = subscript.getUInt32Fast();
+ if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
+ }
+
+ result = baseValue.get(callFrame, i);
+ if (!isJSByteArray(globalData, baseValue))
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ result = baseValue.get(callFrame, property);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_func)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+
+ // FIXME: add scopeDepthIsZero optimization
+
+ ASSERT(iter != end);
+
+ Identifier& ident = stackFrame.args[0].identifier();
+ JSObject* base;
+ do {
+ base = *iter;
+ PropertySlot slot(base);
+ if (base->getPropertySlot(callFrame, ident, slot)) {
+ // ECMA 11.2.3 says that if we hit an activation the this value should be null.
+ // However, section 10.2.3 says that in the case where the value provided
+ // by the caller is null, the global object should be used. It also says
+ // that the section does not apply to internal functions, but for simplicity
+ // of implementation we use the global object anyway here. This guarantees
+ // that host objects always see a valid object as the this value.
+ // We also handle wrapper substitution for the global object at the same time.
+ JSObject* thisObj = base->toThisObject(callFrame);
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ callFrame->registers()[stackFrame.args[1].int32()] = JSValue(thisObj);
+ return JSValue::encode(result);
+ }
+ ++iter;
+ } while (iter != end);
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION_AT_END();
+ return JSValue::encode(JSValue());
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ double left;
+ double right;
+ if (src1.getNumber(left) && src2.getNumber(right))
+ return JSValue::encode(jsNumber(stackFrame.globalData, left - right));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, src1.toNumber(callFrame) - src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_val)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue value = stackFrame.args[2].jsValue();
+
+ if (LIKELY(subscript.isUInt32Fast())) {
+ uint32_t i = subscript.getUInt32Fast();
+ if (isJSArray(globalData, baseValue)) {
+ JSArray* jsArray = asArray(baseValue);
+ if (jsArray->canSetIndex(i))
+ jsArray->setIndex(i, value);
+ else
+ jsArray->JSArray::put(callFrame, i, value);
+ } else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ JSByteArray* jsByteArray = asByteArray(baseValue);
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_byte_array));
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ if (value.isInt32Fast()) {
+ jsByteArray->setIndex(i, value.getInt32Fast());
+ return;
+ } else {
+ double dValue = 0;
+ if (value.getNumber(dValue)) {
+ jsByteArray->setIndex(i, dValue);
+ return;
+ }
+ }
+
+ baseValue.put(callFrame, i, value);
+ } else
+ baseValue.put(callFrame, i, value);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot;
+ baseValue.put(callFrame, property, value, slot);
+ }
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_val_array)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ int i = stackFrame.args[1].int32();
+ JSValue value = stackFrame.args[2].jsValue();
+
+ ASSERT(isJSArray(stackFrame.globalData, baseValue));
+
+ if (LIKELY(i >= 0))
+ asArray(baseValue)->JSArray::put(callFrame, i, value);
+ else {
+ // This should work, since we're re-boxing an immediate that was unboxed in JIT code.
+ ASSERT(JSValue::makeInt32Fast(i));
+ Identifier property(callFrame, JSValue::makeInt32Fast(i).toString(callFrame));
+ // FIXME: can toString throw an exception here?
+ if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot;
+ baseValue.put(callFrame, property, value, slot);
+ }
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue value = stackFrame.args[2].jsValue();
+
+ if (LIKELY(subscript.isUInt32Fast())) {
+ uint32_t i = subscript.getUInt32Fast();
+ if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ JSByteArray* jsByteArray = asByteArray(baseValue);
+
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ if (value.isInt32Fast()) {
+ jsByteArray->setIndex(i, value.getInt32Fast());
+ return;
+ } else {
+ double dValue = 0;
+ if (value.getNumber(dValue)) {
+ jsByteArray->setIndex(i, dValue);
+ return;
+ }
+ }
+ }
+
+ if (!isJSByteArray(globalData, baseValue))
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val));
+ baseValue.put(callFrame, i, value);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot;
+ baseValue.put(callFrame, property, value, slot);
+ }
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_lesseq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(jsLessEq(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, op_loop_if_true)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = src1.toBoolean(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(int, op_load_varargs)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ RegisterFile* registerFile = stackFrame.registerFile;
+ int argsOffset = stackFrame.args[0].int32();
+ JSValue arguments = callFrame->registers()[argsOffset].jsValue();
+ uint32_t argCount = 0;
+ if (!arguments) {
+ int providedParams = callFrame->registers()[RegisterFile::ArgumentCount].i() - 1;
+ argCount = providedParams;
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ int32_t expectedParams = asFunction(callFrame->registers()[RegisterFile::Callee].jsValue())->body()->parameterCount();
+ int32_t inplaceArgs = min(providedParams, expectedParams);
+
+ Register* inplaceArgsDst = callFrame->registers() + argsOffset;
+
+ Register* inplaceArgsEnd = inplaceArgsDst + inplaceArgs;
+ Register* inplaceArgsEnd2 = inplaceArgsDst + providedParams;
+
+ Register* inplaceArgsSrc = callFrame->registers() - RegisterFile::CallFrameHeaderSize - expectedParams;
+ Register* inplaceArgsSrc2 = inplaceArgsSrc - providedParams - 1 + inplaceArgs;
+
+ // First step is to copy the "expected" parameters from their normal location relative to the callframe
+ while (inplaceArgsDst < inplaceArgsEnd)
+ *inplaceArgsDst++ = *inplaceArgsSrc++;
+
+ // Then we copy any additional arguments that may be further up the stack ('-1' to account for 'this')
+ while (inplaceArgsDst < inplaceArgsEnd2)
+ *inplaceArgsDst++ = *inplaceArgsSrc2++;
+
+ } else if (!arguments.isUndefinedOrNull()) {
+ if (!arguments.isObject()) {
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+ }
+ if (asObject(arguments)->classInfo() == &Arguments::info) {
+ Arguments* argsObject = asArguments(arguments);
+ argCount = argsObject->numProvidedArguments(callFrame);
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ argsObject->copyToRegisters(callFrame, callFrame->registers() + argsOffset, argCount);
+ } else if (isJSArray(&callFrame->globalData(), arguments)) {
+ JSArray* array = asArray(arguments);
+ argCount = array->length();
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ array->copyToRegisters(callFrame, callFrame->registers() + argsOffset, argCount);
+ } else if (asObject(arguments)->inherits(&JSArray::info)) {
+ JSObject* argObject = asObject(arguments);
+ argCount = argObject->get(callFrame, callFrame->propertyNames().length).toUInt32(callFrame);
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ Register* argsBuffer = callFrame->registers() + argsOffset;
+ for (unsigned i = 0; i < argCount; ++i) {
+ argsBuffer[i] = asObject(arguments)->get(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ }
+ } else {
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+ }
+ }
+
+ return argCount + 1;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_negate)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+
+ double v;
+ if (src.getNumber(v))
+ return JSValue::encode(jsNumber(stackFrame.globalData, -v));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, -src.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ int skip = stackFrame.args[1].int32();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+ ASSERT(iter != end);
+ while (skip--) {
+ ++iter;
+ ASSERT(iter != end);
+ }
+ Identifier& ident = stackFrame.args[0].identifier();
+ do {
+ JSObject* o = *iter;
+ PropertySlot slot(o);
+ if (o->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+ } while (++iter != end);
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalObject* globalObject = asGlobalObject(stackFrame.args[0].jsValue());
+ Identifier& ident = stackFrame.args[1].identifier();
+ unsigned globalResolveInfoIndex = stackFrame.args[2].int32();
+ ASSERT(globalObject->isGlobalObject());
+
+ PropertySlot slot(globalObject);
+ if (globalObject->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ if (slot.isCacheable() && !globalObject->structure()->isDictionary() && slot.slotBase() == globalObject) {
+ GlobalResolveInfo& globalResolveInfo = callFrame->codeBlock()->globalResolveInfo(globalResolveInfoIndex);
+ if (globalResolveInfo.structure)
+ globalResolveInfo.structure->deref();
+ globalObject->structure()->ref();
+ globalResolveInfo.structure = globalObject->structure();
+ globalResolveInfo.offset = slot.cachedOffset();
+ return JSValue::encode(result);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+
+ unsigned vPCIndex = callFrame->codeBlock()->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, callFrame->codeBlock());
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ double left;
+ double right;
+ if (src1.getNumber(left) && src2.getNumber(right))
+ return JSValue::encode(jsNumber(stackFrame.globalData, left / right));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, src1.toNumber(callFrame) / src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_dec)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, v.toNumber(callFrame) - 1);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, op_jless)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLess(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(int, op_jlesseq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLessEq(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_not)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = jsBoolean(!src.toBoolean(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, op_jtrue)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = src1.toBoolean(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
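+ // Post-increment protocol: the old value, coerced to a number, is returned as
+ // the result of the expression, while oldValue + 1 is written into the
+ // register named by args[1]. E.g. for "x = i++", 'x' receives 'number' and
+ // 'i' receives number + 1.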
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue number = v.toJSNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(stackFrame.globalData, number.uncheckedGetNumber() + 1);
+ return JSValue::encode(number);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_eq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ASSERT(!JSValue::areBothInt32Fast(src1, src2));
+ JSValue result = jsBoolean(JSValue::equalSlowCaseInline(callFrame, src1, src2));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
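+ // Per ECMA-262, shift counts are taken modulo 32; that is what the "& 0x1f"
+ // masks in the shift stubs below implement. Illustrative JS: (1 << 37)
+ // evaluates as 1 << (37 & 0x1f), i.e. 1 << 5 == 32.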
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
+
+ int32_t left;
+ uint32_t right;
+ if (JSValue::areBothInt32Fast(val, shift))
+ return JSValue::encode(jsNumber(stackFrame.globalData, val.getInt32Fast() << (shift.getInt32Fast() & 0x1f)));
+ if (val.numberToInt32(left) && shift.numberToUInt32(right))
+ return JSValue::encode(jsNumber(stackFrame.globalData, left << (right & 0x1f)));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, (val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitand)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ int32_t left;
+ int32_t right;
+ if (src1.numberToInt32(left) && src2.numberToInt32(right))
+ return JSValue::encode(jsNumber(stackFrame.globalData, left & right));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) & src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_rshift)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
+
+ int32_t left;
+ uint32_t right;
+ if (JSFastMath::canDoFastRshift(val, shift))
+ return JSValue::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
+ if (val.numberToInt32(left) && shift.numberToUInt32(right))
+ return JSValue::encode(jsNumber(stackFrame.globalData, left >> (right & 0x1f)));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, (val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitnot)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+
+ int32_t value;
+ if (src.numberToInt32(value))
+ return JSValue::encode(jsNumber(stackFrame.globalData, ~value));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, ~src.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
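+ // This stub yields two results: the resolved value is returned to the JIT'd
+ // code, and the base object is stored directly into the register named by
+ // args[1] (the JSValue(base) write below).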
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+
+ // FIXME: add scopeDepthIsZero optimization
+
+ ASSERT(iter != end);
+
+ Identifier& ident = stackFrame.args[0].identifier();
+ JSObject* base;
+ do {
+ base = *iter;
+ PropertySlot slot(base);
+ if (base->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ callFrame->registers()[stackFrame.args[1].int32()] = JSValue(base);
+ return JSValue::encode(result);
+ }
+ ++iter;
+ } while (iter != end);
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION_AT_END();
+ return JSValue::encode(JSValue());
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_func_exp)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return stackFrame.args[0].funcExprNode()->makeFunction(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_mod)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue dividendValue = stackFrame.args[0].jsValue();
+ JSValue divisorValue = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ double d = dividendValue.toNumber(callFrame);
+ JSValue result = jsNumber(stackFrame.globalData, fmod(d, divisorValue.toNumber(callFrame)));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_less)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(jsLess(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_neq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ ASSERT(!JSValue::areBothInt32Fast(src1, src2));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(!JSValue::equalSlowCaseInline(callFrame, src1, src2));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_dec)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue number = v.toJSNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(stackFrame.globalData, number.uncheckedGetNumber() - 1);
+ return JSValue::encode(number);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_urshift)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ if (JSFastMath::canDoFastUrshift(val, shift))
+ return JSValue::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
+
+ JSValue result = jsNumber(stackFrame.globalData, (val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitxor)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) ^ src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), stackFrame.args[0].regExp());
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) | src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
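+ // Returning the empty JSValue() signals "not a genuine call to the global
+ // eval"; the JIT'd caller is then expected to fall back to the ordinary call
+ // path (that fallback lives in the generated code, not in this stub).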
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ RegisterFile* registerFile = stackFrame.registerFile;
+
+ Interpreter* interpreter = stackFrame.globalData->interpreter;
+
+ JSValue funcVal = stackFrame.args[0].jsValue();
+ int registerOffset = stackFrame.args[1].int32();
+ int argCount = stackFrame.args[2].int32();
+
+ Register* newCallFrame = callFrame->registers() + registerOffset;
+ Register* argv = newCallFrame - RegisterFile::CallFrameHeaderSize - argCount;
+ JSValue thisValue = argv[0].jsValue();
+ JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject();
+
+ if (thisValue == globalObject && funcVal == globalObject->evalFunction()) {
+ JSValue exceptionValue;
+ JSValue result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset, exceptionValue);
+ if (UNLIKELY(exceptionValue != JSValue())) {
+ stackFrame.globalData->exception = exceptionValue;
+ VM_THROW_EXCEPTION_AT_END();
+ }
+ return JSValue::encode(result);
+ }
+
+ return JSValue::encode(JSValue());
+}
+
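+ // Exception protocol: throwException() unwinds to the nearest handler. If one
+ // is found, the stub's return address is patched (STUB_SET_RETURN_ADDRESS) so
+ // that returning from this call lands in the catch routine; if none is found,
+ // control is redirected to ctiOpThrowNotCaught to leave the VM.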
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_throw)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+
+ JSValue exceptionValue = stackFrame.args[0].jsValue();
+ ASSERT(exceptionValue);
+
+ HandlerInfo* handler = stackFrame.globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, true);
+
+ if (!handler) {
+ *stackFrame.exception = exceptionValue;
+ STUB_SET_RETURN_ADDRESS(reinterpret_cast<void*>(ctiOpThrowNotCaught));
+ return JSValue::encode(jsNull());
+ }
+
+ stackFrame.callFrame = callFrame;
+ void* catchRoutine = handler->nativeCode.executableAddress();
+ ASSERT(catchRoutine);
+ STUB_SET_RETURN_ADDRESS(catchRoutine);
+ return JSValue::encode(exceptionValue);
+}
+
+DEFINE_STUB_FUNCTION(JSPropertyNameIterator*, op_get_pnames)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSPropertyNameIterator::create(stackFrame.callFrame, stackFrame.args[0].jsValue());
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_next_pname)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSPropertyNameIterator* it = stackFrame.args[0].propertyNameIterator();
+ JSValue temp = it->next(stackFrame.callFrame);
+ if (!temp)
+ it->invalidate();
+ return JSValue::encode(temp);
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_push_scope)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSObject* o = stackFrame.args[0].jsValue().toObject(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->push(o));
+ return o;
+}
+
+DEFINE_STUB_FUNCTION(void, op_pop_scope)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->pop());
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_typeof)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsTypeStringForValue(stackFrame.callFrame, stackFrame.args[0].jsValue()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_undefined)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+ return JSValue::encode(jsBoolean(v.isCell() ? v.asCell()->structure()->typeInfo().masqueradesAsUndefined() : v.isUndefined()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_boolean)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isBoolean()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_number)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isNumber()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_string)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(isJSString(stackFrame.globalData, stackFrame.args[0].jsValue())));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_object)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(jsIsObjectType(stackFrame.args[0].jsValue())));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_function)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(jsIsFunctionType(stackFrame.args[0].jsValue())));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_stricteq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ return JSValue::encode(jsBoolean(JSValue::strictEqual(src1, src2)));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_primitive)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(stackFrame.args[0].jsValue().toPrimitive(stackFrame.callFrame));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_strcat)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(concatenateStrings(stackFrame.callFrame, &stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ return JSValue::encode(jsBoolean(!JSValue::strictEqual(src1, src2)));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_jsnumber)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = src.toJSNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_in)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue baseVal = stackFrame.args[1].jsValue();
+
+ if (!baseVal.isObject()) {
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "in", baseVal, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+ }
+
+ JSValue propName = stackFrame.args[0].jsValue();
+ JSObject* baseObj = asObject(baseVal);
+
+ uint32_t i;
+ if (propName.getUInt32(i))
+ return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, i)));
+
+ Identifier property(callFrame, propName.toString(callFrame));
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, property)));
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_push_new_scope)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSObject* scope = new (stackFrame.globalData) JSStaticScopeObject(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].jsValue(), DontDelete);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ callFrame->setScopeChain(callFrame->scopeChain()->push(scope));
+ return scope;
+}
+
+DEFINE_STUB_FUNCTION(void, op_jmp_scopes)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ unsigned count = stackFrame.args[0].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ScopeChainNode* tmp = callFrame->scopeChain();
+ while (count--)
+ tmp = tmp->pop();
+ callFrame->setScopeChain(tmp);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_index)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ unsigned property = stackFrame.args[1].int32();
+
+ stackFrame.args[0].jsValue().put(callFrame, property, stackFrame.args[2].jsValue());
+}
+
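+ // The op_switch_* stubs return a raw machine-code address (the CTI code for
+ // the matching case, or the table's ctiDefault entry), and the JIT'd code
+ // jumps to whatever pointer comes back.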
+DEFINE_STUB_FUNCTION(void*, op_switch_imm)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ if (scrutinee.isInt32Fast())
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.getInt32Fast()).executableAddress();
+
+ double value;
+ int32_t intValue;
+ if (scrutinee.getNumber(value) && ((intValue = static_cast<int32_t>(value)) == value))
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(intValue).executableAddress();
+
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
+}
+
+DEFINE_STUB_FUNCTION(void*, op_switch_char)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
+
+ if (scrutinee.isString()) {
+ UString::Rep* value = asString(scrutinee)->value().rep();
+ if (value->size() == 1)
+ result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).executableAddress();
+ }
+
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_switch_string)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
+
+ if (scrutinee.isString()) {
+ UString::Rep* value = asString(scrutinee)->value().rep();
+ result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).executableAddress();
+ }
+
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_val)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSObject* baseObj = baseValue.toObject(callFrame); // may throw
+
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue result;
+ uint32_t i;
+ if (subscript.getUInt32(i))
+ result = jsBoolean(baseObj->deleteProperty(callFrame, i));
+ else {
+ CHECK_FOR_EXCEPTION();
+ Identifier property(callFrame, subscript.toString(callFrame));
+ CHECK_FOR_EXCEPTION();
+ result = jsBoolean(baseObj->deleteProperty(callFrame, property));
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_getter)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ASSERT(stackFrame.args[0].jsValue().isObject());
+ JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
+ ASSERT(stackFrame.args[2].jsValue().isObject());
+ baseObj->defineGetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_setter)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ASSERT(stackFrame.args[0].jsValue().isObject());
+ JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
+ ASSERT(stackFrame.args[2].jsValue().isObject());
+ baseObj->defineSetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_error)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned type = stackFrame.args[0].int32();
+ JSValue message = stackFrame.args[1].jsValue();
+ unsigned bytecodeOffset = stackFrame.args[2].int32();
+
+ unsigned lineNumber = codeBlock->lineNumberForBytecodeOffset(callFrame, bytecodeOffset);
+ return Error::create(callFrame, static_cast<ErrorType>(type), message.toString(callFrame), lineNumber, codeBlock->ownerNode()->sourceID(), codeBlock->ownerNode()->sourceURL());
+}
+
+DEFINE_STUB_FUNCTION(void, op_debug)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ int debugHookID = stackFrame.args[0].int32();
+ int firstLine = stackFrame.args[1].int32();
+ int lastLine = stackFrame.args[2].int32();
+ int column = stackFrame.args[3].int32();
+
+ stackFrame.globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine, column);
+}
+
+#ifdef QT_BUILD_SCRIPT_LIB
+DEFINE_STUB_FUNCTION(void, op_debug_catch)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ if (JSC::Debugger* debugger = callFrame->lexicalGlobalObject()->debugger()) {
+ JSValue exceptionValue = callFrame->r(stackFrame.args[0].int32()).jsValue();
+ DebuggerCallFrame debuggerCallFrame(callFrame, exceptionValue);
+ debugger->exceptionCatch(debuggerCallFrame, callFrame->codeBlock()->ownerNode()->sourceID());
+ }
+}
+
+DEFINE_STUB_FUNCTION(void, op_debug_return)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ if (JSC::Debugger* debugger = callFrame->lexicalGlobalObject()->debugger()) {
+ JSValue returnValue = callFrame->r(stackFrame.args[0].int32()).jsValue();
+ intptr_t sourceID = callFrame->codeBlock()->ownerNode()->sourceID();
+ debugger->functionExit(returnValue, sourceID);
+ }
+}
+
+#endif
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, vm_throw)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, globalData->exceptionLocation);
+
+ JSValue exceptionValue = globalData->exception;
+ ASSERT(exceptionValue);
+ globalData->exception = JSValue();
+
+ HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, false);
+
+ if (!handler) {
+ *stackFrame.exception = exceptionValue;
+ return JSValue::encode(jsNull());
+ }
+
+ stackFrame.callFrame = callFrame;
+ void* catchRoutine = handler->nativeCode.executableAddress();
+ ASSERT(catchRoutine);
+ STUB_SET_RETURN_ADDRESS(catchRoutine);
+ return JSValue::encode(exceptionValue);
+}
+
+} // namespace JITStubs
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h
new file mode 100644
index 0000000..325c3fd
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubs_h
+#define JITStubs_h
+
+#include <wtf/Platform.h>
+
+#include "MacroAssemblerCodeRef.h"
+#include "Register.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+ class CodeBlock;
+ class ExecutablePool;
+ class Identifier;
+ class JSGlobalData;
+ class JSObject;
+ class JSPropertyNameIterator;
+ class JSValue;
+ class JSValueEncodedAsPointer;
+ class Profiler;
+ class PropertySlot;
+ class PutPropertySlot;
+ class RegisterFile;
+ class FuncDeclNode;
+ class FuncExprNode;
+ class RegExp;
+
+ union JITStubArg {
+ void* asPointer;
+ EncodedJSValue asEncodedJSValue;
+ int32_t asInt32;
+
+ JSValue jsValue() { return JSValue::decode(asEncodedJSValue); }
+ Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
+ int32_t int32() { return asInt32; }
+ CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
+ FuncDeclNode* funcDeclNode() { return static_cast<FuncDeclNode*>(asPointer); }
+ FuncExprNode* funcExprNode() { return static_cast<FuncExprNode*>(asPointer); }
+ RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
+ JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
+ ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
+ };
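+ // Each JITStubArg is one machine word written by the JIT'd code just before a
+ // stub call; the accessors above reinterpret that word as the expected type.
+ // For example, cti_op_resolve_skip reads its two operands as:
+ //
+ // Identifier& ident = stackFrame.args[0].identifier(); // word 0 holds an Identifier*
+ // int skip = stackFrame.args[1].int32(); // word 1 holds a raw int32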
+
+#if PLATFORM(X86_64)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[8];
+
+ void* savedRBX;
+ void* savedR15;
+ void* savedR14;
+ void* savedR13;
+ void* savedR12;
+ void* savedRBP;
+ void* savedRIP;
+
+ void* code;
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+ };
+#elif PLATFORM(X86)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[6];
+
+ void* savedEBX;
+ void* savedEDI;
+ void* savedESI;
+ void* savedEBP;
+ void* savedEIP;
+
+ void* code;
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+ };
+#elif PLATFORM_ARM_ARCH(7)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[6];
+
+ ReturnAddressPtr thunkReturnAddress;
+
+ void* preservedReturnAddress;
+ void* preservedR4;
+ void* preservedR5;
+ void* preservedR6;
+
+ // These arguments are passed in r1..r3 (r0 contained the entry code pointer, which is not preserved).
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+
+ // These arguments are passed on the stack.
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#else
+#error "JITStackFrame not defined for this platform."
+#endif
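+ // Each JITStackFrame layout must mirror, field for field, exactly what the
+ // trampoline pushes on entry (the saved*/preserved* slots) plus the outgoing
+ // argument area (args[]). returnAddressSlot() exposes where the stub's return
+ // address lives so that throw-related stubs can redirect where the stub
+ // "returns" (see STUB_SET_RETURN_ADDRESS in JITStubs.cpp).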
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+ #define STUB_ARGS_DECLARATION void* args, ...
+ #define STUB_ARGS (reinterpret_cast<void**>(vl_args) - 1)
+
+ #if COMPILER(MSVC)
+ #define JIT_STUB __cdecl
+ #else
+ #define JIT_STUB
+ #endif
+#else
+ #define STUB_ARGS_DECLARATION void** args
+ #define STUB_ARGS (args)
+
+ #if PLATFORM(X86) && COMPILER(MSVC)
+ #define JIT_STUB __fastcall
+ #elif PLATFORM(X86) && COMPILER(GCC)
+ #define JIT_STUB __attribute__ ((fastcall))
+ #else
+ #define JIT_STUB
+ #endif
+#endif
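+ // On x86 the stubs use the fastcall convention so that the args pointer
+ // arrives in a register (ecx) rather than on the stack, keeping the JIT-side
+ // call sequence short; the VA_LIST variant instead recovers the argument
+ // block from the stub's own varargs area.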
+
+#if PLATFORM(X86_64)
+ struct VoidPtrPair {
+ void* first;
+ void* second;
+ };
+ #define RETURN_POINTER_PAIR(a,b) VoidPtrPair pair = { a, b }; return pair
+#else
+ // MSVC doesn't support returning a two-value struct in two registers, so
+ // we pack the two pointers into a uint64_t instead.
+ typedef uint64_t VoidPtrPair;
+ union VoidPtrPairUnion {
+ struct { void* first; void* second; } s;
+ VoidPtrPair i;
+ };
+ #define RETURN_POINTER_PAIR(a,b) VoidPtrPairUnion pair = {{ a, b }}; return pair.i
+#endif
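+ // Illustrative use (see cti_op_call_arityCheck below): a stub handing two
+ // pointers back to JIT'd code ends with
+ //
+ // RETURN_POINTER_PAIR(a, b);
+ //
+ // On X86_64 the struct travels back in two registers; elsewhere both words
+ // are packed into a single uint64_t return value.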
+
+ extern "C" void ctiVMThrowTrampoline();
+ extern "C" void ctiOpThrowNotCaught();
+ extern "C" EncodedJSValue ctiTrampoline(
+#if PLATFORM(X86_64)
+ // FIXME: (bug #22910) this will force all arguments onto the stack (regparm(0) does not appear to have any effect).
+ // We can allow register passing here, and move the writes of these values into the trampoline.
+ void*, void*, void*, void*, void*, void*,
+#endif
+ void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*);
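+ // ctiTrampoline is the hand-written assembly entry point from the VM into
+ // JIT'd code: it saves the callee-saved registers (lining up with the
+ // saved*/preserved* slots of JITStackFrame above) and then transfers control
+ // to 'code'. (Sketch of the contract only; the actual assembly lives in
+ // JITStubs.cpp.)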
+
+ class JITThunks {
+ public:
+ JITThunks(JSGlobalData*);
+
+ static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&);
+ static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&);
+
+ MacroAssemblerCodePtr ctiArrayLengthTrampoline() { return m_ctiArrayLengthTrampoline; }
+ MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_ctiStringLengthTrampoline; }
+ MacroAssemblerCodePtr ctiVirtualCallPreLink() { return m_ctiVirtualCallPreLink; }
+ MacroAssemblerCodePtr ctiVirtualCallLink() { return m_ctiVirtualCallLink; }
+ MacroAssemblerCodePtr ctiVirtualCall() { return m_ctiVirtualCall; }
+ MacroAssemblerCodePtr ctiNativeCallThunk() { return m_ctiNativeCallThunk; }
+
+ private:
+ RefPtr<ExecutablePool> m_executablePool;
+
+ MacroAssemblerCodePtr m_ctiArrayLengthTrampoline;
+ MacroAssemblerCodePtr m_ctiStringLengthTrampoline;
+ MacroAssemblerCodePtr m_ctiVirtualCallPreLink;
+ MacroAssemblerCodePtr m_ctiVirtualCallLink;
+ MacroAssemblerCodePtr m_ctiVirtualCall;
+ MacroAssemblerCodePtr m_ctiNativeCallThunk;
+ };
+
+namespace JITStubs { extern "C" {
+
+ void JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
+#ifdef QT_BUILD_SCRIPT_LIB
+ void JIT_STUB cti_op_debug_catch(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_debug_return(STUB_ARGS_DECLARATION);
+#endif
+ void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_second(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val_array(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_less(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_true(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_dontLazyLinkCall(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_error(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
+ JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitnot(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitor(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitxor(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_method_check_second(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_second(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_boolean(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_function(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_number(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_object(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_string(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_undefined(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_less(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_lesseq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_lshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_neq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_next_pname(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_pre_dec(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_pre_inc(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_sub(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_to_jsnumber(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_to_primitive(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_inc(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_func(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
+ VoidPtrPair JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION);
+
+}; } // extern "C" namespace JITStubs
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITStubs_h