Diffstat (limited to 'src/3rdparty/webkit/Source/JavaScriptCore/jit')
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocator.cpp             128
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocator.h               378
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp  531
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JIT.cpp                             633
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JIT.h                               1045
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITArithmetic.cpp                   1244
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp              1424
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCall.cpp                         264
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCall32_64.cpp                    348
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCode.h                           117
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITInlineMethods.h                  858
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITOpcodes.cpp                      1750
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp                 1847
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITPropertyAccess.cpp               1038
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp          1112
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubCall.h                       237
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubs.cpp                        3613
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubs.h                          440
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/JSInterfaceJIT.h                    352
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/SpecializedThunkJIT.h               165
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/ThunkGenerators.cpp                 162
-rw-r--r--  src/3rdparty/webkit/Source/JavaScriptCore/jit/ThunkGenerators.h                   45
22 files changed, 17731 insertions(+), 0 deletions(-)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
new file mode 100644
index 0000000..35531d9
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+size_t ExecutableAllocator::pageSize = 0;
+
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+
+void ExecutableAllocator::intializePageSize()
+{
+#if OS(SYMBIAN) && CPU(ARMV5_OR_LOWER)
+ // The moving memory model (as used in ARMv5 and earlier platforms)
+ // on Symbian OS limits the number of chunks for each process to 16.
+ // To mitigate this limitation, increase the page size to allocate
+ // fewer, larger chunks. Set the page size to 256 KB to compensate
+ // for the moving memory model limitation.
+ ExecutableAllocator::pageSize = 256 * 1024;
+#else
+ ExecutableAllocator::pageSize = WTF::pageSize();
+#endif
+}
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+{
+ PageAllocation allocation = PageAllocation::allocate(size, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+ if (!allocation)
+ CRASH();
+ return allocation;
+}
+
+void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
+{
+ allocation.deallocate();
+}
+
+bool ExecutableAllocator::isValid() const
+{
+ return true;
+}
+
+bool ExecutableAllocator::underMemoryPressure()
+{
+ return false;
+}
+
+size_t ExecutableAllocator::committedByteCount()
+{
+ return 0;
+}
+
+#endif
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+
+#if OS(WINDOWS) || OS(SYMBIAN)
+#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
+#endif
+
+void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
+{
+ if (!pageSize)
+ intializePageSize();
+
+ // Calculate the start of the page containing this region,
+ // and account for this extra memory within size.
+ intptr_t startPtr = reinterpret_cast<intptr_t>(start);
+ intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
+ void* pageStart = reinterpret_cast<void*>(pageStartPtr);
+ size += (startPtr - pageStartPtr);
+
+ // Round size up
+ size += (pageSize - 1);
+ size &= ~(pageSize - 1);
+
+ mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
+}
+
+#endif
+
+#if CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
+
+__asm void ExecutableAllocator::cacheFlush(void* code, size_t size)
+{
+ ARM
+ push {r7}
+ add r1, r1, r0
+ mov r7, #0xf0000
+ add r7, r7, #0x2
+ mov r2, #0x0
+ svc #0x0
+ pop {r7}
+ bx lr
+}
+
+#endif
+
+}
+
+#endif // ENABLE(ASSEMBLER)
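
The page-rounding arithmetic in reprotectRegion() above is the subtle part: the region is widened back to the start of its containing page and the size is rounded up to a whole number of pages before mprotect() is called. A minimal standalone sketch of the same calculation, assuming a hypothetical 4 KB page size (the helper and main() below are illustrative, not part of the patch):

#include <cassert>
#include <cstddef>
#include <cstdint>

static const size_t kPageSize = 4096; // hypothetical; the real value comes from WTF::pageSize()

// Compute the page-aligned start of the region and the size rounded up to whole pages,
// mirroring the arithmetic reprotectRegion() applies before calling mprotect().
static void pageAlignRegion(uintptr_t start, size_t size, uintptr_t& pageStart, size_t& roundedSize)
{
    pageStart = start & ~static_cast<uintptr_t>(kPageSize - 1); // start of the page containing 'start'
    roundedSize = size + (start - pageStart);                   // include the bytes preceding 'start' on that page
    roundedSize = (roundedSize + kPageSize - 1) & ~(kPageSize - 1); // round up to a page multiple
}

int main()
{
    uintptr_t pageStart = 0;
    size_t roundedSize = 0;
    pageAlignRegion(0x12345, 100, pageStart, roundedSize);
    assert(pageStart == 0x12000);       // aligned down to the containing page
    assert(roundedSize == kPageSize);   // 0x345 + 100 bytes still fit in one page
    return 0;
}
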
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocator.h b/src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocator.h
new file mode 100644
index 0000000..77a2567
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExecutableAllocator_h
+#define ExecutableAllocator_h
+#include <stddef.h> // for ptrdiff_t
+#include <limits>
+#include <wtf/Assertions.h>
+#include <wtf/PageAllocation.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/UnusedParam.h>
+#include <wtf/Vector.h>
+
+#if OS(IOS)
+#include <libkern/OSCacheControl.h>
+#include <sys/mman.h>
+#endif
+
+#if OS(SYMBIAN)
+#include <e32std.h>
+#endif
+
+#if CPU(MIPS) && OS(LINUX)
+#include <sys/cachectl.h>
+#endif
+
+#if CPU(SH4) && OS(LINUX)
+#include <asm/cachectl.h>
+#include <asm/unistd.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#endif
+
+#if OS(WINCE)
+// From pkfuncs.h (private header file from the Platform Builder)
+#define CACHE_SYNC_ALL 0x07F
+extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
+#endif
+
+#if PLATFORM(BREWMP)
+#include <AEEIMemCache1.h>
+#include <AEEMemCache1.bid>
+#include <wtf/brew/RefPtrBrew.h>
+#endif
+
+#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
+#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
+#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
+#define EXECUTABLE_POOL_WRITABLE false
+#else
+#define EXECUTABLE_POOL_WRITABLE true
+#endif
+
+namespace JSC {
+
+inline size_t roundUpAllocationSize(size_t request, size_t granularity)
+{
+ if ((std::numeric_limits<size_t>::max() - granularity) <= request)
+ CRASH(); // Allocation is too large
+
+ // Round up to next page boundary
+ size_t size = request + (granularity - 1);
+ size = size & ~(granularity - 1);
+ ASSERT(size >= request);
+ return size;
+}
+
+}
+
+#if ENABLE(JIT) && ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+class ExecutablePool : public RefCounted<ExecutablePool> {
+public:
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+ typedef PageAllocation Allocation;
+#else
+ class Allocation {
+ public:
+ Allocation(void* base, size_t size)
+ : m_base(base)
+ , m_size(size)
+ {
+ }
+ void* base() { return m_base; }
+ size_t size() { return m_size; }
+ bool operator!() const { return !m_base; }
+
+ private:
+ void* m_base;
+ size_t m_size;
+ };
+#endif
+ typedef Vector<Allocation, 2> AllocationList;
+
+ static PassRefPtr<ExecutablePool> create(size_t n)
+ {
+ return adoptRef(new ExecutablePool(n));
+ }
+
+ void* alloc(size_t n)
+ {
+ ASSERT(m_freePtr <= m_end);
+
+ // Round 'n' up to a multiple of word size; if all allocations are of
+ // word-sized quantities, then all subsequent allocations will be aligned.
+ n = roundUpAllocationSize(n, sizeof(void*));
+
+ if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
+ void* result = m_freePtr;
+ m_freePtr += n;
+ return result;
+ }
+
+ // Insufficient space to allocate in the existing pool,
+ // so we need to allocate into a new pool.
+ return poolAllocate(n);
+ }
+
+ void tryShrink(void* allocation, size_t oldSize, size_t newSize)
+ {
+ if (static_cast<char*>(allocation) + oldSize != m_freePtr)
+ return;
+ m_freePtr = static_cast<char*>(allocation) + roundUpAllocationSize(newSize, sizeof(void*));
+ }
+
+ ~ExecutablePool()
+ {
+ AllocationList::iterator end = m_pools.end();
+ for (AllocationList::iterator ptr = m_pools.begin(); ptr != end; ++ptr)
+ ExecutablePool::systemRelease(*ptr);
+ }
+
+ size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
+
+private:
+ static Allocation systemAlloc(size_t n);
+ static void systemRelease(Allocation& alloc);
+
+ ExecutablePool(size_t n);
+
+ void* poolAllocate(size_t n);
+
+ char* m_freePtr;
+ char* m_end;
+ AllocationList m_pools;
+};
+
+class ExecutableAllocator {
+ enum ProtectionSetting { Writable, Executable };
+
+public:
+ static size_t pageSize;
+ ExecutableAllocator()
+ {
+ if (!pageSize)
+ intializePageSize();
+ if (isValid())
+ m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+#if !ENABLE(INTERPRETER)
+ else
+ CRASH();
+#endif
+ }
+
+ bool isValid() const;
+
+ static bool underMemoryPressure();
+
+ PassRefPtr<ExecutablePool> poolForSize(size_t n)
+ {
+ // Try to fit in the existing small allocator
+ ASSERT(m_smallAllocationPool);
+ if (n < m_smallAllocationPool->available())
+ return m_smallAllocationPool;
+
+ // If the request is large, we just provide an unshared allocator
+ if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
+ return ExecutablePool::create(n);
+
+ // Create a new allocator
+ RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+
+ // If the new allocator will result in more free space than in
+ // the current small allocator, then we will use it instead
+ if ((pool->available() - n) > m_smallAllocationPool->available())
+ m_smallAllocationPool = pool;
+ return pool.release();
+ }
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ static void makeWritable(void* start, size_t size)
+ {
+ reprotectRegion(start, size, Writable);
+ }
+
+ static void makeExecutable(void* start, size_t size)
+ {
+ reprotectRegion(start, size, Executable);
+ }
+#else
+ static void makeWritable(void*, size_t) {}
+ static void makeExecutable(void*, size_t) {}
+#endif
+
+
+#if CPU(X86) || CPU(X86_64)
+ static void cacheFlush(void*, size_t)
+ {
+ }
+#elif CPU(MIPS)
+ static void cacheFlush(void* code, size_t size)
+ {
+#if GCC_VERSION_AT_LEAST(4, 3, 0)
+#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3)
+ int lineSize;
+ asm("rdhwr %0, $1" : "=r" (lineSize));
+ //
+ // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
+ // mips_expand_synci_loop that may execute synci one more time.
+ // "start" points to the fisrt byte of the cache line.
+ // "end" points to the last byte of the line before the last cache line.
+ // Because size is always a multiple of 4, this is safe to set
+ // "end" to the last byte.
+ //
+ intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
+ intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
+ __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
+#else
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
+#endif
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+#endif
+ }
+#elif CPU(ARM_THUMB2) && OS(IOS)
+ static void cacheFlush(void* code, size_t size)
+ {
+ sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+ }
+#elif CPU(ARM_THUMB2) && OS(LINUX)
+ static void cacheFlush(void* code, size_t size)
+ {
+ asm volatile (
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "movw r7, #0x2\n"
+ "movt r7, #0xf\n"
+ "movs r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
+ : "r0", "r1", "r2");
+ }
+#elif OS(SYMBIAN)
+ static void cacheFlush(void* code, size_t size)
+ {
+ User::IMB_Range(code, static_cast<char*>(code) + size);
+ }
+#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
+ static __asm void cacheFlush(void* code, size_t size);
+#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
+ static void cacheFlush(void* code, size_t size)
+ {
+ asm volatile (
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
+ : "r0", "r1", "r2");
+ }
+#elif OS(WINCE)
+ static void cacheFlush(void* code, size_t size)
+ {
+ CacheRangeFlush(code, size, CACHE_SYNC_ALL);
+ }
+#elif PLATFORM(BREWMP)
+ static void cacheFlush(void* code, size_t size)
+ {
+ RefPtr<IMemCache1> memCache = createRefPtrInstance<IMemCache1>(AEECLSID_MemCache1);
+ IMemCache1_ClearCache(memCache.get(), reinterpret_cast<uint32>(code), size, MEMSPACE_CACHE_FLUSH, MEMSPACE_DATACACHE);
+ IMemCache1_ClearCache(memCache.get(), reinterpret_cast<uint32>(code), size, MEMSPACE_CACHE_INVALIDATE, MEMSPACE_INSTCACHE);
+ }
+#elif CPU(SH4) && OS(LINUX)
+ static void cacheFlush(void* code, size_t size)
+ {
+#ifdef CACHEFLUSH_D_L2
+ syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2);
+#else
+ syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I);
+#endif
+ }
+#else
+ #error "The cacheFlush support is missing on this platform."
+#endif
+ static size_t committedByteCount();
+
+private:
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ static void reprotectRegion(void*, size_t, ProtectionSetting);
+#endif
+
+ RefPtr<ExecutablePool> m_smallAllocationPool;
+ static void intializePageSize();
+};
+
+inline ExecutablePool::ExecutablePool(size_t n)
+{
+ size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
+ Allocation mem = systemAlloc(allocSize);
+ m_pools.append(mem);
+ m_freePtr = static_cast<char*>(mem.base());
+ if (!m_freePtr)
+ CRASH(); // Failed to allocate
+ m_end = m_freePtr + allocSize;
+}
+
+inline void* ExecutablePool::poolAllocate(size_t n)
+{
+ size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
+
+ Allocation result = systemAlloc(allocSize);
+ if (!result.base())
+ CRASH(); // Failed to allocate
+
+ ASSERT(m_end >= m_freePtr);
+ if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
+ // Replace allocation pool
+ m_freePtr = static_cast<char*>(result.base()) + n;
+ m_end = static_cast<char*>(result.base()) + allocSize;
+ }
+
+ m_pools.append(result);
+ return result.base();
+}
+
+}
+
+#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
+
+#endif // ExecutableAllocator_h
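
ExecutablePool above is essentially a bump allocator: each request is rounded up to word size and carved off the front of the current chunk, and when the chunk runs out, poolAllocate() appends a fresh system allocation. A minimal sketch of that scheme over an ordinary heap buffer (class and names are illustrative, not from the patch; the real pool hands out executable memory via PageAllocation):

#include <cassert>
#include <cstddef>
#include <cstdlib>

static size_t roundUp(size_t request, size_t granularity)
{
    return (request + granularity - 1) & ~(granularity - 1); // granularity must be a power of two
}

class BumpPool {
public:
    explicit BumpPool(size_t size)
        : m_base(static_cast<char*>(malloc(size)))
        , m_free(m_base)
        , m_end(m_base + size)
    {
    }
    ~BumpPool() { free(m_base); }

    void* alloc(size_t n)
    {
        n = roundUp(n, sizeof(void*)); // keep subsequent allocations word-aligned
        if (static_cast<size_t>(m_end - m_free) < n)
            return 0; // the real pool appends a fresh system allocation here instead
        void* result = m_free;
        m_free += n;
        return result;
    }

    size_t available() const { return m_end - m_free; }

private:
    char* m_base;
    char* m_free;
    char* m_end;
};

int main()
{
    BumpPool pool(4096);
    void* a = pool.alloc(10); // rounded up to a multiple of sizeof(void*)
    void* b = pool.alloc(1);
    assert(a && b && a != b);
    size_t used = roundUp(10, sizeof(void*)) + roundUp(1, sizeof(void*));
    assert(pool.available() == 4096 - used);
    return 0;
}
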
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
new file mode 100644
index 0000000..6a945b0
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
+
+#include <errno.h>
+
+#include "TCSpinLock.h"
+#include <sys/mman.h>
+#include <unistd.h>
+#include <wtf/AVLTree.h>
+#include <wtf/PageReservation.h>
+#include <wtf/VMTags.h>
+
+#if OS(LINUX)
+#include <stdio.h>
+#endif
+
+using namespace WTF;
+
+namespace JSC {
+
+#define TwoPow(n) (1ull << n)
+
+class AllocationTableSizeClass {
+public:
+ AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
+ : m_blockSize(blockSize)
+ {
+ ASSERT(blockSize == TwoPow(log2BlockSize));
+
+ // Calculate the number of blocks needed to hold size.
+ size_t blockMask = blockSize - 1;
+ m_blockCount = (size + blockMask) >> log2BlockSize;
+
+ // Align to the smallest power of two >= m_blockCount.
+ m_blockAlignment = 1;
+ while (m_blockAlignment < m_blockCount)
+ m_blockAlignment += m_blockAlignment;
+ }
+
+ size_t blockSize() const { return m_blockSize; }
+ size_t blockCount() const { return m_blockCount; }
+ size_t blockAlignment() const { return m_blockAlignment; }
+
+ size_t size()
+ {
+ return m_blockSize * m_blockCount;
+ }
+
+private:
+ size_t m_blockSize;
+ size_t m_blockCount;
+ size_t m_blockAlignment;
+};
+
+template<unsigned log2Entries>
+class AllocationTableLeaf {
+ typedef uint64_t BitField;
+
+public:
+ static const unsigned log2SubregionSize = 12; // 2^12 == pagesize
+ static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
+
+ static const size_t subregionSize = TwoPow(log2SubregionSize);
+ static const size_t regionSize = TwoPow(log2RegionSize);
+ static const unsigned entries = TwoPow(log2Entries);
+ COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);
+
+ AllocationTableLeaf()
+ : m_allocated(0)
+ {
+ }
+
+ ~AllocationTableLeaf()
+ {
+ ASSERT(isEmpty());
+ }
+
+ size_t allocate(AllocationTableSizeClass& sizeClass)
+ {
+ ASSERT(sizeClass.blockSize() == subregionSize);
+ ASSERT(!isFull());
+
+ size_t alignment = sizeClass.blockAlignment();
+ size_t count = sizeClass.blockCount();
+ // Use this mask to check for spans of free blocks.
+ BitField mask = ((1ull << count) - 1) << (alignment - count);
+
+ // Step in units of alignment size.
+ for (unsigned i = 0; i < entries; i += alignment) {
+ if (!(m_allocated & mask)) {
+ m_allocated |= mask;
+ return (i + (alignment - count)) << log2SubregionSize;
+ }
+ mask <<= alignment;
+ }
+ return notFound;
+ }
+
+ void free(size_t location, AllocationTableSizeClass& sizeClass)
+ {
+ ASSERT(sizeClass.blockSize() == subregionSize);
+
+ size_t entry = location >> log2SubregionSize;
+ size_t count = sizeClass.blockCount();
+ BitField mask = ((1ull << count) - 1) << entry;
+
+ ASSERT((m_allocated & mask) == mask);
+ m_allocated &= ~mask;
+ }
+
+ bool isEmpty()
+ {
+ return !m_allocated;
+ }
+
+ bool isFull()
+ {
+ return !~m_allocated;
+ }
+
+ static size_t size()
+ {
+ return regionSize;
+ }
+
+ static AllocationTableSizeClass classForSize(size_t size)
+ {
+ return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
+ }
+
+#ifndef NDEBUG
+ void dump(size_t parentOffset = 0, unsigned indent = 0)
+ {
+ for (unsigned i = 0; i < indent; ++i)
+ fprintf(stderr, " ");
+ fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated);
+ }
+#endif
+
+private:
+ BitField m_allocated;
+};
+
+
+template<class NextLevel>
+class LazyAllocationTable {
+public:
+ static const unsigned log2RegionSize = NextLevel::log2RegionSize;
+ static const unsigned entries = NextLevel::entries;
+
+ LazyAllocationTable()
+ : m_ptr(0)
+ {
+ }
+
+ ~LazyAllocationTable()
+ {
+ ASSERT(isEmpty());
+ }
+
+ size_t allocate(AllocationTableSizeClass& sizeClass)
+ {
+ if (!m_ptr)
+ m_ptr = new NextLevel();
+ return m_ptr->allocate(sizeClass);
+ }
+
+ void free(size_t location, AllocationTableSizeClass& sizeClass)
+ {
+ ASSERT(m_ptr);
+ m_ptr->free(location, sizeClass);
+ if (m_ptr->isEmpty()) {
+ delete m_ptr;
+ m_ptr = 0;
+ }
+ }
+
+ bool isEmpty()
+ {
+ return !m_ptr;
+ }
+
+ bool isFull()
+ {
+ return m_ptr && m_ptr->isFull();
+ }
+
+ static size_t size()
+ {
+ return NextLevel::size();
+ }
+
+#ifndef NDEBUG
+ void dump(size_t parentOffset = 0, unsigned indent = 0)
+ {
+ ASSERT(m_ptr);
+ m_ptr->dump(parentOffset, indent);
+ }
+#endif
+
+ static AllocationTableSizeClass classForSize(size_t size)
+ {
+ return NextLevel::classForSize(size);
+ }
+
+private:
+ NextLevel* m_ptr;
+};
+
+template<class NextLevel, unsigned log2Entries>
+class AllocationTableDirectory {
+ typedef uint64_t BitField;
+
+public:
+ static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
+ static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
+
+ static const size_t subregionSize = TwoPow(log2SubregionSize);
+ static const size_t regionSize = TwoPow(log2RegionSize);
+ static const unsigned entries = TwoPow(log2Entries);
+ COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);
+
+ AllocationTableDirectory()
+ : m_full(0)
+ , m_hasSuballocation(0)
+ {
+ }
+
+ ~AllocationTableDirectory()
+ {
+ ASSERT(isEmpty());
+ }
+
+ size_t allocate(AllocationTableSizeClass& sizeClass)
+ {
+ ASSERT(sizeClass.blockSize() <= subregionSize);
+ ASSERT(!isFull());
+
+ if (sizeClass.blockSize() < subregionSize) {
+ BitField bit = 1;
+ for (unsigned i = 0; i < entries; ++i, bit += bit) {
+ if (m_full & bit)
+ continue;
+ size_t location = m_suballocations[i].allocate(sizeClass);
+ if (location != notFound) {
+ // If this didn't already have a subregion, it does now!
+ m_hasSuballocation |= bit;
+ // Mirror the suballocation's full bit.
+ if (m_suballocations[i].isFull())
+ m_full |= bit;
+ return (i * subregionSize) | location;
+ }
+ }
+ return notFound;
+ }
+
+ // A block is allocated if either it is fully allocated or contains suballocations.
+ BitField allocated = m_full | m_hasSuballocation;
+
+ size_t alignment = sizeClass.blockAlignment();
+ size_t count = sizeClass.blockCount();
+ // Use this mask to check for spans of free blocks.
+ BitField mask = ((1ull << count) - 1) << (alignment - count);
+
+ // Step in units of alignment size.
+ for (unsigned i = 0; i < entries; i += alignment) {
+ if (!(allocated & mask)) {
+ m_full |= mask;
+ return (i + (alignment - count)) << log2SubregionSize;
+ }
+ mask <<= alignment;
+ }
+ return notFound;
+ }
+
+ void free(size_t location, AllocationTableSizeClass& sizeClass)
+ {
+ ASSERT(sizeClass.blockSize() <= subregionSize);
+
+ size_t entry = location >> log2SubregionSize;
+
+ if (sizeClass.blockSize() < subregionSize) {
+ BitField bit = 1ull << entry;
+ m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
+ // Check if the suballocation is now empty.
+ if (m_suballocations[entry].isEmpty())
+ m_hasSuballocation &= ~bit;
+ // No need to check, it clearly isn't full any more!
+ m_full &= ~bit;
+ } else {
+ size_t count = sizeClass.blockCount();
+ BitField mask = ((1ull << count) - 1) << entry;
+ ASSERT((m_full & mask) == mask);
+ ASSERT(!(m_hasSuballocation & mask));
+ m_full &= ~mask;
+ }
+ }
+
+ bool isEmpty()
+ {
+ return !(m_full | m_hasSuballocation);
+ }
+
+ bool isFull()
+ {
+ return !~m_full;
+ }
+
+ static size_t size()
+ {
+ return regionSize;
+ }
+
+ static AllocationTableSizeClass classForSize(size_t size)
+ {
+ if (size < subregionSize) {
+ AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
+ if (sizeClass.size() < NextLevel::size())
+ return sizeClass;
+ }
+ return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
+ }
+
+#ifndef NDEBUG
+ void dump(size_t parentOffset = 0, unsigned indent = 0)
+ {
+ for (unsigned i = 0; i < indent; ++i)
+ fprintf(stderr, " ");
+ fprintf(stderr, "%08x: [", (int)parentOffset);
+ for (unsigned i = 0; i < entries; ++i) {
+ BitField bit = 1ull << i;
+ char c = m_hasSuballocation & bit
+ ? (m_full & bit ? 'N' : 'n')
+ : (m_full & bit ? 'F' : '-');
+ fprintf(stderr, "%c", c);
+ }
+ fprintf(stderr, "]\n");
+
+ for (unsigned i = 0; i < entries; ++i) {
+ BitField bit = 1ull << i;
+ size_t offset = parentOffset | (subregionSize * i);
+ if (m_hasSuballocation & bit)
+ m_suballocations[i].dump(offset, indent + 1);
+ }
+ }
+#endif
+
+private:
+ NextLevel m_suballocations[entries];
+ // Subregions exist in one of four states:
+ // (1) empty (both bits clear)
+ // (2) fully allocated as a single allocation (m_full set)
+ // (3) partially allocated through suballocations (m_hasSuballocation set)
+ // (4) fully allocated through suballocations (both bits set)
+ BitField m_full;
+ BitField m_hasSuballocation;
+};
+
+
+typedef AllocationTableLeaf<6> PageTables256KB;
+typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
+typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
+typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;
+
+#if CPU(ARM)
+typedef PageTables16MB FixedVMPoolPageTables;
+#elif CPU(X86_64)
+typedef PageTables1GB FixedVMPoolPageTables;
+#else
+typedef PageTables32MB FixedVMPoolPageTables;
+#endif
+
+
+class FixedVMPoolAllocator
+{
+public:
+ FixedVMPoolAllocator()
+ {
+ ASSERT(PageTables256KB::size() == 256 * 1024);
+ ASSERT(PageTables16MB::size() == 16 * 1024 * 1024);
+ ASSERT(PageTables32MB::size() == 32 * 1024 * 1024);
+ ASSERT(PageTables1GB::size() == 1024 * 1024 * 1024);
+
+ m_reservation = PageReservation::reserve(FixedVMPoolPageTables::size(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+#if !ENABLE(INTERPRETER)
+ if (!isValid())
+ CRASH();
+#endif
+ }
+
+ ExecutablePool::Allocation alloc(size_t requestedSize)
+ {
+ ASSERT(requestedSize);
+ AllocationTableSizeClass sizeClass = classForSize(requestedSize);
+ size_t size = sizeClass.size();
+ ASSERT(size);
+
+ if (size >= FixedVMPoolPageTables::size())
+ CRASH();
+ if (m_pages.isFull())
+ CRASH();
+
+ size_t offset = m_pages.allocate(sizeClass);
+ if (offset == notFound)
+ CRASH();
+
+ void* pointer = offsetToPointer(offset);
+ m_reservation.commit(pointer, size);
+ return ExecutablePool::Allocation(pointer, size);
+ }
+
+ void free(ExecutablePool::Allocation allocation)
+ {
+ void* pointer = allocation.base();
+ size_t size = allocation.size();
+ ASSERT(size);
+
+ m_reservation.decommit(pointer, size);
+
+ AllocationTableSizeClass sizeClass = classForSize(size);
+ ASSERT(sizeClass.size() == size);
+ m_pages.free(pointerToOffset(pointer), sizeClass);
+ }
+
+ size_t allocated()
+ {
+ return m_reservation.committed();
+ }
+
+ bool isValid() const
+ {
+ return !!m_reservation;
+ }
+
+private:
+ AllocationTableSizeClass classForSize(size_t size)
+ {
+ return FixedVMPoolPageTables::classForSize(size);
+ }
+
+ void* offsetToPointer(size_t offset)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_reservation.base()) + offset);
+ }
+
+ size_t pointerToOffset(void* pointer)
+ {
+ return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_reservation.base());
+ }
+
+ PageReservation m_reservation;
+ FixedVMPoolPageTables m_pages;
+};
+
+
+static SpinLock spinlock = SPINLOCK_INITIALIZER;
+static FixedVMPoolAllocator* allocator = 0;
+
+
+size_t ExecutableAllocator::committedByteCount()
+{
+ SpinLockHolder lockHolder(&spinlock);
+ return allocator ? allocator->allocated() : 0;
+}
+
+void ExecutableAllocator::intializePageSize()
+{
+ ExecutableAllocator::pageSize = getpagesize();
+}
+
+bool ExecutableAllocator::isValid() const
+{
+ SpinLockHolder lock_holder(&spinlock);
+ if (!allocator)
+ allocator = new FixedVMPoolAllocator();
+ return allocator->isValid();
+}
+
+bool ExecutableAllocator::underMemoryPressure()
+{
+ // Technically we should take the spin lock here, but we don't care if we get stale data.
+ // This is only really a heuristic anyway.
+ return allocator && (allocator->allocated() > (FixedVMPoolPageTables::size() / 2));
+}
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+{
+ SpinLockHolder lock_holder(&spinlock);
+ ASSERT(allocator);
+ return allocator->alloc(size);
+}
+
+void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
+{
+ SpinLockHolder lock_holder(&spinlock);
+ ASSERT(allocator);
+ allocator->free(allocation);
+}
+
+}
+
+
+#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
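
The core of the fixed VM pool above is the bitmap span search in AllocationTableLeaf::allocate(): a 64-bit word records which subregions are taken, and a run of blocks is claimed by sliding a mask across the word in steps of the size class's alignment. A minimal sketch of that search, returning a block index instead of the byte offset the real code produces by shifting (names are illustrative, not from the patch):

#include <cassert>
#include <cstddef>
#include <cstdint>

static const size_t kNotFound = static_cast<size_t>(-1);

// 'allocated' holds one bit per block; find and claim a free run of 'count' blocks,
// scanning in steps of 'alignment' (the smallest power of two >= count, as computed by
// the AllocationTableSizeClass constructor). Returns the index of the run's first block.
static size_t allocateSpan(uint64_t& allocated, size_t count, size_t alignment, size_t entries = 64)
{
    uint64_t mask = ((1ull << count) - 1) << (alignment - count);
    for (size_t i = 0; i < entries; i += alignment) {
        if (!(allocated & mask)) {
            allocated |= mask;
            return i + (alignment - count);
        }
        mask <<= alignment;
    }
    return kNotFound;
}

int main()
{
    uint64_t allocated = 0;
    // Ask for 3 blocks; the size class rounds the alignment up to 4.
    size_t first = allocateSpan(allocated, 3, 4);
    assert(first == 1);                    // blocks 1..3 of the first 4-aligned slot
    assert(allocated == (0x7ull << 1));
    size_t second = allocateSpan(allocated, 3, 4);
    assert(second == 5);                   // the next 4-aligned slot
    return 0;
}
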
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JIT.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JIT.cpp
new file mode 100644
index 0000000..981e2e1
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JIT.cpp
@@ -0,0 +1,633 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#include "JIT.h"
+
+// This probably does not belong here; adding it here for now as a quick Windows build fix.
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
+#include "MacroAssembler.h"
+JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+#include "dfg/DFGNode.h" // for DFG_SUCCESS_STATS
+
+using namespace std;
+
+namespace JSC {
+
+void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
+}
+
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
+}
+
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
+}
+
+JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
+ : m_interpreter(globalData->interpreter)
+ , m_globalData(globalData)
+ , m_codeBlock(codeBlock)
+ , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
+ , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
+ , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
+ , m_bytecodeOffset((unsigned)-1)
+#if USE(JSVALUE32_64)
+ , m_jumpTargetIndex(0)
+ , m_mappedBytecodeOffset((unsigned)-1)
+ , m_mappedVirtualRegisterIndex((unsigned)-1)
+ , m_mappedTag((RegisterID)-1)
+ , m_mappedPayload((RegisterID)-1)
+#else
+ , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
+ , m_jumpTargetsPosition(0)
+#endif
+{
+}
+
+#if USE(JSVALUE32_64)
+void JIT::emitTimeoutCheck()
+{
+ Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
+ JITStubCall stubCall(this, cti_timeout_check);
+ stubCall.addArgument(regT1, regT0); // save last result registers.
+ stubCall.call(timeoutCheckRegister);
+ stubCall.getArgument(0, regT1, regT0); // reload last result registers.
+ skipTimeout.link(this);
+}
+#else
+void JIT::emitTimeoutCheck()
+{
+ Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
+ JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
+ skipTimeout.link(this);
+
+ killLastResultRegister();
+}
+#endif
+
+#define NEXT_OPCODE(name) \
+ m_bytecodeOffset += OPCODE_LENGTH(name); \
+ break;
+
+#if USE(JSVALUE32_64)
+#define DEFINE_BINARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand); \
+ stubCall.addArgument(currentInstruction[3].u.operand); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_UNARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#else // USE(JSVALUE32_64)
+
+#define DEFINE_BINARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_UNARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+#endif // USE(JSVALUE32_64)
+
+#define DEFINE_OP(name) \
+ case name: { \
+ emit_##name(currentInstruction); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_SLOWCASE_OP(name) \
+ case name: { \
+ emitSlow_##name(currentInstruction, iter); \
+ NEXT_OPCODE(name); \
+ }
+
+void JIT::privateCompileMainPass()
+{
+ Instruction* instructionsBegin = m_codeBlock->instructions().begin();
+ unsigned instructionCount = m_codeBlock->instructions().size();
+
+ m_propertyAccessInstructionIndex = 0;
+ m_globalResolveInfoIndex = 0;
+ m_callLinkInfoIndex = 0;
+
+ for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
+ ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
+
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
+ sampleInstruction(currentInstruction);
+#endif
+
+#if USE(JSVALUE64)
+ if (atJumpTarget())
+ killLastResultRegister();
+#endif
+
+ m_labels[m_bytecodeOffset] = label();
+
+ switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+ DEFINE_BINARY_OP(op_del_by_val)
+ DEFINE_BINARY_OP(op_in)
+ DEFINE_BINARY_OP(op_less)
+ DEFINE_BINARY_OP(op_lesseq)
+ DEFINE_UNARY_OP(op_is_boolean)
+ DEFINE_UNARY_OP(op_is_function)
+ DEFINE_UNARY_OP(op_is_number)
+ DEFINE_UNARY_OP(op_is_object)
+ DEFINE_UNARY_OP(op_is_string)
+ DEFINE_UNARY_OP(op_is_undefined)
+#if USE(JSVALUE64)
+ DEFINE_UNARY_OP(op_negate)
+#endif
+ DEFINE_UNARY_OP(op_typeof)
+
+ DEFINE_OP(op_add)
+ DEFINE_OP(op_bitand)
+ DEFINE_OP(op_bitnot)
+ DEFINE_OP(op_bitor)
+ DEFINE_OP(op_bitxor)
+ DEFINE_OP(op_call)
+ DEFINE_OP(op_call_eval)
+ DEFINE_OP(op_call_varargs)
+ DEFINE_OP(op_catch)
+ DEFINE_OP(op_construct)
+ DEFINE_OP(op_get_callee)
+ DEFINE_OP(op_create_this)
+ DEFINE_OP(op_convert_this)
+ DEFINE_OP(op_convert_this_strict)
+ DEFINE_OP(op_init_lazy_reg)
+ DEFINE_OP(op_create_arguments)
+ DEFINE_OP(op_debug)
+ DEFINE_OP(op_del_by_id)
+ DEFINE_OP(op_div)
+ DEFINE_OP(op_end)
+ DEFINE_OP(op_enter)
+ DEFINE_OP(op_create_activation)
+ DEFINE_OP(op_eq)
+ DEFINE_OP(op_eq_null)
+ DEFINE_OP(op_get_by_id)
+ DEFINE_OP(op_get_arguments_length)
+ DEFINE_OP(op_get_by_val)
+ DEFINE_OP(op_get_argument_by_val)
+ DEFINE_OP(op_get_by_pname)
+ DEFINE_OP(op_get_global_var)
+ DEFINE_OP(op_get_pnames)
+ DEFINE_OP(op_get_scoped_var)
+ DEFINE_OP(op_check_has_instance)
+ DEFINE_OP(op_instanceof)
+ DEFINE_OP(op_jeq_null)
+ DEFINE_OP(op_jfalse)
+ DEFINE_OP(op_jmp)
+ DEFINE_OP(op_jmp_scopes)
+ DEFINE_OP(op_jneq_null)
+ DEFINE_OP(op_jneq_ptr)
+ DEFINE_OP(op_jnless)
+ DEFINE_OP(op_jless)
+ DEFINE_OP(op_jlesseq)
+ DEFINE_OP(op_jnlesseq)
+ DEFINE_OP(op_jsr)
+ DEFINE_OP(op_jtrue)
+ DEFINE_OP(op_load_varargs)
+ DEFINE_OP(op_loop)
+ DEFINE_OP(op_loop_if_less)
+ DEFINE_OP(op_loop_if_lesseq)
+ DEFINE_OP(op_loop_if_true)
+ DEFINE_OP(op_loop_if_false)
+ DEFINE_OP(op_lshift)
+ DEFINE_OP(op_method_check)
+ DEFINE_OP(op_mod)
+ DEFINE_OP(op_mov)
+ DEFINE_OP(op_mul)
+#if USE(JSVALUE32_64)
+ DEFINE_OP(op_negate)
+#endif
+ DEFINE_OP(op_neq)
+ DEFINE_OP(op_neq_null)
+ DEFINE_OP(op_new_array)
+ DEFINE_OP(op_new_func)
+ DEFINE_OP(op_new_func_exp)
+ DEFINE_OP(op_new_object)
+ DEFINE_OP(op_new_regexp)
+ DEFINE_OP(op_next_pname)
+ DEFINE_OP(op_not)
+ DEFINE_OP(op_nstricteq)
+ DEFINE_OP(op_pop_scope)
+ DEFINE_OP(op_post_dec)
+ DEFINE_OP(op_post_inc)
+ DEFINE_OP(op_pre_dec)
+ DEFINE_OP(op_pre_inc)
+ DEFINE_OP(op_profile_did_call)
+ DEFINE_OP(op_profile_will_call)
+ DEFINE_OP(op_push_new_scope)
+ DEFINE_OP(op_push_scope)
+ DEFINE_OP(op_put_by_id)
+ DEFINE_OP(op_put_by_index)
+ DEFINE_OP(op_put_by_val)
+ DEFINE_OP(op_put_getter)
+ DEFINE_OP(op_put_global_var)
+ DEFINE_OP(op_put_scoped_var)
+ DEFINE_OP(op_put_setter)
+ DEFINE_OP(op_resolve)
+ DEFINE_OP(op_resolve_base)
+ DEFINE_OP(op_ensure_property_exists)
+ DEFINE_OP(op_resolve_global)
+ DEFINE_OP(op_resolve_global_dynamic)
+ DEFINE_OP(op_resolve_skip)
+ DEFINE_OP(op_resolve_with_base)
+ DEFINE_OP(op_ret)
+ DEFINE_OP(op_call_put_result)
+ DEFINE_OP(op_ret_object_or_this)
+ DEFINE_OP(op_rshift)
+ DEFINE_OP(op_urshift)
+ DEFINE_OP(op_sret)
+ DEFINE_OP(op_strcat)
+ DEFINE_OP(op_stricteq)
+ DEFINE_OP(op_sub)
+ DEFINE_OP(op_switch_char)
+ DEFINE_OP(op_switch_imm)
+ DEFINE_OP(op_switch_string)
+ DEFINE_OP(op_tear_off_activation)
+ DEFINE_OP(op_tear_off_arguments)
+ DEFINE_OP(op_throw)
+ DEFINE_OP(op_throw_reference_error)
+ DEFINE_OP(op_to_jsnumber)
+ DEFINE_OP(op_to_primitive)
+
+ case op_get_array_length:
+ case op_get_by_id_chain:
+ case op_get_by_id_generic:
+ case op_get_by_id_proto:
+ case op_get_by_id_proto_list:
+ case op_get_by_id_self:
+ case op_get_by_id_self_list:
+ case op_get_by_id_getter_chain:
+ case op_get_by_id_getter_proto:
+ case op_get_by_id_getter_proto_list:
+ case op_get_by_id_getter_self:
+ case op_get_by_id_getter_self_list:
+ case op_get_by_id_custom_chain:
+ case op_get_by_id_custom_proto:
+ case op_get_by_id_custom_proto_list:
+ case op_get_by_id_custom_self:
+ case op_get_by_id_custom_self_list:
+ case op_get_string_length:
+ case op_put_by_id_generic:
+ case op_put_by_id_replace:
+ case op_put_by_id_transition:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
+ ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+
+#ifndef NDEBUG
+ // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeOffset = (unsigned)-1;
+#endif
+}
+
+
+void JIT::privateCompileLinkPass()
+{
+ unsigned jmpTableCount = m_jmpTable.size();
+ for (unsigned i = 0; i < jmpTableCount; ++i)
+ m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
+ m_jmpTable.clear();
+}
+
+void JIT::privateCompileSlowCases()
+{
+ Instruction* instructionsBegin = m_codeBlock->instructions().begin();
+
+ m_propertyAccessInstructionIndex = 0;
+ m_globalResolveInfoIndex = 0;
+ m_callLinkInfoIndex = 0;
+
+ for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
+#if USE(JSVALUE64)
+ killLastResultRegister();
+#endif
+
+ m_bytecodeOffset = iter->to;
+#ifndef NDEBUG
+ unsigned firstTo = m_bytecodeOffset;
+#endif
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
+
+ switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+ DEFINE_SLOWCASE_OP(op_add)
+ DEFINE_SLOWCASE_OP(op_bitand)
+ DEFINE_SLOWCASE_OP(op_bitnot)
+ DEFINE_SLOWCASE_OP(op_bitor)
+ DEFINE_SLOWCASE_OP(op_bitxor)
+ DEFINE_SLOWCASE_OP(op_call)
+ DEFINE_SLOWCASE_OP(op_call_eval)
+ DEFINE_SLOWCASE_OP(op_call_varargs)
+ DEFINE_SLOWCASE_OP(op_construct)
+ DEFINE_SLOWCASE_OP(op_convert_this)
+ DEFINE_SLOWCASE_OP(op_convert_this_strict)
+ DEFINE_SLOWCASE_OP(op_div)
+ DEFINE_SLOWCASE_OP(op_eq)
+ DEFINE_SLOWCASE_OP(op_get_by_id)
+ DEFINE_SLOWCASE_OP(op_get_arguments_length)
+ DEFINE_SLOWCASE_OP(op_get_by_val)
+ DEFINE_SLOWCASE_OP(op_get_argument_by_val)
+ DEFINE_SLOWCASE_OP(op_get_by_pname)
+ DEFINE_SLOWCASE_OP(op_check_has_instance)
+ DEFINE_SLOWCASE_OP(op_instanceof)
+ DEFINE_SLOWCASE_OP(op_jfalse)
+ DEFINE_SLOWCASE_OP(op_jnless)
+ DEFINE_SLOWCASE_OP(op_jless)
+ DEFINE_SLOWCASE_OP(op_jlesseq)
+ DEFINE_SLOWCASE_OP(op_jnlesseq)
+ DEFINE_SLOWCASE_OP(op_jtrue)
+ DEFINE_SLOWCASE_OP(op_load_varargs)
+ DEFINE_SLOWCASE_OP(op_loop_if_less)
+ DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
+ DEFINE_SLOWCASE_OP(op_loop_if_true)
+ DEFINE_SLOWCASE_OP(op_loop_if_false)
+ DEFINE_SLOWCASE_OP(op_lshift)
+ DEFINE_SLOWCASE_OP(op_method_check)
+ DEFINE_SLOWCASE_OP(op_mod)
+ DEFINE_SLOWCASE_OP(op_mul)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_negate)
+#endif
+ DEFINE_SLOWCASE_OP(op_neq)
+ DEFINE_SLOWCASE_OP(op_not)
+ DEFINE_SLOWCASE_OP(op_nstricteq)
+ DEFINE_SLOWCASE_OP(op_post_dec)
+ DEFINE_SLOWCASE_OP(op_post_inc)
+ DEFINE_SLOWCASE_OP(op_pre_dec)
+ DEFINE_SLOWCASE_OP(op_pre_inc)
+ DEFINE_SLOWCASE_OP(op_put_by_id)
+ DEFINE_SLOWCASE_OP(op_put_by_val)
+ DEFINE_SLOWCASE_OP(op_resolve_global)
+ DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
+ DEFINE_SLOWCASE_OP(op_rshift)
+ DEFINE_SLOWCASE_OP(op_urshift)
+ DEFINE_SLOWCASE_OP(op_stricteq)
+ DEFINE_SLOWCASE_OP(op_sub)
+ DEFINE_SLOWCASE_OP(op_to_jsnumber)
+ DEFINE_SLOWCASE_OP(op_to_primitive)
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
+ ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
+
+ emitJumpSlowToHot(jump(), 0);
+ }
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
+#endif
+ ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+
+#ifndef NDEBUG
+ // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeOffset = (unsigned)-1;
+#endif
+}
+
+JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
+{
+ // Could use a pop_m, but would need to offset the following instruction if so.
+ preserveReturnAddressAfterCall(regT2);
+ emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
+
+ Label beginLabel(this);
+
+ sampleCodeBlock(m_codeBlock);
+#if ENABLE(OPCODE_SAMPLING)
+ sampleInstruction(m_codeBlock->instructions().begin());
+#endif
+
+ Jump registerFileCheck;
+ if (m_codeBlock->codeType() == FunctionCode) {
+#if DFG_SUCCESS_STATS
+ static SamplingCounter counter("originalJIT");
+ emitCount(counter);
+#endif
+
+ // In the case of a fast linked call, we do not set this up in the caller.
+ emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+
+ addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
+ registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
+ }
+
+ Label functionBody = label();
+
+ privateCompileMainPass();
+ privateCompileLinkPass();
+ privateCompileSlowCases();
+
+ Label arityCheck;
+ if (m_codeBlock->codeType() == FunctionCode) {
+ registerFileCheck.link(this);
+ m_bytecodeOffset = 0;
+ JITStubCall(this, cti_register_file_check).call();
+#ifndef NDEBUG
+ m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+#endif
+ jump(functionBody);
+
+ arityCheck = label();
+ preserveReturnAddressAfterCall(regT2);
+ emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
+ branch32(Equal, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
+ restoreArgumentReference();
+
+ JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);
+
+ jump(beginLabel);
+ }
+
+ ASSERT(m_jmpTable.isEmpty());
+
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator);
+
+ // Translate vPC offsets into addresses in JIT generated code, for switch tables.
+ for (unsigned i = 0; i < m_switches.size(); ++i) {
+ SwitchRecord record = m_switches[i];
+ unsigned bytecodeOffset = record.bytecodeOffset;
+
+ if (record.type != SwitchRecord::String) {
+ ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
+ ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
+
+ record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
+
+ for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
+ unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
+ record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
+ }
+ } else {
+ ASSERT(record.type == SwitchRecord::String);
+
+ record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
+
+ StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
+ for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
+ unsigned offset = it->second.branchOffset;
+ it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ }
+ }
+ }
+
+ for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
+ HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
+ handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
+ }
+
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+
+ if (m_codeBlock->needsCallReturnIndices()) {
+ m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
+ m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
+ }
+
+ // Link absolute addresses for jsr
+ for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
+ patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
+ StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
+ info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
+ info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
+ }
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
+ CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+ info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
+ info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
+ info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
+ }
+#endif
+ unsigned methodCallCount = m_methodCallCompilationInfo.size();
+ m_codeBlock->addMethodCallLinkInfos(methodCallCount);
+ for (unsigned i = 0; i < methodCallCount; ++i) {
+ MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
+ info.structureLabel = patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare);
+ info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
+ }
+
+ if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
+ *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);
+
+ return patchBuffer.finalizeCode();
+}
+
+#if ENABLE(JIT_OPTIMIZE_CALL)
+
+void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
+{
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ // Currently we only link calls with the exact number of arguments.
+ // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
+ if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
+ ASSERT(!callLinkInfo->isLinked());
+ callLinkInfo->callee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
+ repatchBuffer.relink(callLinkInfo->hotPathOther, code);
+ }
+
+ // Patch the call so we do not continue to try to link.
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualCall());
+}
+
+void JIT::linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
+{
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ // Currently we only link calls with the exact number of arguments.
+ // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
+ if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
+ ASSERT(!callLinkInfo->isLinked());
+ callLinkInfo->callee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
+ repatchBuffer.relink(callLinkInfo->hotPathOther, code);
+ }
+
+ // Patch the call so we do not continue to try to link.
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualConstruct());
+}
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
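
The DEFINE_OP family of macros above turns privateCompileMainPass() into a single switch that records a label per bytecode offset, invokes a per-opcode emitter, and advances by OPCODE_LENGTH(name). A stripped-down sketch of that dispatch pattern with hypothetical opcodes and emitters (nothing below is from the patch):

#include <cstdio>
#include <vector>

enum OpcodeID { op_enter, op_add, op_end };

static unsigned opcodeLength(OpcodeID id)
{
    switch (id) {
    case op_enter: return 1;
    case op_add:   return 4; // opcode, dst, src1, src2
    case op_end:   return 2; // opcode, result
    }
    return 1;
}

// Mirrors the shape of DEFINE_OP/NEXT_OPCODE: emit, then step by the opcode's length.
#define MINI_DEFINE_OP(name) \
    case name: \
        emit_##name(pc); \
        m_bytecodeOffset += opcodeLength(name); \
        break;

struct MiniJIT {
    unsigned m_bytecodeOffset;

    void emit_op_enter(const unsigned*) { printf("emit enter\n"); }
    void emit_op_add(const unsigned* pc) { printf("emit add r%u = r%u + r%u\n", pc[1], pc[2], pc[3]); }
    void emit_op_end(const unsigned* pc) { printf("emit end r%u\n", pc[1]); }

    void compile(const std::vector<unsigned>& bytecode)
    {
        for (m_bytecodeOffset = 0; m_bytecodeOffset < bytecode.size();) {
            const unsigned* pc = &bytecode[m_bytecodeOffset];
            switch (static_cast<OpcodeID>(pc[0])) {
            MINI_DEFINE_OP(op_enter)
            MINI_DEFINE_OP(op_add)
            MINI_DEFINE_OP(op_end)
            }
        }
    }
};

#undef MINI_DEFINE_OP

int main()
{
    // op_enter; op_add r0 = r1 + r2; op_end r0
    std::vector<unsigned> bytecode = { op_enter, op_add, 0, 1, 2, op_end, 0 };
    MiniJIT jit;
    jit.compile(bytecode);
    return 0;
}
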
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JIT.h b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JIT.h
new file mode 100644
index 0000000..d05101f
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JIT.h
@@ -0,0 +1,1045 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JIT_h
+#define JIT_h
+
+#if ENABLE(JIT)
+
+// We've run into some problems where changing the size of the class JIT leads to
+// performance fluctuations. Try forcing alignment in an attempt to stabilize this.
+#if COMPILER(GCC)
+#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
+#else
+#define JIT_CLASS_ALIGNMENT
+#endif
+
+#define ASSERT_JIT_OFFSET_UNUSED(variable, actual, expected) ASSERT_WITH_MESSAGE_UNUSED(variable, actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(expected), static_cast<int>(actual));
+#define ASSERT_JIT_OFFSET(actual, expected) ASSERT_WITH_MESSAGE(actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(expected), static_cast<int>(actual));
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "JSInterfaceJIT.h"
+#include "Opcode.h"
+#include "Profiler.h"
+#include <bytecode/SamplingTool.h>
+
+namespace JSC {
+
+ class CodeBlock;
+ class JIT;
+ class JSPropertyNameIterator;
+ class Interpreter;
+ class Register;
+ class RegisterFile;
+ class ScopeChainNode;
+ class StructureChain;
+
+ struct CallLinkInfo;
+ struct Instruction;
+ struct OperandTypes;
+ struct PolymorphicAccessStructureList;
+ struct SimpleJumpTable;
+ struct StringJumpTable;
+ struct StructureStubInfo;
+
+ struct CallRecord {
+ MacroAssembler::Call from;
+ unsigned bytecodeOffset;
+ void* to;
+
+ CallRecord()
+ {
+ }
+
+ CallRecord(MacroAssembler::Call from, unsigned bytecodeOffset, void* to = 0)
+ : from(from)
+ , bytecodeOffset(bytecodeOffset)
+ , to(to)
+ {
+ }
+ };
+
+ struct JumpTable {
+ MacroAssembler::Jump from;
+ unsigned toBytecodeOffset;
+
+ JumpTable(MacroAssembler::Jump f, unsigned t)
+ : from(f)
+ , toBytecodeOffset(t)
+ {
+ }
+ };
+
+ struct SlowCaseEntry {
+ MacroAssembler::Jump from;
+ unsigned to;
+ unsigned hint;
+
+ SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0)
+ : from(f)
+ , to(t)
+ , hint(h)
+ {
+ }
+ };
+
+ struct SwitchRecord {
+ enum Type {
+ Immediate,
+ Character,
+ String
+ };
+
+ Type type;
+
+ union {
+ SimpleJumpTable* simpleJumpTable;
+ StringJumpTable* stringJumpTable;
+ } jumpTable;
+
+ unsigned bytecodeOffset;
+ unsigned defaultOffset;
+
+ SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset, Type type)
+ : type(type)
+ , bytecodeOffset(bytecodeOffset)
+ , defaultOffset(defaultOffset)
+ {
+ this->jumpTable.simpleJumpTable = jumpTable;
+ }
+
+ SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset)
+ : type(String)
+ , bytecodeOffset(bytecodeOffset)
+ , defaultOffset(defaultOffset)
+ {
+ this->jumpTable.stringJumpTable = jumpTable;
+ }
+ };
+
+ struct PropertyStubCompilationInfo {
+ MacroAssembler::Call callReturnLocation;
+ MacroAssembler::Label hotPathBegin;
+ };
+
+ struct StructureStubCompilationInfo {
+ MacroAssembler::DataLabelPtr hotPathBegin;
+ MacroAssembler::Call hotPathOther;
+ MacroAssembler::Call callReturnLocation;
+ };
+
+ struct MethodCallCompilationInfo {
+ MethodCallCompilationInfo(unsigned propertyAccessIndex)
+ : propertyAccessIndex(propertyAccessIndex)
+ {
+ }
+
+ MacroAssembler::DataLabelPtr structureToCompare;
+ unsigned propertyAccessIndex;
+ };
+
+ // Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions.
+ void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
+
+ class JIT : private JSInterfaceJIT {
+ friend class JITStubCall;
+
+ using MacroAssembler::Jump;
+ using MacroAssembler::JumpList;
+ using MacroAssembler::Label;
+
+ static const int patchGetByIdDefaultStructure = -1;
+ // Magic number - the initial offset must not be representable as a signed 8-bit value, or the X86Assembler
+ // will compress the displacement, and we may not be able to fit a patched offset.
+ static const int patchGetByIdDefaultOffset = 256;
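+ // (256 cannot be encoded as a signed 8-bit displacement, whose range is -128..127, so the assembler
+ // has to emit the 32-bit form, leaving room to patch in an arbitrary offset later.)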
+
+ public:
+ static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, CodePtr* functionEntryArityCheck = 0)
+ {
+ return JIT(globalData, codeBlock).privateCompile(functionEntryArityCheck);
+ }
+
+ static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame);
+ }
+
+ static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset);
+ }
+ static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame);
+ }
+ static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame);
+ }
+
+ static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame);
+ }
+
+ static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
+ }
+
+ static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, TrampolineStructure *trampolines)
+ {
+ if (!globalData->canUseJIT())
+ return;
+ JIT jit(globalData, 0);
+ jit.privateCompileCTIMachineTrampolines(executablePool, globalData, trampolines);
+ }
+
+ static CodePtr compileCTINativeCall(JSGlobalData* globalData, PassRefPtr<ExecutablePool> executablePool, NativeFunction func)
+ {
+ if (!globalData->canUseJIT())
+ return CodePtr();
+ JIT jit(globalData, 0);
+ return jit.privateCompileCTINativeCall(executablePool, globalData, func);
+ }
+
+ static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
+ static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct);
+ static void patchMethodCallProto(JSGlobalData&, CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*, ReturnAddressPtr);
+
+ static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ return jit.privateCompilePatchGetArrayLength(returnAddress);
+ }
+
+ static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
+ static void linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
+
+ private:
+ struct JSRInfo {
+ DataLabelPtr storeLocation;
+ Label target;
+
+ JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
+ : storeLocation(storeLocation)
+ , target(targetLocation)
+ {
+ }
+ };
+
+ JIT(JSGlobalData*, CodeBlock* = 0);
+
+ void privateCompileMainPass();
+ void privateCompileLinkPass();
+ void privateCompileSlowCases();
+ JITCode privateCompile(CodePtr* functionEntryArityCheck);
+ void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset);
+ void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
+ void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
+ void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct);
+
+ void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, TrampolineStructure *trampolines);
+ Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
+ CodePtr privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* data, NativeFunction func);
+ void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
+
+ void addSlowCase(Jump);
+ void addSlowCase(JumpList);
+ void addJump(Jump, int);
+ void emitJumpSlowToHot(Jump, int);
+
+ void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
+ void compileOpCallVarargs(Instruction* instruction);
+ void compileOpCallInitializeCallFrame();
+ void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
+ void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
+
+ enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
+ void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
+ bool isOperandConstantImmediateDouble(unsigned src);
+
+ void emitLoadDouble(unsigned index, FPRegisterID value);
+ void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);
+
+ void testPrototype(JSValue, JumpList& failureCases);
+
+#if USE(JSVALUE32_64)
+ bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
+
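+ // In the JSVALUE32_64 encoding a value is split into a 32-bit tag and a 32-bit payload, so the
+ // helpers below generally operate on (tag, payload) register pairs for each virtual register.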
+ void emitLoadTag(unsigned index, RegisterID tag);
+ void emitLoadPayload(unsigned index, RegisterID payload);
+
+ void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload);
+ void emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
+ void emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2);
+
+ void emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
+ void emitStore(unsigned index, const JSValue constant, RegisterID base = callFrameRegister);
+ void emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32 = false);
+ void emitStoreInt32(unsigned index, TrustedImm32 payload, bool indexIsInt32 = false);
+ void emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell = false);
+ void emitStoreBool(unsigned index, RegisterID payload, bool indexIsBool = false);
+ void emitStoreDouble(unsigned index, FPRegisterID value);
+
+ bool isLabeled(unsigned bytecodeOffset);
+ void map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
+ void unmap(RegisterID);
+ void unmap();
+ bool isMapped(unsigned virtualRegisterIndex);
+ bool getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload);
+ bool getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag);
+
+ void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex);
+ void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag);
+ void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, unsigned virtualRegisterIndex);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ void compileGetByIdHotPath();
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
+#endif
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset);
+ void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset);
+
+ // Arithmetic opcode helpers
+ void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
+ void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
+ void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
+
+#if CPU(X86)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 22;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 7;
+ static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 22;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
+ static const int patchOffsetGetByIdPutResult = 28;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 37;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 27;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 6;
+
+ static const int patchOffsetMethodCheckProtoObj = 11;
+ static const int patchOffsetMethodCheckProtoStruct = 18;
+ static const int patchOffsetMethodCheckPutFunction = 29;
+#elif CPU(ARM_TRADITIONAL)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 4;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 20;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 4;
+ static const int patchOffsetGetByIdBranchToSlowCase = 16;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 20;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
+ static const int patchOffsetGetByIdPutResult = 36;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 32;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 12;
+
+ static const int patchOffsetMethodCheckProtoObj = 12;
+ static const int patchOffsetMethodCheckProtoStruct = 20;
+ static const int patchOffsetMethodCheckPutFunction = 32;
+
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 36;
+ static const int sequenceGetByIdHotPathConstantSpace = 4;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 56;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 2;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 36;
+ static const int sequencePutByIdConstantSpace = 4;
+#elif CPU(ARM_THUMB2)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 36;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 48;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 26;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 36;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 48;
+ static const int patchOffsetGetByIdPutResult = 52;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 30;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 16;
+
+ static const int patchOffsetMethodCheckProtoObj = 24;
+ static const int patchOffsetMethodCheckProtoStruct = 34;
+ static const int patchOffsetMethodCheckPutFunction = 58;
+
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 36;
+ static const int sequenceGetByIdHotPathConstantSpace = 4;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 2;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 36;
+ static const int sequencePutByIdConstantSpace = 4;
+#elif CPU(MIPS)
+#if WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 16;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 56;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 72;
+ static const int patchOffsetGetByIdStructure = 16;
+ static const int patchOffsetGetByIdBranchToSlowCase = 48;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 56;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 76;
+ static const int patchOffsetGetByIdPutResult = 96;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 44;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 56;
+ static const int patchOffsetMethodCheckPutFunction = 88;
+#else // WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 12;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 48;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 64;
+ static const int patchOffsetGetByIdStructure = 12;
+ static const int patchOffsetGetByIdBranchToSlowCase = 44;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 48;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 64;
+ static const int patchOffsetGetByIdPutResult = 80;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 44;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 52;
+ static const int patchOffsetMethodCheckPutFunction = 84;
+#endif
+#elif CPU(SH4)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetGetByIdStructure = 6;
+ static const int patchOffsetPutByIdPropertyMapOffset = 24;
+ static const int patchOffsetPutByIdStructure = 6;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdBranchToSlowCase = 10;
+ static const int patchOffsetGetByIdPropertyMapOffset = 24;
+ static const int patchOffsetGetByIdPutResult = 32;
+
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 36;
+ static const int sequenceGetByIdHotPathConstantSpace = 5;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 26;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 2;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 36;
+ static const int sequencePutByIdConstantSpace = 5;
+
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 20;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 26;
+
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 20;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 26;
+
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 22;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 4;
+
+ static const int patchOffsetMethodCheckProtoObj = 12;
+ static const int patchOffsetMethodCheckProtoStruct = 20;
+ static const int patchOffsetMethodCheckPutFunction = 32;
+#else
+#error "JSVALUE32_64 not supported on this platform."
+#endif
+
+#else // USE(JSVALUE32_64)
+ void emitGetVirtualRegister(int src, RegisterID dst);
+ void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
+ void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
+
+ int32_t getConstantOperandImmediateInt(unsigned src);
+
+ void killLastResultRegister();
+
+ Jump emitJumpIfJSCell(RegisterID);
+ Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfJSCell(RegisterID);
+ Jump emitJumpIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
+#if USE(JSVALUE32_64)
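+ // Note: this #if is nested inside the #else branch of USE(JSVALUE32_64) above, so the two
+ // helpers below are never compiled in this configuration.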
+ JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfImmediateInteger(reg);
+ }
+
+ JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfNotImmediateInteger(reg);
+ }
+#endif
+ JIT::Jump emitJumpIfImmediateInteger(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
+ void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
+ void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+
+#if USE(JSVALUE32_64)
+ void emitFastArithDeTagImmediate(RegisterID);
+ Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
+#endif
+ void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
+ void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
+
+ void emitTagAsBoolImmediate(RegisterID reg);
+ void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+#if USE(JSVALUE64)
+ void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
+#else
+ void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes);
+#endif
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
+#endif
+ void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch);
+ void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
+
+#if CPU(X86_64)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdPropertyMapOffset = 31;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdPropertyMapOffset = 31;
+ static const int patchOffsetGetByIdPutResult = 31;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 64;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 41;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 9;
+
+ static const int patchOffsetMethodCheckProtoObj = 20;
+ static const int patchOffsetMethodCheckProtoStruct = 30;
+ static const int patchOffsetMethodCheckPutFunction = 50;
+#elif CPU(X86)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdPropertyMapOffset = 22;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 7;
+ static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdPropertyMapOffset = 22;
+ static const int patchOffsetGetByIdPutResult = 22;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 33;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 23;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 6;
+
+ static const int patchOffsetMethodCheckProtoObj = 11;
+ static const int patchOffsetMethodCheckProtoStruct = 18;
+ static const int patchOffsetMethodCheckPutFunction = 29;
+#elif CPU(ARM_THUMB2)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdPropertyMapOffset = 46;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 26;
+ static const int patchOffsetGetByIdPropertyMapOffset = 46;
+ static const int patchOffsetGetByIdPutResult = 50;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 28;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 16;
+
+ static const int patchOffsetMethodCheckProtoObj = 24;
+ static const int patchOffsetMethodCheckProtoStruct = 34;
+ static const int patchOffsetMethodCheckPutFunction = 58;
+#elif CPU(ARM_TRADITIONAL)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 4;
+ static const int patchOffsetPutByIdPropertyMapOffset = 20;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 4;
+ static const int patchOffsetGetByIdBranchToSlowCase = 16;
+ static const int patchOffsetGetByIdPropertyMapOffset = 20;
+ static const int patchOffsetGetByIdPutResult = 28;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 28;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 12;
+
+ static const int patchOffsetMethodCheckProtoObj = 12;
+ static const int patchOffsetMethodCheckProtoStruct = 20;
+ static const int patchOffsetMethodCheckPutFunction = 32;
+
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 28;
+ static const int sequenceGetByIdHotPathConstantSpace = 3;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 32;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 2;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 28;
+ static const int sequencePutByIdConstantSpace = 3;
+#elif CPU(MIPS)
+#if WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 16;
+ static const int patchOffsetPutByIdPropertyMapOffset = 68;
+ static const int patchOffsetGetByIdStructure = 16;
+ static const int patchOffsetGetByIdBranchToSlowCase = 48;
+ static const int patchOffsetGetByIdPropertyMapOffset = 68;
+ static const int patchOffsetGetByIdPutResult = 88;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 40;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 56;
+ static const int patchOffsetMethodCheckPutFunction = 88;
+#else // WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 12;
+ static const int patchOffsetPutByIdPropertyMapOffset = 60;
+ static const int patchOffsetGetByIdStructure = 12;
+ static const int patchOffsetGetByIdBranchToSlowCase = 44;
+ static const int patchOffsetGetByIdPropertyMapOffset = 60;
+ static const int patchOffsetGetByIdPutResult = 76;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 40;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 52;
+ static const int patchOffsetMethodCheckPutFunction = 84;
+#endif
+#endif
+#endif // USE(JSVALUE32_64)
+
+#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace, dst); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, 0)
+
+ void beginUninterruptedSequence(int, int);
+ void endUninterruptedSequence(int, int, int);
+
+#else
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst) do { endUninterruptedSequence(); } while (false)
+#endif
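+ // On assemblers with a constant pool, these sequences pre-reserve the instruction and constant
+ // space given by the sequence* constants above, so the pool is not dumped mid-sequence and the
+ // patchOffset* constants can rely on a fixed code layout.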
+
+ void emit_op_add(Instruction*);
+ void emit_op_bitand(Instruction*);
+ void emit_op_bitnot(Instruction*);
+ void emit_op_bitor(Instruction*);
+ void emit_op_bitxor(Instruction*);
+ void emit_op_call(Instruction*);
+ void emit_op_call_eval(Instruction*);
+ void emit_op_call_varargs(Instruction*);
+ void emit_op_call_put_result(Instruction*);
+ void emit_op_catch(Instruction*);
+ void emit_op_construct(Instruction*);
+ void emit_op_get_callee(Instruction*);
+ void emit_op_create_this(Instruction*);
+ void emit_op_convert_this(Instruction*);
+ void emit_op_convert_this_strict(Instruction*);
+ void emit_op_create_arguments(Instruction*);
+ void emit_op_debug(Instruction*);
+ void emit_op_del_by_id(Instruction*);
+ void emit_op_div(Instruction*);
+ void emit_op_end(Instruction*);
+ void emit_op_enter(Instruction*);
+ void emit_op_create_activation(Instruction*);
+ void emit_op_eq(Instruction*);
+ void emit_op_eq_null(Instruction*);
+ void emit_op_get_by_id(Instruction*);
+ void emit_op_get_arguments_length(Instruction*);
+ void emit_op_get_by_val(Instruction*);
+ void emit_op_get_argument_by_val(Instruction*);
+ void emit_op_get_by_pname(Instruction*);
+ void emit_op_get_global_var(Instruction*);
+ void emit_op_get_scoped_var(Instruction*);
+ void emit_op_init_lazy_reg(Instruction*);
+ void emit_op_check_has_instance(Instruction*);
+ void emit_op_instanceof(Instruction*);
+ void emit_op_jeq_null(Instruction*);
+ void emit_op_jfalse(Instruction*);
+ void emit_op_jmp(Instruction*);
+ void emit_op_jmp_scopes(Instruction*);
+ void emit_op_jneq_null(Instruction*);
+ void emit_op_jneq_ptr(Instruction*);
+ void emit_op_jnless(Instruction*);
+ void emit_op_jless(Instruction*);
+ void emit_op_jlesseq(Instruction*, bool invert = false);
+ void emit_op_jnlesseq(Instruction*);
+ void emit_op_jsr(Instruction*);
+ void emit_op_jtrue(Instruction*);
+ void emit_op_load_varargs(Instruction*);
+ void emit_op_loop(Instruction*);
+ void emit_op_loop_if_less(Instruction*);
+ void emit_op_loop_if_lesseq(Instruction*);
+ void emit_op_loop_if_true(Instruction*);
+ void emit_op_loop_if_false(Instruction*);
+ void emit_op_lshift(Instruction*);
+ void emit_op_method_check(Instruction*);
+ void emit_op_mod(Instruction*);
+ void emit_op_mov(Instruction*);
+ void emit_op_mul(Instruction*);
+ void emit_op_negate(Instruction*);
+ void emit_op_neq(Instruction*);
+ void emit_op_neq_null(Instruction*);
+ void emit_op_new_array(Instruction*);
+ void emit_op_new_func(Instruction*);
+ void emit_op_new_func_exp(Instruction*);
+ void emit_op_new_object(Instruction*);
+ void emit_op_new_regexp(Instruction*);
+ void emit_op_get_pnames(Instruction*);
+ void emit_op_next_pname(Instruction*);
+ void emit_op_not(Instruction*);
+ void emit_op_nstricteq(Instruction*);
+ void emit_op_pop_scope(Instruction*);
+ void emit_op_post_dec(Instruction*);
+ void emit_op_post_inc(Instruction*);
+ void emit_op_pre_dec(Instruction*);
+ void emit_op_pre_inc(Instruction*);
+ void emit_op_profile_did_call(Instruction*);
+ void emit_op_profile_will_call(Instruction*);
+ void emit_op_push_new_scope(Instruction*);
+ void emit_op_push_scope(Instruction*);
+ void emit_op_put_by_id(Instruction*);
+ void emit_op_put_by_index(Instruction*);
+ void emit_op_put_by_val(Instruction*);
+ void emit_op_put_getter(Instruction*);
+ void emit_op_put_global_var(Instruction*);
+ void emit_op_put_scoped_var(Instruction*);
+ void emit_op_put_setter(Instruction*);
+ void emit_op_resolve(Instruction*);
+ void emit_op_resolve_base(Instruction*);
+ void emit_op_ensure_property_exists(Instruction*);
+ void emit_op_resolve_global(Instruction*, bool dynamic = false);
+ void emit_op_resolve_global_dynamic(Instruction*);
+ void emit_op_resolve_skip(Instruction*);
+ void emit_op_resolve_with_base(Instruction*);
+ void emit_op_ret(Instruction*);
+ void emit_op_ret_object_or_this(Instruction*);
+ void emit_op_rshift(Instruction*);
+ void emit_op_sret(Instruction*);
+ void emit_op_strcat(Instruction*);
+ void emit_op_stricteq(Instruction*);
+ void emit_op_sub(Instruction*);
+ void emit_op_switch_char(Instruction*);
+ void emit_op_switch_imm(Instruction*);
+ void emit_op_switch_string(Instruction*);
+ void emit_op_tear_off_activation(Instruction*);
+ void emit_op_tear_off_arguments(Instruction*);
+ void emit_op_throw(Instruction*);
+ void emit_op_throw_reference_error(Instruction*);
+ void emit_op_to_jsnumber(Instruction*);
+ void emit_op_to_primitive(Instruction*);
+ void emit_op_unexpected_load(Instruction*);
+ void emit_op_urshift(Instruction*);
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ void softModulo();
+#endif
+
+ void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_convert_this_strict(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_check_has_instance(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&, bool invert = false);
+ void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_load_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_false(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_global_dynamic(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
+
+ void emitRightShift(Instruction*, bool isUnsigned);
+ void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
+
+ /* This function is deprecated. */
+ void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
+
+ void emitInitRegister(unsigned dst);
+
+ void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
+ void emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
+ void emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
+ void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
+ void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+
+ JSValue getConstantOperand(unsigned src);
+ bool isOperandConstantImmediateInt(unsigned src);
+ bool isOperandConstantImmediateChar(unsigned src);
+
+ bool atJumpTarget();
+
+ Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+ {
+ return iter++->from;
+ }
+ void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+ {
+ iter->from.link(this);
+ ++iter;
+ }
+ void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg);
+
+ Jump checkStructure(RegisterID reg, Structure* structure);
+
+ void restoreArgumentReference();
+ void restoreArgumentReferenceForTrampoline();
+
+ Call emitNakedCall(CodePtr function = CodePtr());
+
+ void preserveReturnAddressAfterCall(RegisterID);
+ void restoreReturnAddressBeforeReturn(RegisterID);
+ void restoreReturnAddressBeforeReturn(Address);
+
+ // Loads the character value of a single character string into dst.
+ void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
+
+ void emitTimeoutCheck();
+#ifndef NDEBUG
+ void printBytecodeOperandTypes(unsigned src1, unsigned src2);
+#endif
+
+#if ENABLE(SAMPLING_FLAGS)
+ void setSamplingFlag(int32_t);
+ void clearSamplingFlag(int32_t);
+#endif
+
+#if ENABLE(SAMPLING_COUNTERS)
+ void emitCount(AbstractSamplingCounter&, uint32_t = 1);
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+ void sampleInstruction(Instruction*, bool = false);
+#endif
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+ void sampleCodeBlock(CodeBlock*);
+#else
+ void sampleCodeBlock(CodeBlock*) {}
+#endif
+
+ Interpreter* m_interpreter;
+ JSGlobalData* m_globalData;
+ CodeBlock* m_codeBlock;
+
+ Vector<CallRecord> m_calls;
+ Vector<Label> m_labels;
+ Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
+ Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
+ Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo;
+ Vector<JumpTable> m_jmpTable;
+
+ unsigned m_bytecodeOffset;
+ Vector<JSRInfo> m_jsrSites;
+ Vector<SlowCaseEntry> m_slowCases;
+ Vector<SwitchRecord> m_switches;
+
+ unsigned m_propertyAccessInstructionIndex;
+ unsigned m_globalResolveInfoIndex;
+ unsigned m_callLinkInfoIndex;
+
+#if USE(JSVALUE32_64)
+ unsigned m_jumpTargetIndex;
+ unsigned m_mappedBytecodeOffset;
+ unsigned m_mappedVirtualRegisterIndex;
+ RegisterID m_mappedTag;
+ RegisterID m_mappedPayload;
+#else
+ int m_lastResultBytecodeRegister;
+#endif
+ unsigned m_jumpTargetsPosition;
+
+#ifndef NDEBUG
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+ Label m_uninterruptedInstructionSequenceBegin;
+ int m_uninterruptedConstantSequenceBegin;
+#endif
+#endif
+ static CodePtr stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool);
+ } JIT_CLASS_ALIGNMENT;
+
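+ // The op_loop* opcodes are compiled exactly like the corresponding jump opcodes, except that
+ // they also emit a timeout check at the top of each iteration.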
+ inline void JIT::emit_op_loop(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jmp(currentInstruction);
+ }
+
+ inline void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jtrue(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jtrue(currentInstruction, iter);
+ }
+
+ inline void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jfalse(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jfalse(currentInstruction, iter);
+ }
+
+ inline void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jless(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jless(currentInstruction, iter);
+ }
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JIT_h
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITArithmetic.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITArithmetic.cpp
new file mode 100644
index 0000000..c2a84c5
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -0,0 +1,1244 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JITStubs.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ // FIXME: would we be better off using 'emitJumpSlowCaseIfNotImmediateIntegers'? We *probably* ought to be consistent.
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ emitFastArithImmToInt(regT0);
+ emitFastArithImmToInt(regT2);
+ lshift32(regT2, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ UNUSED_PARAM(op1);
+ UNUSED_PARAM(op2);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_lshift);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ stubCall.call(result);
+}
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ // isOperandConstantImmediateInt(op2) => 1 SlowCase
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ // Mask with 0x1f as per ecma-262 11.7.2 step 7.
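+ // (For example, x >> 37 behaves like x >> 5, because 37 & 0x1f == 5.)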
+ rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ if (supportsFloatingPointTruncate()) {
+ Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
+ // supportsFloatingPointTruncate() => 3 SlowCases
+ addSlowCase(emitJumpIfNotImmediateNumber(regT0));
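+ // Unbox the double: in the JSVALUE64 encoding, adding tagTypeNumberRegister cancels the constant
+ // offset applied when a double is boxed, leaving the raw IEEE 754 bits for movePtrToDouble.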
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ lhsIsInt.link(this);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ } else {
+ // !supportsFloatingPointTruncate() => 2 SlowCases
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ }
+ emitFastArithImmToInt(regT2);
+ rshift32(regT2, regT0);
+ }
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
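+ // Each path below must link exactly as many slow cases as emit_op_rshift registered for it
+ // (see the "=> N SlowCase" comments in the fast path above).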
+ JITStubCall stubCall(this, cti_op_rshift);
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ } else {
+ if (supportsFloatingPointTruncate()) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ // We're reloading op1 to regT0 as we can no longer guarantee that
+ // we have not munged the operand. It may have already been shifted
+ // correctly, but it still will not have been tagged.
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(regT2);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ }
+ }
+
+ stubCall.call(result);
+}
+
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ // Slow case of urshift makes assumptions about what registers hold the
+ // shift arguments, so any changes must be updated there as well.
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitFastArithImmToInt(regT0);
+ int shift = getConstantOperand(op2).asInt32();
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ // A shift amount that is negative or a multiple of 32 may result in (essentially)
+ // a plain ToUint32 conversion of op1, which can produce a value we cannot represent
+ // as an immediate int.
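+ // (For example, (-1) >>> 0 evaluates to 4294967295, which does not fit in a signed 32-bit immediate.)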
+ if (shift < 0 || !(shift & 31))
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ return;
+ }
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ if (!isOperandConstantImmediateInt(op1))
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ emitFastArithImmToInt(regT0);
+ emitFastArithImmToInt(regT1);
+ urshift32(regT1, regT0);
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ int shift = getConstantOperand(op2).asInt32();
+ // op1 = regT0
+ linkSlowCase(iter); // int32 check
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ if (shift < 0 || !(shift & 31))
+ failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ if (shift < 0 || !(shift & 31))
+ linkSlowCase(iter); // failed to box in hot path
+ } else {
+ // op1 = regT0
+ // op2 = regT1
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // int32 check -- op1 is not an int
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
+ emitFastArithImmToInt(regT1);
+ urshift32(regT1, regT0);
+ failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ }
+
+ linkSlowCase(iter); // int32 check - op2 is not an int
+ linkSlowCase(iter); // Can't represent unsigned result as an immediate
+ }
+
+ JITStubCall stubCall(this, cti_op_urshift);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_jnless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the fast path:
+ // - int immediate to constant int immediate
+ // - constant int immediate to int immediate
+ // - int immediate to int immediate
+
+ if (isOperandConstantImmediateChar(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
+ addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ addJump(branch32(GreaterThanOrEqual, regT0, regT1), target);
+ }
+}
+
+void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the slow path:
+ // - floating-point number to constant int immediate
+ // - constant int immediate to floating-point number
+ // - floating-point number to floating-point number.
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+
+ int32_t op2imm = getConstantOperand(op2).asInt32();
+
+ move(Imm32(op2imm), regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+
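+            // op_jnless jumps when !(op1 < op2): with op1 unboxed in fpRegT0 and the constant
+            // op2 in fpRegT1, (fpRegT1 <= fpRegT0 or unordered) is exactly that condition.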
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+ fail1.link(this);
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+
+            int32_t op1imm = getConstantOperand(op1).asInt32();
+
+ move(Imm32(op1imm), regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+ fail1.link(this);
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+
+ } else {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT0, fpRegT0);
+ movePtrToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+ fail1.link(this);
+ fail2.link(this);
+ fail3.link(this);
+ }
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ }
+}
+
+void JIT::emit_op_jless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the fast path:
+ // - int immediate to constant int immediate
+ // - constant int immediate to int immediate
+ // - int immediate to int immediate
+
+ if (isOperandConstantImmediateChar(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+ addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
+ addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ addJump(branch32(LessThan, regT0, regT1), target);
+ }
+}
+
+void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the slow path:
+ // - floating-point number to constant int immediate
+ // - constant int immediate to floating-point number
+ // - floating-point number to floating-point number.
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+
+ int32_t op2imm = getConstantOperand(op2).asInt32();
+
+ move(Imm32(op2imm), regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+ fail1.link(this);
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+
+ int32_t op1imm = getConstantOperand(op1).asInt32();
+
+ move(Imm32(op1imm), regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+ fail1.link(this);
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+
+ } else {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT0, fpRegT0);
+ movePtrToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+ fail1.link(this);
+ fail2.link(this);
+ fail3.link(this);
+ }
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+ }
+}
+
+void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the fast path:
+ // - int immediate to constant int immediate
+ // - constant int immediate to int immediate
+ // - int immediate to int immediate
+
+ if (isOperandConstantImmediateChar(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(op2imm)), target);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT1, Imm32(op1imm)), target);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT1), target);
+ }
+}
+
+void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the slow path:
+ // - floating-point number to constant int immediate
+ // - constant int immediate to floating-point number
+ // - floating-point number to floating-point number.
+
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+
+            int32_t op2imm = getConstantOperand(op2).asInt32();
+
+ move(Imm32(op2imm), regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
+
+ fail1.link(this);
+ }
+
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+
+            int32_t op1imm = getConstantOperand(op1).asInt32();
+
+ move(Imm32(op1imm), regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
+
+ fail1.link(this);
+ }
+
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+
+ } else {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT0, fpRegT0);
+ movePtrToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
+
+ fail1.link(this);
+ fail2.link(this);
+ fail3.link(this);
+ }
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+ }
+}
+
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ emit_op_jlesseq(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitSlow_op_jlesseq(currentInstruction, iter, true);
+}
+
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t imm = getConstantOperandImmediateInt(op1);
+ andPtr(Imm32(imm), regT0);
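+        // A non-negative imm sign-extends with zero upper bits, so the 64-bit AND strips the
+        // int tag and the result must be re-tagged; a negative imm leaves the tag intact.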
+ if (imm >= 0)
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t imm = getConstantOperandImmediateInt(op2);
+ andPtr(Imm32(imm), regT0);
+ if (imm >= 0)
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ andPtr(regT1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ }
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ if (isOperandConstantImmediateInt(op1)) {
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT0);
+ stubCall.call(result);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ } else {
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call(result);
+ }
+}
+
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
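+    // Post-increment returns the original value: keep it in regT0 for the result and bump a copy in regT1.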
+ move(regT0, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT1);
+ emitPutVirtualRegister(srcDst, regT1);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_post_inc);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(result);
+}
+
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ move(regT0, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchSub32(Zero, TrustedImm32(1), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT1);
+ emitPutVirtualRegister(srcDst, regT1);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_post_dec);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(result);
+}
+
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitPutVirtualRegister(srcDst);
+}
+
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(srcDst, regT0);
+ notImm.link(this);
+ JITStubCall stubCall(this, cti_op_pre_inc);
+ stubCall.addArgument(regT0);
+ stubCall.call(srcDst);
+}
+
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(srcDst, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchSub32(Zero, TrustedImm32(1), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitPutVirtualRegister(srcDst);
+}
+
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(srcDst, regT0);
+ notImm.link(this);
+ JITStubCall stubCall(this, cti_op_pre_dec);
+ stubCall.addArgument(regT0);
+ stubCall.call(srcDst);
+}
+
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
+#if CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+#if CPU(X86) || CPU(X86_64)
+ // Make sure registers are correct for x86 IDIV instructions.
+ ASSERT(regT0 == X86Registers::eax);
+ ASSERT(regT1 == X86Registers::edx);
+ ASSERT(regT2 == X86Registers::ecx);
+#endif
+
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+
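+    // idiv faults on a zero divisor, and x % 0 must produce NaN, so a zero op2 is diverted to the stub.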
+ addSlowCase(branchPtr(Equal, regT2, TrustedImmPtr(JSValue::encode(jsNumber(0)))));
+ m_assembler.cdq();
+ m_assembler.idivl_r(regT2);
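+    // idivl leaves the quotient in eax (regT0) and the remainder in edx (regT1);
+    // the remainder is the result, re-tagged into regT0.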
+ emitFastArithReTagImmediate(regT1, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ stubCall.call(result);
+}
+
+#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+#else
+ ASSERT_NOT_REACHED();
+#endif
+}
+
+#endif // CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
+
+/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+
+void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
+{
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ if (opcodeID == op_add)
+ addSlowCase(branchAdd32(Overflow, regT1, regT0));
+ else if (opcodeID == op_sub)
+ addSlowCase(branchSub32(Overflow, regT1, regT0));
+ else {
+ ASSERT(opcodeID == op_mul);
+ addSlowCase(branchMul32(Overflow, regT1, regT0));
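+        // A zero product may actually need to be -0 (zero times a negative number), which
+        // cannot be represented as an immediate int, so it is also sent to the slow case.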
+ addSlowCase(branchTest32(Zero, regT0));
+ }
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+}
+
+void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
+{
+ // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
+ COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
+
+ Jump notImm1;
+ Jump notImm2;
+ if (op1HasImmediateIntFastCase) {
+ notImm2 = getSlowCase(iter);
+ } else if (op2HasImmediateIntFastCase) {
+ notImm1 = getSlowCase(iter);
+ } else {
+ notImm1 = getSlowCase(iter);
+ notImm2 = getSlowCase(iter);
+ }
+
+ linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
+ if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
+ linkSlowCase(iter);
+ emitGetVirtualRegister(op1, regT0);
+
+ Label stubFunctionCall(this);
+ JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
+ if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
+ emitGetVirtualRegister(op1, regT0);
+ emitGetVirtualRegister(op2, regT1);
+ }
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(result);
+ Jump end = jump();
+
+ if (op1HasImmediateIntFastCase) {
+ notImm2.link(this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ emitGetVirtualRegister(op1, regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT2);
+ } else if (op2HasImmediateIntFastCase) {
+ notImm1.link(this);
+ if (!types.first().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ emitGetVirtualRegister(op2, regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT2);
+ } else {
+        // If we get here, regT0 (op1) is not an int32; regT1 (op2) has not been checked yet.
+ notImm1.link(this);
+ if (!types.first().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT1);
+ Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
+ convertInt32ToDouble(regT1, fpRegT2);
+ Jump op2wasInteger = jump();
+
+        // If we get here, regT0 (op1) IS an int32 and regT1 (op2) is not.
+ notImm2.link(this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ convertInt32ToDouble(regT0, fpRegT1);
+ op2isDouble.link(this);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT2);
+ op2wasInteger.link(this);
+ }
+
+ if (opcodeID == op_add)
+ addDouble(fpRegT2, fpRegT1);
+ else if (opcodeID == op_sub)
+ subDouble(fpRegT2, fpRegT1);
+ else if (opcodeID == op_mul)
+ mulDouble(fpRegT2, fpRegT1);
+ else {
+ ASSERT(opcodeID == op_div);
+ divDouble(fpRegT2, fpRegT1);
+ }
+ moveDoubleToPtr(fpRegT1, regT0);
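+    // Box the double result: subtracting TagTypeNumber is the same as adding DoubleEncodeOffset (see the COMPILE_ASSERT above).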
+ subPtr(tagTypeNumberRegister, regT0);
+ emitPutVirtualRegister(result, regT0);
+
+ end.link(this);
+}
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
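+    // If either operand is statically known not to be a number, skip the inline arithmetic;
+    // the cti_op_add stub handles string concatenation and the other ToPrimitive cases.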
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else
+ compileBinaryArithOp(op_add, result, op1, op2, types);
+
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
+ return;
+
+ bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
+ bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
+ compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
+}
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ // For now, only plant a fast int case if the constant operand is greater than zero.
+ int32_t value;
+ if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ emitFastArithReTagImmediate(regT0, regT0);
+ } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ emitFastArithReTagImmediate(regT0, regT0);
+ } else
+ compileBinaryArithOp(op_mul, result, op1, op2, types);
+
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
+ bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
+ compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
+}
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (isOperandConstantImmediateDouble(op1)) {
+ emitGetVirtualRegister(op1, regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitLoadInt32ToDouble(op1, fpRegT0);
+ } else {
+ emitGetVirtualRegister(op1, regT0);
+ if (!types.first().definitelyIsNumber())
+ emitJumpSlowCaseIfNotImmediateNumber(regT0);
+ Jump notInt = emitJumpIfNotImmediateInteger(regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+ Jump skipDoubleLoad = jump();
+ notInt.link(this);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ skipDoubleLoad.link(this);
+ }
+
+ if (isOperandConstantImmediateDouble(op2)) {
+ emitGetVirtualRegister(op2, regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoadInt32ToDouble(op2, fpRegT1);
+ } else {
+ emitGetVirtualRegister(op2, regT1);
+ if (!types.second().definitelyIsNumber())
+ emitJumpSlowCaseIfNotImmediateNumber(regT1);
+ Jump notInt = emitJumpIfNotImmediateInteger(regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ Jump skipDoubleLoad = jump();
+ notInt.link(this);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+ skipDoubleLoad.link(this);
+ }
+ divDouble(fpRegT1, fpRegT0);
+
+ // Double result.
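+    // The quotient is always boxed as a double; this path makes no attempt to detect an integral result.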
+ moveDoubleToPtr(fpRegT0, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
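+        // emit_op_div plants no slow cases when both operands are known numbers, so this
+        // path should be unreachable; trap in debug builds if it is ever taken.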
+#ifndef NDEBUG
+ breakpoint();
+#endif
+ return;
+ }
+ if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter);
+ }
+ if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
+ if (!types.second().definitelyIsNumber())
+ linkSlowCase(iter);
+ }
+ // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
+ JITStubCall stubCall(this, cti_op_div);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+}
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ compileBinaryArithOp(op_sub, result, op1, op2, types);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
+}
+
+/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
+
+} // namespace JSC
+
+#endif // USE(JSVALUE64)
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
new file mode 100644
index 0000000..6865489
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -0,0 +1,1424 @@
+/*
+* Copyright (C) 2008 Apple Inc. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+* 1. Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* 2. Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in the
+* documentation and/or other materials provided with the distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JITStubs.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_negate(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
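+    // 0 and INT_MIN both have (payload & 0x7fffffff) == 0: -0 must be a double and -INT_MIN
+    // overflows int32, so both go to the slow case.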
+ addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
+ neg32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+
+ Jump end = jump();
+
+ srcNotInt.link(this);
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+
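+    // For a double, negation just flips the sign bit in the high word of the value.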
+ xor32(TrustedImm32(1 << 31), regT1);
+ store32(regT1, tagFor(dst));
+ if (dst != src)
+ store32(regT0, payloadFor(dst));
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // 0x7fffffff check
+ linkSlowCase(iter); // double check
+
+ JITStubCall stubCall(this, cti_op_negate);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_jnless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Character less.
+ if (isOperandConstantImmediateChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op1)) {
+ // Int32 less.
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+}
+
+void JIT::emit_op_jless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Character less.
+ if (isOperandConstantImmediateChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+ }
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Character less.
+ if (isOperandConstantImmediateChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(invert ? op_jnlesseq : op_jlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+}
+
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ emit_op_jlesseq(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitSlow_op_jlesseq(currentInstruction, iter, true);
+}
+
+// LeftShift (<<)
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
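+    // A constant int op1 needs no tag check; only the shift amount in regT3:regT2 must be verified.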
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ lshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_lshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// RightShift (>>) and UnsignedRightShift (>>>) helper
+
+void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+    // The slow case for rshift assumes particular registers hold the shift
+    // arguments, so any changes here must be mirrored there.
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ int shift = getConstantOperand(op2).asInt32();
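+        // Shift counts are taken modulo 32, hence the & 0x1f below.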
+ if (isUnsigned) {
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+            // An unsigned shift by a negative count or by a multiple of 32 is
+            // (essentially) just a toUint conversion, which can produce a value
+            // we cannot represent as an immediate int.
+ if (shift < 0 || !(shift & 31))
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ } else if (shift) { // signed right shift by zero is simply toInt conversion
+ rshift32(Imm32(shift & 0x1f), regT0);
+ }
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ if (isUnsigned) {
+ urshift32(regT2, regT0);
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ } else
+ rshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
+
+void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ int shift = getConstantOperand(op2).asInt32();
+ // op1 = regT1:regT0
+ linkSlowCase(iter); // int32 check
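+        // If op1 is actually a double, try truncating it to an int32 and redoing the shift
+        // inline before falling back to the stub.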
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
+ emitLoadDouble(op1, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (isUnsigned) {
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ if (shift < 0 || !(shift & 31))
+ failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
+ } else if (shift)
+ rshift32(Imm32(shift & 0x1f), regT0);
+ emitStoreInt32(dst, regT0, false);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ if (isUnsigned && (shift < 0 || !(shift & 31)))
+ linkSlowCase(iter); // failed to box in hot path
+ } else {
+ // op1 = regT1:regT0
+ // op2 = regT3:regT2
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // int32 check -- op1 is not an int
+ if (supportsFloatingPointTruncate()) {
+ Jump notDouble = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)); // op1 is not a double
+ emitLoadDouble(op1, fpRegT0);
+ Jump notInt = branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)); // op2 is not an int
+ Jump cantTruncate = branchTruncateDoubleToInt32(fpRegT0, regT0);
+ if (isUnsigned)
+ urshift32(regT2, regT0);
+ else
+ rshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, false);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ notDouble.link(this);
+ notInt.link(this);
+ cantTruncate.link(this);
+ }
+ }
+
+ linkSlowCase(iter); // int32 check - op2 is not an int
+ if (isUnsigned)
+ linkSlowCase(iter); // Can't represent unsigned result as an immediate
+ }
+
+ JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// RightShift (>>)
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ emitRightShift(currentInstruction, false);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitRightShiftSlowCase(currentInstruction, iter, false);
+}
+
+// UnsignedRightShift (>>>)
+
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ emitRightShift(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitRightShiftSlowCase(currentInstruction, iter, true);
+}
+
+// BitAnd (&)
+
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
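+    // getOperandConstantImmediateInt succeeds when one operand is a constant int, leaving
+    // op naming the non-constant operand and constant holding the immediate value.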
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ and32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ and32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitOr (|)
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ or32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ or32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitXor (^)
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ xor32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ xor32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitxor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitNot (~)
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+
+ not32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+}
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitnot);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+// PostInc (i++)
+
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+
+ if (dst == srcDst) // x = x++ is a noop for ints.
+ return;
+
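+    // Store the original value as the result, then increment and write the new value back to srcDst.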
+ emitStoreInt32(dst, regT0);
+
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PostDec (i--)
+
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+
+ if (dst == srcDst) // x = x-- is a noop for ints.
+ return;
+
+ emitStoreInt32(dst, regT0);
+
+ addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(TrustedImm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PreInc (++i)
+
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// PreDec (--i)
+
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// Addition (+)
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+ return;
+ }
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchAdd32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ addDouble(fpRegT1, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
+ return;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter); // non-sse case
+ else {
+ ResultType opType = op == op1 ? types.first() : types.second();
+ if (!opType.definitelyIsNumber())
+ linkSlowCase(iter); // double check
+ }
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Subtraction (-)
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchSub32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
+ linkSlowCase(iter); // int32 or double check
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_sub);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
+{
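+    // Shared double path for add/sub/mul/div and the fused less-than jumps: each case below converts any int32 operand to a double, performs the operation, and either stores a double result or emits the branch.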
+ JumpList end;
+
+ if (!notInt32Op1.empty()) {
+ // Double case 1: Op1 is not int32; Op2 is unknown.
+ notInt32Op1.link(this);
+
+ ASSERT(op1IsInRegisters);
+
+ // Verify Op1 is double.
+ if (!types.first().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+
+ if (!op2IsInRegisters)
+ emitLoad(op2, regT3, regT2);
+
+ Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));
+
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT2, fpRegT0);
+ Jump doTheMath = jump();
+
+ // Load Op2 as double into double register.
+ doubleOp2.link(this);
+ emitLoadDouble(op2, fpRegT0);
+
+ // Do the math.
+ doTheMath.link(this);
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op1, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op1, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op1, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_div:
+ emitLoadDouble(op1, fpRegT1);
+ divDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_jnless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ case op_jless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
+ break;
+ case op_jlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ if (!notInt32Op2.empty())
+ end.append(jump());
+ }
+
+ if (!notInt32Op2.empty()) {
+ // Double case 2: Op1 is int32; Op2 is not int32.
+ notInt32Op2.link(this);
+
+ ASSERT(op2IsInRegisters);
+
+ if (!op1IsInRegisters)
+ emitLoadPayload(op1, regT0);
+
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ // Verify op2 is double.
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));
+
+ // Do the math.
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op2, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op2, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op2, fpRegT2);
+ subDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_div:
+ emitLoadDouble(op2, fpRegT2);
+ divDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_jnless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ case op_jless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ case op_jlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ end.link(this);
+}
+
+// Multiplication (*)
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ // Int32 case.
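+    // Keep a copy of op1's payload in regT3: branchMul32 clobbers regT0, and the slow path needs both operand signs for the negative-zero check.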
+ move(regT0, regT3);
+ addSlowCase(branchMul32(Overflow, regT2, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ Jump overflow = getSlowCase(iter); // overflow check
+ linkSlowCase(iter); // zero result check
+
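+    // A zero product is +0 only if neither operand was negative; if either sign bit is set the correct result is -0, so take the stub call instead.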
+ Jump negZero = branchOr32(Signed, regT2, regT3);
+ emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
+
+ negZero.link(this);
+ overflow.link(this);
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ }
+
+ if (supportsFloatingPoint()) {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ Label jitStubCall(this);
+ JITStubCall stubCall(this, cti_op_mul);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Division (/)
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(jump());
+ return;
+ }
+
+ // Int32 divide.
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ JumpList end;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT0, fpRegT0);
+ convertInt32ToDouble(regT2, fpRegT1);
+ divDouble(fpRegT1, fpRegT0);
+
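+    // Convert the quotient back to an int32 when the conversion is exact; if it is inexact or yields -0, the result is stored as a double below.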
+ JumpList doubleResult;
+ branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);
+
+ // Int32 result.
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+ end.append(jump());
+
+ // Double result.
+ doubleResult.link(this);
+ emitStoreDouble(dst, fpRegT0);
+ end.append(jump());
+
+ // Double divide.
+ emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter);
+ else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_div);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Mod (%)
+
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
+#if CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+#if CPU(X86) || CPU(X86_64)
+ // Make sure registers are correct for x86 IDIV instructions.
+ ASSERT(regT0 == X86Registers::eax);
+ ASSERT(regT1 == X86Registers::edx);
+ ASSERT(regT2 == X86Registers::ecx);
+ ASSERT(regT3 == X86Registers::ebx);
+#endif
+
+ if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
+ emitLoad(op1, regT1, regT0);
+ move(Imm32(getConstantOperand(op2).asInt32()), regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ if (getConstantOperand(op2).asInt32() == -1)
+ addSlowCase(branch32(Equal, regT0, TrustedImm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ addSlowCase(branch32(Equal, regT0, TrustedImm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ addSlowCase(branch32(Equal, regT2, TrustedImm32(0))); // divide by 0
+ }
+
+ move(regT0, regT3); // Save dividend payload, in case of 0.
+#if CPU(X86) || CPU(X86_64)
+ m_assembler.cdq();
+ m_assembler.idivl_r(regT2);
+#elif CPU(MIPS)
+ m_assembler.div(regT0, regT2);
+ m_assembler.mfhi(regT1);
+#endif
+
+ // If the remainder is zero and the dividend is negative, the result is -0.
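+    // e.g. (-4) % 2 evaluates to -0, which cannot be represented as an int32, so a boxed double is stored instead.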
+ Jump storeResult1 = branchTest32(NonZero, regT1);
+ Jump storeResult2 = branchTest32(Zero, regT3, TrustedImm32(0x80000000)); // not negative
+ emitStore(dst, jsNumber(-0.0));
+ Jump end = jump();
+
+ storeResult1.link(this);
+ storeResult2.link(this);
+ emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
+ linkSlowCase(iter); // int32 check
+ if (getConstantOperand(op2).asInt32() == -1)
+ linkSlowCase(iter); // 0x80000000 check
+ } else {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // 0 check
+ linkSlowCase(iter); // 0x80000000 check
+ }
+
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ addSlowCase(branch32(Equal, regT2, TrustedImm32(0)));
+
+ emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());
+
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+#else
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+#endif
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ UNUSED_PARAM(currentInstruction);
+ UNUSED_PARAM(iter);
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(result);
+#else
+ ASSERT_NOT_REACHED();
+#endif
+}
+
+#endif // CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCall.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCall.cpp
new file mode 100644
index 0000000..77c2a698
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCall.cpp
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::compileOpCallInitializeCallFrame()
+{
+ // regT0 holds callee, regT1 holds argCount
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT3); // scopeChain
+ emitPutIntToCallFrameHeader(regT1, RegisterFile::ArgumentCount);
+ emitPutCellToCallFrameHeader(regT0, RegisterFile::Callee);
+ emitPutCellToCallFrameHeader(regT3, RegisterFile::ScopeChain);
+}
+
+void JIT::emit_op_call_put_result(Instruction* instruction)
+{
+ int dst = instruction[1].u.operand;
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::compileOpCallVarargs(Instruction* instruction)
+{
+ int callee = instruction[1].u.operand;
+ int argCountRegister = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ emitGetVirtualRegister(argCountRegister, regT1);
+ emitFastArithImmToInt(regT1);
+ emitGetVirtualRegister(callee, regT0);
+ addPtr(Imm32(registerOffset), regT1, regT2);
+
+ // Check for JSFunctions.
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ mul32(TrustedImm32(sizeof(Register)), regT2, regT2);
+ intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
+ addPtr(Imm32((int32_t)offset), regT2, regT3);
+ addPtr(callFrameRegister, regT3);
+ storePtr(callFrameRegister, regT3);
+ addPtr(regT2, callFrameRegister);
+ emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallVarargsSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_call_NotJSFunction);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+#if !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ // Handle eval
+ Jump wasEval;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee, regT0);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval = branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue())));
+ }
+
+ emitGetVirtualRegister(callee, regT0);
+
+ // Check for JSFunctions.
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
+
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
+{
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ // Handle eval
+ Jump wasEval;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee, regT0);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval = branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue())));
+ }
+
+ // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
+ // This deliberately leaves the callee in ecx, used when setting up the stack frame below
+ emitGetVirtualRegister(callee, regT0);
+ DataLabelPtr addressOfLinkedFunctionCheck;
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(JSValue::encode(JSValue())));
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ addSlowCase(jumpToSlow);
+ ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow), patchOffsetOpCallCompareToJump);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+
+    // The following is the fast case, only used when a callee can be linked.
+
+    // Fast version of stack frame initialization, directly relative to edi.
+    // Note that this does not set up RegisterFile::CodeBlock, which is set in the callee.
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1); // newScopeChain
+
+ store32(TrustedImm32(Int32Tag), intTagFor(registerOffset + RegisterFile::ArgumentCount));
+ store32(Imm32(argCount), intPayloadFor(registerOffset + RegisterFile::ArgumentCount));
+ storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
+ storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
+ storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
+
+ // Call to the callee
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
+{
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ linkSlowCase(iter);
+
+ // Fast check for JS function.
+ Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT0);
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
+
+    // Done! Jump back to the hot path.
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
+
+ // This handles host functions
+ callLinkFailNotObject.link(this);
+ callLinkFailNotJSFunction.link(this);
+
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_CALL)
+
+} // namespace JSC
+
+#endif // USE(JSVALUE64)
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCall32_64.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCall32_64.cpp
new file mode 100644
index 0000000..9ffa495
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::compileOpCallInitializeCallFrame()
+{
+ // regT0 holds callee, regT1 holds argCount
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT3); // scopeChain
+ emitPutIntToCallFrameHeader(regT1, RegisterFile::ArgumentCount);
+ emitPutCellToCallFrameHeader(regT0, RegisterFile::Callee);
+ emitPutCellToCallFrameHeader(regT3, RegisterFile::ScopeChain);
+}
+
+void JIT::emit_op_call_put_result(Instruction* instruction)
+{
+ int dst = instruction[1].u.operand;
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::compileOpCallVarargs(Instruction* instruction)
+{
+ int callee = instruction[1].u.operand;
+ int argCountRegister = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ emitLoad(callee, regT1, regT0);
+ emitLoadPayload(argCountRegister, regT2); // argCount
+ addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset
+
+ emitJumpSlowCaseIfNotJSCell(callee, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ mul32(TrustedImm32(sizeof(Register)), regT3, regT3);
+ addPtr(callFrameRegister, regT3);
+ store32(TrustedImm32(JSValue::CellTag), tagFor(RegisterFile::CallerFrame, regT3));
+ storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame, regT3));
+ move(regT3, callFrameRegister);
+
+ move(regT2, regT1); // argCount
+
+ emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int callee = instruction[1].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, callee);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_call_NotJSFunction);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(regT3);
+ stubCall.addArgument(regT2);
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ emitLoad(dst, regT1, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+}
+
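+// Implements the constructor return rule: if the returned value is an object it is used as the result, otherwise the 'this' object is returned in its place.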
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned thisReg = currentInstruction[2].u.operand;
+
+ emitLoad(result, regT1, regT0);
+ Jump notJSCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump notObject = branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+ notJSCell.link(this);
+ notObject.link(this);
+ emitLoad(thisReg, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+}
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallVarargsSlowCase(currentInstruction, iter);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCallVarargs(currentInstruction);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+#if !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ Jump wasEval;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
+ }
+
+ emitLoad(callee, regT1, regT0);
+
+ emitJumpSlowCaseIfNotJSCell(callee, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ store32(TrustedImm32(JSValue::CellTag), tagFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
+ storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(TrustedImm32(argCount), regT1);
+
+ emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
+
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, callee);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ Jump wasEval;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
+ }
+
+ emitLoad(callee, regT1, regT0);
+
+ DataLabelPtr addressOfLinkedFunctionCheck;
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ addSlowCase(jumpToSlow);
+ ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow), patchOffsetOpCallCompareToJump);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+
+    // The following is the fast case, only used when a callee can be linked.
+
+    // Fast version of stack frame initialization, directly relative to edi.
+    // Note that this does not set up RegisterFile::CodeBlock, which is set in the callee.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT2);
+
+ store32(TrustedImm32(JSValue::Int32Tag), tagFor(registerOffset + RegisterFile::ArgumentCount));
+ store32(Imm32(argCount), payloadFor(registerOffset + RegisterFile::ArgumentCount));
+ storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
+ emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
+ store32(TrustedImm32(JSValue::CellTag), tagFor(registerOffset + RegisterFile::ScopeChain));
+ store32(regT2, payloadFor(registerOffset + RegisterFile::ScopeChain));
+ addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
+
+ // Call to the callee
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ // Fast check for JS function.
+ Jump callLinkFailNotObject = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ store32(TrustedImm32(JSValue::CellTag), tagFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
+ storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
+
+    // Done! Jump back to the hot path.
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
+
+ // This handles host functions
+ callLinkFailNotObject.link(this);
+ callLinkFailNotJSFunction.link(this);
+
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_CALL)
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCode.h b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCode.h
new file mode 100644
index 0000000..7346fd5
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITCode.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITCode_h
+#define JITCode_h
+
+#if ENABLE(JIT)
+
+#include "CallFrame.h"
+#include "JSValue.h"
+#include "MacroAssemblerCodeRef.h"
+#include "Profiler.h"
+
+namespace JSC {
+
+ class JSGlobalData;
+ class RegisterFile;
+
+ class JITCode {
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
+ public:
+ JITCode()
+ {
+ }
+
+ JITCode(const CodeRef ref)
+ : m_ref(ref)
+ {
+ }
+
+ bool operator !() const
+ {
+ return !m_ref.m_code.executableAddress();
+ }
+
+ CodePtr addressForCall()
+ {
+ return m_ref.m_code;
+ }
+
+    // This function returns the offset in bytes of 'pointerIntoCode' into
+    // this block of code. The pointer provided must point into this block
+    // of code. It is ASSERTed that no code block is larger than 4GB.
+ unsigned offsetOf(void* pointerIntoCode)
+ {
+ intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.m_code.executableAddress());
+ ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
+ return static_cast<unsigned>(result);
+ }
+
+ // Execute the code!
+ inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData)
+ {
+ JSValue result = JSValue::decode(ctiTrampoline(m_ref.m_code.executableAddress(), registerFile, callFrame, 0, Profiler::enabledProfilerReference(), globalData));
+ return globalData->exception ? jsNull() : result;
+ }
+
+ void* start()
+ {
+ return m_ref.m_code.dataLocation();
+ }
+
+ size_t size()
+ {
+ ASSERT(m_ref.m_code.executableAddress());
+ return m_ref.m_size;
+ }
+
+ ExecutablePool* getExecutablePool()
+ {
+ return m_ref.m_executablePool.get();
+ }
+
+    // Host functions are a bit special; they have an m_code pointer but they
+    // do not individually ref the executable pool containing the trampoline.
+ static JITCode HostFunction(CodePtr code)
+ {
+ return JITCode(code.dataLocation(), 0, 0);
+ }
+
+ private:
+ JITCode(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
+ : m_ref(code, executablePool, size)
+ {
+ }
+
+ CodeRef m_ref;
+ };
+
+};
+
+#endif
+
+#endif
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITInlineMethods.h b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITInlineMethods.h
new file mode 100644
index 0000000..693ae98
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -0,0 +1,858 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITInlineMethods_h
+#define JITInlineMethods_h
+
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+/* Deprecated: Please use JITStubCall instead. */
+
+ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
+{
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
+ peek(dst, argumentStackOffset);
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
+}
+
+ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
+{
+ ASSERT(m_codeBlock->isConstantRegisterIndex(src));
+ return m_codeBlock->getConstant(src);
+}
+
+ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+{
+ storePtr(from, payloadFor(entry, callFrameRegister));
+}
+
+ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+{
+#if USE(JSVALUE32_64)
+ store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
+#endif
+ storePtr(from, payloadFor(entry, callFrameRegister));
+}
+
+ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+{
+ store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
+ store32(from, intPayloadFor(entry, callFrameRegister));
+}
+
+ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
+{
+ storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
+}
+
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ loadPtr(Address(from, entry * sizeof(Register)), to);
+#if USE(JSVALUE64)
+ killLastResultRegister();
+#endif
+}
+
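+// Fast path for single-character strings: bails out to |failures| unless src is a non-rope JSString of length 1, then loads its single UTF-16 code unit into dst.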
+ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
+{
+ failures.append(branchPtr(NotEqual, Address(src), TrustedImmPtr(m_globalData->jsStringVPtr)));
+ failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+ failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
+ loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
+ loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
+ load16(MacroAssembler::Address(dst, 0), dst);
+}
+
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ load32(Address(from, entry * sizeof(Register)), to);
+#if USE(JSVALUE64)
+ killLastResultRegister();
+#endif
+}
+
+ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ Call nakedCall = nearCall();
+ m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
+ return nakedCall;
+}
+
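+// Returns true when the current bytecode offset is a jump target, advancing a cached cursor through the code block's sorted jump-target list as it scans.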
+ALWAYS_INLINE bool JIT::atJumpTarget()
+{
+ while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
+ if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
+ return true;
+ ++m_jumpTargetsPosition;
+ }
+ return false;
+}
+
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+
+ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
+{
+ JSInterfaceJIT::beginUninterruptedSequence();
+#if CPU(ARM_TRADITIONAL)
+#ifndef NDEBUG
+ // Ensure the label after the sequence can also fit
+ insnSpace += sizeof(ARMWord);
+ constSpace += sizeof(uint64_t);
+#endif
+
+ ensureSpace(insnSpace, constSpace);
+
+#elif CPU(SH4)
+#ifndef NDEBUG
+ insnSpace += sizeof(SH4Word);
+ constSpace += sizeof(uint64_t);
+#endif
+
+ m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
+#endif
+
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+#ifndef NDEBUG
+ m_uninterruptedInstructionSequenceBegin = label();
+ m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
+#endif
+#endif
+}
+
+ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
+{
+ UNUSED_PARAM(dst);
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+    /* There are several cases where the uninterrupted sequence is larger than
+     * the maximum offset required for patching the same sequence. E.g., if the
+     * last macroassembler instruction in an uninterrupted sequence is a stub
+     * call, it emits store instruction(s) which should not be included when
+     * calculating the length of the uninterrupted sequence. So insnSpace and
+     * constSpace should be treated as upper limits rather than hard limits.
+     */
+#if CPU(SH4)
+ if ((dst > 15) || (dst < -16)) {
+ insnSpace += 8;
+ constSpace += 2;
+ }
+
+ if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
+ insnSpace += 8;
+#endif
+ ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
+ ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
+#endif
+ JSInterfaceJIT::endUninterruptedSequence();
+}
+
+#endif
+
+#if CPU(ARM)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ move(linkRegister, reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ move(reg, linkRegister);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ loadPtr(address, linkRegister);
+}
+#elif CPU(SH4)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ m_assembler.stspr(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ m_assembler.ldspr(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ loadPtrLinkReg(address);
+}
+
+#elif CPU(MIPS)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ move(returnAddressRegister, reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ move(reg, returnAddressRegister);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ loadPtr(address, returnAddressRegister);
+}
+
+#else // CPU(X86) || CPU(X86_64)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ pop(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ push(reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ push(address);
+}
+
+#endif
+
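+// Points the first C argument register at the JITStackFrame on the machine stack and refreshes its cached callFrame slot before a stub call.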
+ALWAYS_INLINE void JIT::restoreArgumentReference()
+{
+ move(stackPointerRegister, firstArgumentRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+}
+
+ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
+{
+#if CPU(X86)
+ // Within a trampoline the return address will be on the stack at this point.
+ addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
+#elif CPU(ARM)
+ move(stackPointerRegister, firstArgumentRegister);
+#elif CPU(SH4)
+ move(stackPointerRegister, firstArgumentRegister);
+#endif
+ // In the trampoline on x86-64, the first argument register is not overwritten.
+}
+
+ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
+{
+ return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
+}
+
+ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
+{
+ if (!m_codeBlock->isKnownNotImmediate(vReg))
+ linkSlowCase(iter);
+}
+
+ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
+}
+
+ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ const JumpList::JumpVector& jumpVector = jumpList.jumps();
+ size_t size = jumpVector.size();
+ for (size_t i = 0; i < size; ++i)
+ m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
+}
+
+ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
+}
+
+#if ENABLE(SAMPLING_FLAGS)
+ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+}
+
+ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+}
+#endif
+
+#if ENABLE(SAMPLING_COUNTERS)
+ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
+{
+#if CPU(X86_64) // Or any other 64-bit platform.
+ addPtr(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
+#elif CPU(X86) // Or any other little-endian 32-bit platform.
+ intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
+ add32(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
+ addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
+#else
+#error "SAMPLING_FLAGS not implemented on this platform."
+#endif
+}
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+#if CPU(X86_64)
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
+ storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
+}
+#endif
+#endif
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+#if CPU(X86_64)
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
+ storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
+}
+#endif
+#endif
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
+}
+
+#if USE(JSVALUE32_64)
+
+inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
+{
+ RegisterID mappedTag;
+ if (getMappedTag(index, mappedTag)) {
+ move(mappedTag, tag);
+ unmap(tag);
+ return;
+ }
+
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ move(Imm32(getConstantOperand(index).tag()), tag);
+ unmap(tag);
+ return;
+ }
+
+ load32(tagFor(index), tag);
+ unmap(tag);
+}
+
+inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
+{
+ RegisterID mappedPayload;
+ if (getMappedPayload(index, mappedPayload)) {
+ move(mappedPayload, payload);
+ unmap(payload);
+ return;
+ }
+
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ move(Imm32(getConstantOperand(index).payload()), payload);
+ unmap(payload);
+ return;
+ }
+
+ load32(payloadFor(index), payload);
+ unmap(payload);
+}
+
+inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
+{
+ move(Imm32(v.payload()), payload);
+ move(Imm32(v.tag()), tag);
+}
+
+inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
+{
+ ASSERT(tag != payload);
+
+ if (base == callFrameRegister) {
+ ASSERT(payload != base);
+ emitLoadPayload(index, payload);
+ emitLoadTag(index, tag);
+ return;
+ }
+
+ if (payload == base) { // avoid stomping base
+ load32(tagFor(index, base), tag);
+ load32(payloadFor(index, base), payload);
+ return;
+ }
+
+ load32(payloadFor(index, base), payload);
+ load32(tagFor(index, base), tag);
+}
+
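+// When one of the two operands is currently mapped to registers, load it first so that
+// loading the other operand cannot clobber the mapped tag/payload registers before they are
+// consumed.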
+inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
+{
+ if (isMapped(index1)) {
+ emitLoad(index1, tag1, payload1);
+ emitLoad(index2, tag2, payload2);
+ return;
+ }
+ emitLoad(index2, tag2, payload2);
+ emitLoad(index1, tag1, payload1);
+}
+
+inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
+ loadDouble(&inConstantPool, value);
+ } else
+ loadDouble(addressFor(index), value);
+}
+
+inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
+ char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
+ convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+ } else
+ convertInt32ToDouble(payloadFor(index), value);
+}
+
+inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
+{
+ store32(payload, payloadFor(index, base));
+ store32(tag, tagFor(index, base));
+}
+
+inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsInt32)
+ store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreInt32(unsigned index, TrustedImm32 payload, bool indexIsInt32)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsInt32)
+ store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsCell)
+ store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreBool(unsigned index, RegisterID payload, bool indexIsBool)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsBool)
+ store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
+{
+ storeDouble(value, addressFor(index));
+}
+
+inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
+{
+ store32(Imm32(constant.payload()), payloadFor(index, base));
+ store32(Imm32(constant.tag()), tagFor(index, base));
+}
+
+ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+{
+ emitStore(dst, jsUndefined());
+}
+
+inline bool JIT::isLabeled(unsigned bytecodeOffset)
+{
+ for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
+ unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
+ if (jumpTarget == bytecodeOffset)
+ return true;
+ if (jumpTarget > bytecodeOffset)
+ return false;
+ }
+ return false;
+}
+
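+// The JSVALUE32_64 JIT keeps a one-entry cache recording which virtual register's tag and
+// payload are currently held in machine registers for the current bytecode offset. map()
+// records the association, unmap() invalidates the entry when a register is reused for
+// something else, and the getMapped* helpers let emitLoadTag/emitLoadPayload reuse the cached
+// registers instead of reloading from the register file.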
+inline void JIT::map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
+{
+ if (isLabeled(bytecodeOffset))
+ return;
+
+ m_mappedBytecodeOffset = bytecodeOffset;
+ m_mappedVirtualRegisterIndex = virtualRegisterIndex;
+ m_mappedTag = tag;
+ m_mappedPayload = payload;
+}
+
+inline void JIT::unmap(RegisterID registerID)
+{
+ if (m_mappedTag == registerID)
+ m_mappedTag = (RegisterID)-1;
+ else if (m_mappedPayload == registerID)
+ m_mappedPayload = (RegisterID)-1;
+}
+
+inline void JIT::unmap()
+{
+ m_mappedBytecodeOffset = (unsigned)-1;
+ m_mappedVirtualRegisterIndex = (unsigned)-1;
+ m_mappedTag = (RegisterID)-1;
+ m_mappedPayload = (RegisterID)-1;
+}
+
+inline bool JIT::isMapped(unsigned virtualRegisterIndex)
+{
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ return true;
+}
+
+inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
+{
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ if (m_mappedPayload == (RegisterID)-1)
+ return false;
+ payload = m_mappedPayload;
+ return true;
+}
+
+inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
+{
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ if (m_mappedTag == (RegisterID)-1)
+ return false;
+ tag = m_mappedTag;
+ return true;
+}
+
+inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+ if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+ addSlowCase(jump());
+ else
+ addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
+ }
+}
+
+inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+ if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+ addSlowCase(jump());
+ else
+ addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
+ }
+}
+
+inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
+ linkSlowCase(iter);
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
+}
+
+ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
+{
+ if (isOperandConstantImmediateInt(op1)) {
+ constant = getConstantOperand(op1).asInt32();
+ op = op2;
+ return true;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ constant = getConstantOperand(op2).asInt32();
+ op = op1;
+ return true;
+ }
+
+ return false;
+}
+
+#else // USE(JSVALUE32_64)
+
+ALWAYS_INLINE void JIT::killLastResultRegister()
+{
+ m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
+}
+
+// emitGetVirtualRegister loads an operand from the stack frame's register array into a hardware register.
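+// The most recently written temporary is tracked in m_lastResultBytecodeRegister and its value
+// remains in cachedResultRegister; when the requested operand is that temporary and no jump
+// target has been crossed, the load from the register file can be skipped.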
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
+{
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+
+ // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue value = m_codeBlock->getConstant(src);
+ move(ImmPtr(JSValue::encode(value)), dst);
+ killLastResultRegister();
+ return;
+ }
+
+ if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
+ // The argument we want is already stored in eax
+ if (dst != cachedResultRegister)
+ move(cachedResultRegister, dst);
+ killLastResultRegister();
+ return;
+ }
+
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
+ killLastResultRegister();
+}
+
+ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
+{
+ if (src2 == m_lastResultBytecodeRegister) {
+ emitGetVirtualRegister(src2, dst2);
+ emitGetVirtualRegister(src1, dst1);
+ } else {
+ emitGetVirtualRegister(src1, dst1);
+ emitGetVirtualRegister(src2, dst2);
+ }
+}
+
+ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
+{
+ return getConstantOperand(src).asInt32();
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
+}
+
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
+{
+ storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
+ m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
+}
+
+ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+{
+ storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
+}
+
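+// Cell checks: a value is a JSCell exactly when none of the tag bits are set, so a single
+// mask test (against tagMaskRegister on JSVALUE64, or the TagMask constant otherwise)
+// distinguishes cells from immediates.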
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
+{
+#if USE(JSVALUE64)
+ return branchTestPtr(Zero, reg, tagMaskRegister);
+#else
+ return branchTest32(Zero, reg, TrustedImm32(TagMask));
+#endif
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+{
+ move(reg1, scratch);
+ orPtr(reg2, scratch);
+ return emitJumpIfJSCell(scratch);
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
+{
+ addSlowCase(emitJumpIfJSCell(reg));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
+{
+#if USE(JSVALUE64)
+ return branchTestPtr(NonZero, reg, tagMaskRegister);
+#else
+ return branchTest32(NonZero, reg, TrustedImm32(TagMask));
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
+{
+ addSlowCase(emitJumpIfNotJSCell(reg));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
+{
+ if (!m_codeBlock->isKnownNotImmediate(vReg))
+ emitJumpSlowCaseIfNotJSCell(reg);
+}
+
+#if USE(JSVALUE64)
+
+inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
+ loadDouble(&inConstantPool, value);
+ } else
+ loadDouble(addressFor(index), value);
+}
+
+inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ ASSERT(isOperandConstantImmediateInt(index));
+ convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
+ } else
+ convertInt32ToDouble(addressFor(index), value);
+}
+#endif
+
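+// Immediate integers carry the TagTypeNumber tag. On JSVALUE64 the tag lives in
+// tagTypeNumberRegister and an unsigned >= comparison against it identifies integers; other
+// configurations simply test the TagTypeNumber bit.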
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
+{
+#if USE(JSVALUE64)
+ return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
+#else
+ return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
+#endif
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
+{
+#if USE(JSVALUE64)
+ return branchPtr(Below, reg, tagTypeNumberRegister);
+#else
+ return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
+#endif
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+{
+ move(reg1, scratch);
+ andPtr(reg2, scratch);
+ return emitJumpIfNotImmediateInteger(scratch);
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
+{
+ addSlowCase(emitJumpIfNotImmediateInteger(reg));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+{
+ addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
+{
+ addSlowCase(emitJumpIfNotImmediateNumber(reg));
+}
+
+#if USE(JSVALUE32_64)
+ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
+{
+ subPtr(TrustedImm32(TagTypeNumber), reg);
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
+{
+ return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
+}
+#endif
+
+ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
+{
+#if USE(JSVALUE64)
+ emitFastArithIntToImmNoCheck(src, dest);
+#else
+ if (src != dest)
+ move(src, dest);
+ addPtr(TrustedImm32(TagTypeNumber), dest);
+#endif
+}
+
+// operand is int32_t, must have been zero-extended if register is 64-bit.
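+// On JSVALUE64 boxing is a single OR with tagTypeNumberRegister; otherwise the value is
+// sign-extended, shifted left one bit via the add, and then re-tagged as an immediate integer.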
+ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
+{
+#if USE(JSVALUE64)
+ if (src != dest)
+ move(src, dest);
+ orPtr(tagTypeNumberRegister, dest);
+#else
+ signExtend32ToPtr(src, dest);
+ addPtr(dest, dest);
+ emitFastArithReTagImmediate(dest, dest);
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
+{
+ or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
+}
+
+#endif // USE(JSVALUE32_64)
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITOpcodes.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITOpcodes.cpp
new file mode 100644
index 0000000..f4c416d
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -0,0 +1,1750 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#if ENABLE(JIT)
+#include "JIT.h"
+
+#include "Arguments.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+#if USE(JSVALUE64)
+
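+// Builds the shared CTI trampolines: the lazily-linking and generic virtual call/construct
+// entry points, the native call/construct thunks and (when JIT_OPTIMIZE_PROPERTY_ACCESS is
+// enabled) the string-length fast path. The generated code is copied into an executable pool
+// and the entry points are published through the TrampolineStructure.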
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
+{
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+    // (2) This trampoline provides fast property access for string length.
+ Label stringLengthBegin = align();
+
+ // Check eax is a string
+ Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
+
+    // Checks out okay! Get the length from the JSString.
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);
+
+ Jump string_failureCases3 = branch32(LessThan, regT0, TrustedImm32(0));
+
+    // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign extend here.
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+
+ ret();
+#endif
+
+ // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+ COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
+
+ // VirtualCallLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ JumpList callLinkFailures;
+ Label virtualCallLinkBegin = align();
+ compileOpCallInitializeCallFrame();
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ restoreArgumentReference();
+ Call callLazyLinkCall = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ restoreReturnAddressBeforeReturn(regT3);
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ jump(regT0);
+
+ // VirtualConstructLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualConstructLinkBegin = align();
+ compileOpCallInitializeCallFrame();
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ restoreArgumentReference();
+ Call callLazyLinkConstruct = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ restoreReturnAddressBeforeReturn(regT3);
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ jump(regT0);
+
+ // VirtualCall Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallBegin = align();
+ compileOpCallInitializeCallFrame();
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callCompileCall = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock3.link(this);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
+ jump(regT0);
+
+ // VirtualConstruct Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualConstructBegin = align();
+ compileOpCallInitializeCallFrame();
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callCompileConstruct = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock4.link(this);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
+ jump(regT0);
+
+    // If the parser fails we want to be able to keep going,
+    // so we handle this as a parse failure.
+ callLinkFailures.link(this);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ restoreReturnAddressBeforeReturn(regT1);
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ poke(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
+ ret();
+
+ // NativeCall Trampoline
+ Label nativeCallThunk = privateCompileCTINativeCall(globalData);
+ Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+#endif
+
+ // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+ patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
+#endif
+ patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
+ patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ *executablePool = finalCode.m_executablePool;
+
+ trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
+ trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
+ trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
+ trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
+ trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
+ trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
+#endif
+}
+
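+// Generates the host-function thunk: it stores the return address and the caller's scope
+// chain into the call frame, calls the C function pointer held in the NativeExecutable
+// (m_function or m_constructor), and checks for a pending exception before returning.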
+JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
+{
+ int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
+
+ Label nativeCallThunk = align();
+
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+#if CPU(X86_64)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ peek(regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+
+ // Calling convention: f(edi, esi, edx, ecx, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, X86Registers::edi);
+
+ subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);
+ loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(X86Registers::r9, executableOffsetToFunction));
+
+ addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
+
+#elif CPU(ARM)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT0, r1 == regT1, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, ARMRegisters::r0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ call(Address(regT2, executableOffsetToFunction));
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#elif CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(a0, a1, a2, a3);
+ // Host function signature: f(ExecState*);
+
+    // Allocate 16 bytes of stack space (8-byte aligned):
+    // 16 (unused) bytes reserved for the 4 argument registers.
+ subPtr(TrustedImm32(16), stackPointerRegister);
+
+ // Setup arg0
+ move(callFrameRegister, MIPSRegisters::a0);
+
+ // Call
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(regT2, executableOffsetToFunction));
+
+ // Restore stack space
+ addPtr(TrustedImm32(16), stackPointerRegister);
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ UNUSED_PARAM(executableOffsetToFunction);
+ breakpoint();
+#endif
+
+ // Check for an exception
+ loadPtr(&(globalData->exception), regT2);
+ Jump exceptionHandler = branchTestPtr(NonZero, regT2);
+
+ // Return.
+ ret();
+
+ // Handle an exception
+ exceptionHandler.link(this);
+
+ // Grab the return address.
+ preserveReturnAddressAfterCall(regT1);
+
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ // Set the return address.
+ move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ restoreReturnAddressBeforeReturn(regT1);
+
+ ret();
+
+ return nativeCallThunk;
+}
+
+JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool>, JSGlobalData* globalData, NativeFunction)
+{
+ return globalData->jitStubs->ctiNativeCall();
+}
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ if (dst == m_lastResultBytecodeRegister)
+ killLastResultRegister();
+ } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
+        // If either the src or dst is the cached register, go through
+        // get/put registers to make sure we track this correctly.
+ emitGetVirtualRegister(src, regT0);
+ emitPutVirtualRegister(dst);
+ } else {
+ // Perform the copy via regT1; do not disturb any mapping in regT0.
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
+ storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
+ }
+}
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target);
+}
+
+void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+{
+ emitTimeoutCheck();
+
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ addJump(branch32(LessThanOrEqual, regT0, regT1), target);
+ }
+}
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+ JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(baseVal, regT0);
+
+ // Check that baseVal is a cell.
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
+
+ // Check that baseVal 'ImplementsHasInstance'.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsHasInstance)));
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ // Load the operands (baseVal, proto, and value respectively) into registers.
+ // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+ emitGetVirtualRegister(value, regT2);
+ emitGetVirtualRegister(baseVal, regT0);
+ emitGetVirtualRegister(proto, regT1);
+
+    // Check that value and proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
+ emitJumpSlowCaseIfNotJSCell(regT2, value);
+ emitJumpSlowCaseIfNotJSCell(regT1, proto);
+
+ // Check that prototype is an object
+ loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+ addSlowCase(branch8(NotEqual, Address(regT3, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
+
+    // FIXME: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
+ // Check that baseVal 'ImplementsDefaultHasInstance'.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
+
+ // Optimistically load the result true, and start looping.
+ // Initially, regT1 still contains proto and regT2 still contains value.
+ // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+ move(TrustedImmPtr(JSValue::encode(jsBoolean(true))), regT0);
+ Label loop(this);
+
+ // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
+ // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
+ Jump isInstance = branchPtr(Equal, regT2, regT1);
+ emitJumpIfJSCell(regT2).linkTo(loop, this);
+
+ // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+ move(TrustedImmPtr(JSValue::encode(jsBoolean(false))), regT0);
+
+ // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+ isInstance.link(this);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCallVarargs(currentInstruction);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+ JSVariableObject* globalObject = m_codeBlock->globalObject();
+ loadPtr(&globalObject->m_registers, regT0);
+ loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
+ JSVariableObject* globalObject = m_codeBlock->globalObject();
+ loadPtr(&globalObject->m_registers, regT0);
+ storePtr(regT1, Address(regT0, currentInstruction[1].u.operand * sizeof(Register)));
+}
+
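+// Scoped variable access walks 'skip' links up the scope chain. For functions that need a
+// full scope chain, the first hop is taken only if the activation object has actually been
+// created.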
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[3].u.operand;
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT0);
+ loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[2].u.operand;
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
+
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT1);
+ storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
+}
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+ unsigned activation = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ Jump activationCreated = branchTestPtr(NonZero, addressFor(activation));
+ Jump argumentsNotCreated = branchTestPtr(Zero, addressFor(arguments));
+ activationCreated.link(this);
+ JITStubCall stubCall(this, cti_op_tear_off_activation);
+ stubCall.addArgument(activation, regT2);
+ stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2);
+ stubCall.call();
+ argumentsNotCreated.link(this);
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(dst))));
+ JITStubCall stubCall(this, cti_op_tear_off_arguments);
+ stubCall.addArgument(unmodifiedArgumentsRegister(dst), regT2);
+ stubCall.call();
+ argsNotCreated.link(this);
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+ ASSERT(callFrameRegister != regT1);
+ ASSERT(regT1 != returnValueRegister);
+ ASSERT(returnValueRegister != callFrameRegister);
+
+ // Return the result in %eax.
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+}
+
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+{
+ ASSERT(callFrameRegister != regT1);
+ ASSERT(regT1 != returnValueRegister);
+ ASSERT(returnValueRegister != callFrameRegister);
+
+ // Return the result in %eax.
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+ Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister);
+ loadPtr(Address(returnValueRegister, JSCell::structureOffset()), regT2);
+ Jump notObject = branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+
+ // Return 'this' in %eax.
+ notJSCell.link(this);
+ notObject.link(this);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_array);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+
+ Jump isImm = emitJumpIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr)));
+ isImm.link(this);
+
+ if (dst != src)
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_strcat);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_ensure_property_exists);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_skip);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
+{
+ // Fast case
+ void* globalObject = m_codeBlock->globalObject();
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+ void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
+ void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
+
+ // Check Structure of global object
+ move(TrustedImmPtr(globalObject), regT0);
+ loadPtr(structureAddress, regT1);
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset()))); // Structures don't match
+
+ // Load cached property
+ // Assume that the global object always uses external storage.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT0);
+ load32(offsetAddr, regT1);
+ loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.addArgument(regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+
+ // Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be
+ // clear other than the low bit (which will be 0 or 1 for false or true inputs respectively).
+ // Then invert against JSValue(true), which will add the tag back in, and flip the low bit.
+ xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~1))));
+ xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0)))), target);
+ Jump isNonZero = emitJumpIfImmediateInteger(regT0);
+
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))), target);
+ addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))));
+
+ isNonZero.link(this);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+ andPtr(TrustedImm32(~TagBitUndefined), regT0);
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+ andPtr(TrustedImm32(~TagBitUndefined), regT0);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ JSCell* ptr = currentInstruction[2].u.jsCell.get();
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue(ptr)))), target);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
+ DataLabelPtr storeLocation = storePtrWithPatch(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+ killLastResultRegister();
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+ killLastResultRegister();
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ compare32(Equal, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ not32(regT0);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_func_exp);
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ Jump isZero = branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0))));
+ addJump(emitJumpIfImmediateInteger(regT0), target);
+
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))), target);
+ addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))));
+
+ isZero.link(this);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ compare32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ xorPtr(regT1, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ orPtr(regT1, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_throw);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.call();
+ ASSERT(regT0 == returnValueRegister);
+#ifndef NDEBUG
+    // cti_op_throw always changes its return address,
+    // so this point in the code should never be reached.
+ breakpoint();
+#endif
+}
+
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int breakTarget = currentInstruction[5].u.operand;
+
+ JumpList isNotObject;
+
+ emitGetVirtualRegister(base, regT0);
+ if (!m_codeBlock->isKnownNotImmediate(base))
+ isNotObject.append(emitJumpIfNotJSCell(regT0));
+ if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ isNotObject.append(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
+ }
+
+ // We could inline the case where you have a valid cache, but
+ // this call doesn't seem to be hot.
+ Label isObject(this);
+ JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
+ getPnamesStubCall.addArgument(regT0);
+ getPnamesStubCall.call(dst);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+ storePtr(tagTypeNumberRegister, payloadFor(i));
+ store32(TrustedImm32(Int32Tag), intTagFor(size));
+ store32(regT3, intPayloadFor(size));
+ Jump end = jump();
+
+ isNotObject.link(this);
+ move(regT0, regT1);
+ and32(TrustedImm32(~TagBitUndefined), regT1);
+ addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget);
+
+ JITStubCall toObjectStubCall(this, cti_to_object);
+ toObjectStubCall.addArgument(regT0);
+ toObjectStubCall.call(base);
+ jump().linkTo(isObject, this);
+
+ end.link(this);
+}
+
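+// op_next_pname walks the iterator's cached property-name list. As long as the base object's
+// structure and prototype chain still match the versions cached in the JSPropertyNameIterator
+// the name is known to be valid; otherwise cti_has_property is consulted.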
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int it = currentInstruction[5].u.operand;
+ int target = currentInstruction[6].u.operand;
+
+ JumpList callHasProperty;
+
+ Label begin(this);
+ load32(intPayloadFor(i), regT0);
+ Jump end = branch32(Equal, regT0, intPayloadFor(size));
+
+ // Grab key @ i
+ loadPtr(addressFor(it), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+
+ loadPtr(BaseIndex(regT2, regT0, TimesEight), regT2);
+
+ emitPutVirtualRegister(dst, regT2);
+
+ // Increment i
+ add32(TrustedImm32(1), regT0);
+ store32(regT0, intPayloadFor(i));
+
+ // Verify that i is valid:
+ emitGetVirtualRegister(base, regT0);
+
+ // Test base's structure
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
+
+ // Test base's prototype chain
+ loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+ addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+ Label checkPrototype(this);
+ loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
+ callHasProperty.append(emitJumpIfNotJSCell(regT2));
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+ addPtr(TrustedImm32(sizeof(Structure*)), regT3);
+ branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+ // Continue loop.
+ addJump(jump(), target);
+
+ // Slow case: Ask the object if i is valid.
+ callHasProperty.link(this);
+ emitGetVirtualRegister(dst, regT1);
+ JITStubCall stubCall(this, cti_has_property);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+
+ // Test for valid key.
+ addJump(branchTest32(NonZero, regT0), target);
+ jump().linkTo(begin, this);
+
+ // End of loop.
+ end.link(this);
+}
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_scope);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ JITStubCall(this, cti_op_pop_scope).call();
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(src1, regT0, src2, regT1);
+
+ // Jump to a slow case if either operand is a number, or if both are JSCell*s.
+ move(regT0, regT2);
+ orPtr(regT1, regT2);
+ addSlowCase(emitJumpIfJSCell(regT2));
+ addSlowCase(emitJumpIfImmediateNumber(regT2));
+
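+ // Only non-number immediates (undefined, null, booleans) and at most one cell can reach this
+ // point, so comparing the low 32 bits appears to be enough to decide strict (in)equality.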
+ if (type == OpStrictEq)
+ compare32(Equal, regT1, regT0, regT0);
+ else
+ compare32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+ int srcVReg = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(srcVReg, regT0);
+
+ Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addSlowCase(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(NumberType)));
+
+ wasImmediate.link(this);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_new_scope);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+ killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
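+ // regT0 is expected to hold the catching frame's CallFrame* on entry (set up by the throw
+ // stubs); reinstate it as the call frame register before reading the exception out of globalData.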
+ move(regT0, callFrameRegister);
+ peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, globalData) / sizeof(void*));
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
+ storePtr(TrustedImmPtr(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_jmp_scopes);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+ addJump(jump(), currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // Create the jump table for the switch destinations, and track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_imm);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
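+ // The stub returns the native code address of the matching case (or of the default), so jump there directly.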
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // Create the jump table for the switch destinations, and track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_char);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // Create the jump table for the switch destinations, and track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
+
+ JITStubCall stubCall(this, cti_op_switch_string);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_throw_reference_error);
+ stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
+ stubCall.call();
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ UNUSED_PARAM(currentInstruction);
+ breakpoint();
+#else
+ JITStubCall stubCall(this, cti_op_debug);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call();
+#endif
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src1, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ test8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT0);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
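+ // Among immediates only null and undefined compare equal to null; masking off the undefined
+ // bit folds undefined onto null before the compare.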
+ andPtr(TrustedImm32(~TagBitUndefined), regT0);
+ comparePtr(Equal, regT0, TrustedImm32(ValueNull), regT0);
+
+ wasNotImmediate.link(this);
+
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
+
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src1, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ test8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT0);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ andPtr(TrustedImm32(~TagBitUndefined), regT0);
+ comparePtr(NotEqual, regT0, TrustedImm32(ValueNull), regT0);
+
+ wasNotImmediate.link(this);
+
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_enter(Instruction*)
+{
+ // Even though CTI doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ size_t count = m_codeBlock->m_numVars;
+ for (size_t j = 0; j < count; ++j)
+ emitInitRegister(j);
+
+}
+
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump activationCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+ JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
+ emitPutVirtualRegister(dst);
+ activationCreated.link(this);
+}
+
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
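+ // Create the arguments object lazily: if the slot is still empty, build it and mirror it into
+ // the unmodified-arguments slot (presumably so tear-off still sees the original object even if
+ // the script later overwrites 'arguments').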
+ Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, cti_op_create_arguments).call();
+ emitPutVirtualRegister(dst);
+ emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
+ argsCreated.link(this);
+}
+
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ storePtr(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst));
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
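+ // Fast path: a cell whose structure doesn't require this-conversion is usable as |this| directly;
+ // immediates and convertible cells fall back to cti_op_convert_this.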
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ addSlowCase(branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
+}
+
+void JIT::emit_op_convert_this_strict(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ Jump notNull = branchTestPtr(NonZero, regT0);
+ move(TrustedImmPtr(JSValue::encode(jsNull())), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+ Jump setThis = jump();
+ notNull.link(this);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ Jump notAnObject = branch8(NotEqual, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ addSlowCase(branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
+ isImmediate.link(this);
+ notAnObject.link(this);
+ setThis.link(this);
+}
+
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emit_op_create_this(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_create_this);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+ peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+
+ JITStubCall stubCall(this, cti_op_profile_will_call);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+ stubCall.call();
+ noProfiler.link(this);
+
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+ peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+
+ JITStubCall stubCall(this, cti_op_profile_did_call);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+
+// Slow cases
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_convert_this);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_convert_this_strict(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_convert_this_strict);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_primitive);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+ }
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base not array check
+ linkSlowCase(iter); // in vector check
+
+ JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+ stubPutByValCall.addArgument(regT0);
+ stubPutByValCall.addArgument(property, regT2);
+ stubPutByValCall.addArgument(value, regT2);
+ stubPutByValCall.call();
+}
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
+ JITStubCall stubCall(this, cti_op_not);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
+}
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_bitnot);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_bitxor);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_bitor);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_eq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_eq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ xor32(TrustedImm32(0x1), regT0);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_stricteq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_nstricteq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_check_has_instance);
+ stubCall.addArgument(baseVal, regT2);
+ stubCall.call();
+}
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, value);
+ linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_instanceof);
+ stubCall.addArgument(value, regT2);
+ stubCall.addArgument(baseVal, regT2);
+ stubCall.addArgument(proto, regT2);
+ stubCall.call(dst);
+}
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallVarargsSlowCase(currentInstruction, iter);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_jsnumber);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
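+ // If an arguments object has already been created it may have been modified, so take the slow
+ // path and read its length property generically; otherwise the answer is the raw argument count
+ // minus one for the 'this' slot.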
+ addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ sub32(TrustedImm32(1), regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(base, regT0);
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ emitGetVirtualRegister(property, regT1);
+ addSlowCase(emitJumpIfNotImmediateInteger(regT1));
+ add32(TrustedImm32(1), regT1);
+ // regT1 now contains the integer index of the argument we want, offset by one to account for the 'this' slot
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, regT2));
+
+ Jump skipOutofLineParams;
+ int numArgs = m_codeBlock->m_numParameters;
+ if (numArgs) {
+ Jump notInInPlaceArgs = branch32(AboveOrEqual, regT1, Imm32(numArgs));
+ addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT0);
+ loadPtr(BaseIndex(regT0, regT1, TimesEight, 0), regT0);
+ skipOutofLineParams = jump();
+ notInInPlaceArgs.link(this);
+ }
+
+ addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT0);
+ mul32(TrustedImm32(sizeof(Register)), regT2, regT2);
+ subPtr(regT2, regT0);
+ loadPtr(BaseIndex(regT0, regT1, TimesEight, 0), regT0);
+ if (numArgs)
+ skipOutofLineParams.link(this);
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
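+ // Slow path 1: an arguments object already exists for this frame, so skip straight to the
+ // generic get_by_val. Slow paths 2 and 3 (index not an int, or out of range) must materialize
+ // the arguments object first.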
+ linkSlowCase(iter);
+ Jump skipArgumentsCreation = jump();
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, cti_op_create_arguments).call();
+ emitPutVirtualRegister(arguments);
+ emitPutVirtualRegister(unmodifiedArgumentsRegister(arguments));
+
+ skipArgumentsCreation.link(this);
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(arguments, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+}
+
+#endif // USE(JSVALUE64)
+
+void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[5].u.operand;
+
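+ // Each skipped scope node must still be a plain activation (checked via its structure);
+ // anything else - e.g. a dynamically injected scope - forces the slow path before the cached
+ // global lookup is attempted.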
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+ addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+ activationNotCreated.link(this);
+ }
+ while (skip--) {
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+ addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+ }
+ emit_op_resolve_global(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
+ int skip = currentInstruction[5].u.operand;
+ while (skip--)
+ linkSlowCase(iter);
+ JITStubCall resolveStubCall(this, cti_op_resolve);
+ resolveStubCall.addArgument(TrustedImmPtr(ident));
+ resolveStubCall.call(dst);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_resolve_global_dynamic));
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.addArgument(regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_regexp);
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_load_varargs(Instruction* currentInstruction)
+{
+ int argCountDst = currentInstruction[1].u.operand;
+ int argsOffset = currentInstruction[2].u.operand;
+ int registerOffset = currentInstruction[3].u.operand;
+ ASSERT(argsOffset <= registerOffset);
+
+ int expectedParams = m_codeBlock->m_numParameters - 1;
+ // Don't do inline copying if we aren't guaranteed to have a single stream
+ // of arguments.
+ if (expectedParams) {
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(Imm32(argsOffset));
+ stubCall.call();
+ // Stores a naked int32 in the register file.
+ store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
+ return;
+ }
+
+#if USE(JSVALUE32_64)
+ addSlowCase(branch32(NotEqual, tagFor(argsOffset), TrustedImm32(JSValue::EmptyValueTag)));
+#else
+ addSlowCase(branchTestPtr(NonZero, addressFor(argsOffset)));
+#endif
+ // Load arg count into regT0
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ store32(TrustedImm32(Int32Tag), intTagFor(argCountDst));
+ store32(regT0, intPayloadFor(argCountDst));
+ Jump endBranch = branch32(Equal, regT0, TrustedImm32(1));
+
+ mul32(TrustedImm32(sizeof(Register)), regT0, regT3);
+ addPtr(TrustedImm32(static_cast<unsigned>(sizeof(Register) - RegisterFile::CallFrameHeaderSize * sizeof(Register))), callFrameRegister, regT1);
+ subPtr(regT3, regT1); // regT1 is now the start of the out of line arguments
+ addPtr(Imm32(argsOffset * sizeof(Register)), callFrameRegister, regT2); // regT2 is the target buffer
+
+ // Bounds check the register file.
+ addPtr(regT2, regT3);
+ addPtr(Imm32((registerOffset - argsOffset) * sizeof(Register)), regT3);
+ addSlowCase(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT3));
+
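+ // The branch above already bailed out when argCount == 1 (just 'this'); otherwise copy the
+ // remaining argCount - 1 Registers into the target buffer, counting regT0 down to zero.
+ // On JSVALUE32_64 each Register is moved as two 32-bit halves.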
+ sub32(TrustedImm32(1), regT0);
+ Label loopStart = label();
+ loadPtr(BaseIndex(regT1, regT0, TimesEight, static_cast<unsigned>(0 - 2 * sizeof(Register))), regT3);
+ storePtr(regT3, BaseIndex(regT2, regT0, TimesEight, static_cast<unsigned>(0 - sizeof(Register))));
+#if USE(JSVALUE32_64)
+ loadPtr(BaseIndex(regT1, regT0, TimesEight, static_cast<unsigned>(sizeof(void*) - 2 * sizeof(Register))), regT3);
+ storePtr(regT3, BaseIndex(regT2, regT0, TimesEight, static_cast<unsigned>(sizeof(void*) - sizeof(Register))));
+#endif
+ branchSubPtr(NonZero, TrustedImm32(1), regT0).linkTo(loopStart, this);
+ endBranch.link(this);
+}
+
+void JIT::emitSlow_op_load_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int argCountDst = currentInstruction[1].u.operand;
+ int argsOffset = currentInstruction[2].u.operand;
+ int expectedParams = m_codeBlock->m_numParameters - 1;
+ if (expectedParams)
+ return;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(Imm32(argsOffset));
+ stubCall.call();
+
+ store32(TrustedImm32(Int32Tag), intTagFor(argCountDst));
+ store32(returnValueRegister, intPayloadFor(argCountDst));
+}
+
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ Jump lazyJump;
+ int dst = currentInstruction[1].u.operand;
+ if (currentInstruction[3].u.operand) {
+#if USE(JSVALUE32_64)
+ lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
+#else
+ lazyJump = branchTestPtr(NonZero, addressFor(dst));
+#endif
+ }
+ JITStubCall stubCall(this, cti_op_new_func);
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+ if (currentInstruction[3].u.operand)
+ lazyJump.link(this);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
new file mode 100644
index 0000000..2f657e7
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -0,0 +1,1847 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
+{
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ Label softModBegin = align();
+ softModulo();
+#endif
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ // (1) This function provides fast property access for string length
+ Label stringLengthBegin = align();
+
+ // regT0 holds payload, regT1 holds tag
+
+ Jump string_failureCases1 = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
+
+ // Checks out okay! Get the length from the UString.
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);
+
+ Jump string_failureCases3 = branch32(Above, regT2, TrustedImm32(INT_MAX));
+ move(regT2, regT0);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+
+ ret();
+#endif
+
+ JumpList callLinkFailures;
+ // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ // VirtualCallLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallLinkBegin = align();
+ compileOpCallInitializeCallFrame();
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ restoreArgumentReference();
+ Call callLazyLinkCall = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ restoreReturnAddressBeforeReturn(regT3);
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ jump(regT0);
+
+ // VirtualConstructLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualConstructLinkBegin = align();
+ compileOpCallInitializeCallFrame();
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ restoreArgumentReference();
+ Call callLazyLinkConstruct = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ jump(regT0);
+
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
+
+ // VirtualCall Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallBegin = align();
+ compileOpCallInitializeCallFrame();
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callCompileCall = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock3.link(this);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
+ jump(regT0);
+
+ // VirtualConstruct Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualConstructBegin = align();
+ compileOpCallInitializeCallFrame();
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callCompileCconstruct = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock4.link(this);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
+ jump(regT0);
+
+ // If the parser fails we want to be able to keep going,
+ // so we handle this as a parse failure.
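+ // Record the return address as the exception location, then return through
+ // ctiVMThrowTrampoline (poked in as the return address) so the VM's throw machinery runs.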
+ callLinkFailures.link(this);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ restoreReturnAddressBeforeReturn(regT1);
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ poke(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
+ ret();
+
+ // NativeCall Trampoline
+ Label nativeCallThunk = privateCompileCTINativeCall(globalData);
+ Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+#endif
+
+ // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+ patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
+#endif
+ patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
+ patchBuffer.link(callCompileCconstruct, FunctionPtr(cti_op_construct_jitCompile));
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ *executablePool = finalCode.m_executablePool;
+
+ trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
+ trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
+ trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
+ trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
+ trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
+#endif
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
+#endif
+}
+
+JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
+{
+ int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
+
+ Label nativeCallThunk = align();
+
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+#if CPU(X86)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ peek(regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+
+ // Calling convention: f(ecx, edx, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, X86Registers::ecx);
+
+ subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+
+ // call the function
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT1);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(regT1, executableOffsetToFunction));
+
+ addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
+
+#elif CPU(ARM)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT0, r1 == regT1, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, ARMRegisters::r0);
+
+ // call the function
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ call(Address(regT2, executableOffsetToFunction));
+
+ restoreReturnAddressBeforeReturn(regT3);
+#elif CPU(SH4)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT4, r1 == regT5, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, regT4);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT5);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ call(Address(regT2, executableOffsetToFunction), regT0);
+ restoreReturnAddressBeforeReturn(regT3);
+#elif CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(a0, a1, a2, a3);
+ // Host function signature: f(ExecState*);
+
+ // Allocate 16 bytes of stack space (8-byte aligned):
+ // 16 unused bytes reserved for the 4 argument registers, as the MIPS calling convention requires.
+ subPtr(TrustedImm32(16), stackPointerRegister);
+
+ // Setup arg0
+ move(callFrameRegister, MIPSRegisters::a0);
+
+ // Call
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(regT2, executableOffsetToFunction));
+
+ // Restore stack space
+ addPtr(TrustedImm32(16), stackPointerRegister);
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ UNUSED_PARAM(executableOffsetToFunction);
+ breakpoint();
+#endif // CPU(X86)
+
+ // Check for an exception
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+
+ // Return.
+ ret();
+
+ // Handle an exception
+ sawException.link(this);
+
+ // Grab the return address.
+ preserveReturnAddressAfterCall(regT1);
+
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ // Set the return address.
+ move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ restoreReturnAddressBeforeReturn(regT1);
+
+ ret();
+
+ return nativeCallThunk;
+}
+
+JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* globalData, NativeFunction func)
+{
+ Call nativeCall;
+ Label nativeCallThunk = align();
+
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+#if CPU(X86)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ peek(regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+
+ // Calling convention: f(ecx, edx, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, X86Registers::ecx);
+
+ subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+
+ // call the function
+ nativeCall = call();
+
+ addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
+
+#elif CPU(ARM)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT0, r1 == regT1, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, ARMRegisters::r0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ // call the function
+ nativeCall = call();
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#elif CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(a0, a1, a2, a3);
+ // Host function signature: f(ExecState*);
+
+ // Allocate 16 bytes of stack space (8-byte aligned):
+ // 16 unused bytes reserved for the 4 argument registers, as the MIPS calling convention requires.
+ subPtr(TrustedImm32(16), stackPointerRegister);
+
+ // Setup arg0
+ move(callFrameRegister, MIPSRegisters::a0);
+
+ // Call
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+
+ // call the function
+ nativeCall = call();
+
+ // Restore stack space
+ addPtr(TrustedImm32(16), stackPointerRegister);
+
+ restoreReturnAddressBeforeReturn(regT3);
+#elif CPU(SH4)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT4, r1 == regT5, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, regT4);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT5);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ // call the function
+ nativeCall = call();
+
+ restoreReturnAddressBeforeReturn(regT3);
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ breakpoint();
+#endif // CPU(X86)
+
+ // Check for an exception
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+
+ // Return.
+ ret();
+
+ // Handle an exception
+ sawException.link(this);
+
+ // Grab the return address.
+ preserveReturnAddressAfterCall(regT1);
+
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ // Set the return address.
+ move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ restoreReturnAddressBeforeReturn(regT1);
+
+ ret();
+
+ // Trampoline constructed! Copy the code, link up the native call, and return the thunk's entry point.
+ LinkBuffer patchBuffer(this, executablePool);
+
+ patchBuffer.link(nativeCall, FunctionPtr(func));
+ patchBuffer.finalizeCode();
+
+ return patchBuffer.trampolineAt(nativeCallThunk);
+}
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ if (m_codeBlock->isConstantRegisterIndex(src))
+ emitStore(dst, getConstantOperand(src));
+ else {
+ emitLoad(src, regT1, regT0);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
+ }
+}
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target);
+}
+
+void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitTimeoutCheck();
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT0, regT2), target);
+}
+
+void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+ JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ emitLoadPayload(baseVal, regT0);
+
+ // Check that baseVal is a cell.
+ emitJumpSlowCaseIfNotJSCell(baseVal);
+
+ // Check that baseVal 'ImplementsHasInstance'.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsHasInstance)));
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ // Load the operands into registers.
+ // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+ emitLoadPayload(value, regT2);
+ emitLoadPayload(baseVal, regT0);
+ emitLoadPayload(proto, regT1);
+
+ // Check that value and proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
+ emitJumpSlowCaseIfNotJSCell(value);
+ emitJumpSlowCaseIfNotJSCell(proto);
+
+ // Check that prototype is an object
+ loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+ addSlowCase(branch8(NotEqual, Address(regT3, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
+
+ // FIXME: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
+ // Check that baseVal 'ImplementsDefaultHasInstance'.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
+
+ // Optimistically load the result true, and start looping.
+ // Initially, regT1 still contains proto and regT2 still contains value.
+ // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+ move(TrustedImm32(1), regT0);
+ Label loop(this);
+
+ // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
+ // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ load32(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ Jump isInstance = branchPtr(Equal, regT2, regT1);
+ branchTest32(NonZero, regT2).linkTo(loop, this);
+
+ // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+ move(TrustedImm32(0), regT0);
+
+ // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+ isInstance.link(this);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_check_has_instance);
+ stubCall.addArgument(baseVal);
+ stubCall.call();
+}
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, value);
+ linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_instanceof);
+ stubCall.addArgument(value);
+ stubCall.addArgument(baseVal);
+ stubCall.addArgument(proto);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[2].u.operand;
+
+ loadPtr(&globalObject->m_registers, regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ loadPtr(&globalObject->m_registers, regT2);
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
+}
+
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int skip = currentInstruction[3].u.operand;
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
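+ // If this frame's activation is created lazily and hasn't been created yet, the first
+ // scope-chain hop is skipped - the chain doesn't contain the activation yet.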
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int index = currentInstruction[1].u.operand;
+ int skip = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT2);
+
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
+}
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+ unsigned activation = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
+ Jump argumentsNotCreated = branch32(Equal, tagFor(arguments), TrustedImm32(JSValue::EmptyValueTag));
+ activationCreated.link(this);
+ JITStubCall stubCall(this, cti_op_tear_off_activation);
+ stubCall.addArgument(activation);
+ stubCall.addArgument(unmodifiedArgumentsRegister(arguments));
+ stubCall.call();
+ argumentsNotCreated.link(this);
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(dst)), TrustedImm32(JSValue::EmptyValueTag));
+ JITStubCall stubCall(this, cti_op_tear_off_arguments);
+ stubCall.addArgument(unmodifiedArgumentsRegister(dst));
+ stubCall.call();
+ argsNotCreated.link(this);
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_array);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImm = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr)));
+ isImm.link(this);
+
+ if (dst != src)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_primitive);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_strcat);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_ensure_property_exists);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_skip);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
+{
+ // FIXME: Optimize to use patching instead of so many memory accesses.
+
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = m_codeBlock->globalObject();
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+ void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
+ void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
+
+ // Verify structure.
+ move(TrustedImmPtr(globalObject), regT0);
+ loadPtr(structureAddress, regT1);
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset())));
+
+ // Load property.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT2);
+ load32(offsetAddr, regT3);
+ load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
+ load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
+}
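+
+// In effect, the inline cache emitted by emit_op_resolve_global above behaves like this sketch
+// (illustrative pseudo-C++ only; 'info' stands for m_codeBlock->globalResolveInfo(currentIndex)):
+//
+//     if (globalObject->structure() != info.structure)
+//         goto slowCase;                       // the slow path, cti_op_resolve_global, fills the cache
+//     callFrame[dst] = globalObject->m_propertyStorage[info.offset];   // stored as tag + payload words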
+
+void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoadTag(src, regT0);
+
+ emitLoad(src, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::BooleanTag)));
+ xor32(TrustedImm32(1), regT0);
+
+ emitStoreBool(dst, regT0, (dst == src));
+}
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_not);
+ stubCall.addArgument(src);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ ASSERT((JSValue::BooleanTag + 1 == JSValue::Int32Tag) && !(JSValue::Int32Tag + 1));
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::BooleanTag)));
+ addJump(branchTest32(Zero, regT0), target);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ // regT1 contains the tag from the hot path.
+ Jump notNumber = branch32(Above, regT1, Imm32(JSValue::LowestTag));
+
+ emitLoadDouble(cond, fpRegT0);
+ emitJumpSlowToHot(branchDoubleZeroOrNaN(fpRegT0, fpRegT1), target);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jfalse));
+
+ notNumber.link(this);
+ }
+
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ ASSERT((JSValue::BooleanTag + 1 == JSValue::Int32Tag) && !(JSValue::Int32Tag + 1));
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::BooleanTag)));
+ addJump(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+ // regT1 contains the tag from the hot path.
+ Jump notNumber = branch32(Above, regT1, Imm32(JSValue::LowestTag));
+
+ emitLoadDouble(cond, fpRegT0);
+ emitJumpSlowToHot(branchDoubleNonZero(fpRegT0, fpRegT1), target);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jtrue));
+
+ notNumber.link(this);
+ }
+
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1));
+ or32(TrustedImm32(1), regT1);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1));
+ or32(TrustedImm32(1), regT1);
+ addJump(branch32(NotEqual, regT1, TrustedImm32(JSValue::NullTag)), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ JSCell* ptr = currentInstruction[2].u.jsCell.get();
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addJump(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)), target);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(ptr)), target);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
+ DataLabelPtr storeLocation = storePtrWithPatch(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));
+
+ compare32(Equal, regT0, regT2, regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), TrustedImmPtr(m_globalData->jsStringVPtr)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call();
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(op1);
+ stubCallEq.addArgument(op2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));
+
+ compare32(NotEqual, regT0, regT2, regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), TrustedImmPtr(m_globalData->jsStringVPtr)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call(regT0);
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(regT1, regT0);
+ stubCallEq.addArgument(regT3, regT2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ xor32(TrustedImm32(0x1), regT0);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoadTag(src1, regT0);
+ emitLoadTag(src2, regT1);
+
+ // Jump to a slow case if either operand is double, or if both operands are
+ // cells and/or Int32s.
+ move(regT0, regT2);
+ and32(regT1, regT2);
+ addSlowCase(branch32(Below, regT2, TrustedImm32(JSValue::LowestTag)));
+ addSlowCase(branch32(AboveOrEqual, regT2, TrustedImm32(JSValue::CellTag)));
+
+ if (type == OpStrictEq)
+ compare32(Equal, regT0, regT1, regT0);
+ else
+ compare32(NotEqual, regT0, regT1, regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_stricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_nstricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ test8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ compare32(Equal, regT1, TrustedImm32(JSValue::NullTag), regT2);
+ compare32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ test8(Zero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ compare32(NotEqual, regT1, TrustedImm32(JSValue::NullTag), regT2);
+ compare32(NotEqual, regT1, TrustedImm32(JSValue::UndefinedTag), regT1);
+ and32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_func_exp);
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+ unsigned exception = currentInstruction[1].u.operand;
+ JITStubCall stubCall(this, cti_op_throw);
+ stubCall.addArgument(exception);
+ stubCall.call();
+
+#ifndef NDEBUG
+ // cti_op_throw always changes its return address,
+ // so this point in the code should never be reached.
+ breakpoint();
+#endif
+}
+
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int breakTarget = currentInstruction[5].u.operand;
+
+ JumpList isNotObject;
+
+ emitLoad(base, regT1, regT0);
+ if (!m_codeBlock->isKnownNotImmediate(base))
+ isNotObject.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ isNotObject.append(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
+ }
+
+ // We could inline the case where you have a valid cache, but
+ // this call doesn't seem to be hot.
+ Label isObject(this);
+ JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
+ getPnamesStubCall.addArgument(regT0);
+ getPnamesStubCall.call(dst);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+ store32(TrustedImm32(Int32Tag), intTagFor(i));
+ store32(TrustedImm32(0), intPayloadFor(i));
+ store32(TrustedImm32(Int32Tag), intTagFor(size));
+ store32(regT3, payloadFor(size));
+ Jump end = jump();
+
+ isNotObject.link(this);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), breakTarget);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag)), breakTarget);
+ JITStubCall toObjectStubCall(this, cti_to_object);
+ toObjectStubCall.addArgument(regT1, regT0);
+ toObjectStubCall.call(base);
+ jump().linkTo(isObject, this);
+
+ end.link(this);
+}
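+
+// Summary of the fast/slow structure emitted above: null and undefined bases jump straight to the
+// loop's break target; any other non-object base is coerced with cti_to_object; the resulting
+// JSPropertyNameIterator is stored in dst, and the loop counters i (starting at 0) and size are
+// initialized as boxed Int32s.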
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int it = currentInstruction[5].u.operand;
+ int target = currentInstruction[6].u.operand;
+
+ JumpList callHasProperty;
+
+ Label begin(this);
+ load32(intPayloadFor(i), regT0);
+ Jump end = branch32(Equal, regT0, intPayloadFor(size));
+
+ // Grab key @ i
+ loadPtr(payloadFor(it), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+ load32(BaseIndex(regT2, regT0, TimesEight), regT2);
+ store32(TrustedImm32(JSValue::CellTag), tagFor(dst));
+ store32(regT2, payloadFor(dst));
+
+ // Increment i
+ add32(TrustedImm32(1), regT0);
+ store32(regT0, intPayloadFor(i));
+
+ // Verify that i is valid:
+ loadPtr(payloadFor(base), regT0);
+
+ // Test base's structure
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
+
+ // Test base's prototype chain
+ loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+ addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+ Label checkPrototype(this);
+ callHasProperty.append(branch32(Equal, Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::NullTag)));
+ loadPtr(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+ addPtr(TrustedImm32(sizeof(Structure*)), regT3);
+ branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+ // Continue loop.
+ addJump(jump(), target);
+
+ // Slow case: Ask the object if i is valid.
+ callHasProperty.link(this);
+ loadPtr(addressFor(dst), regT1);
+ JITStubCall stubCall(this, cti_has_property);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+
+ // Test for valid key.
+ addJump(branchTest32(NonZero, regT0), target);
+ jump().linkTo(begin, this);
+
+ // End of loop.
+ end.link(this);
+}
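+
+// Roughly, the loop emitted above corresponds to this sketch (illustrative only;
+// 'cachedChainStillMatches' is shorthand for the prototype-chain walk at checkPrototype):
+//
+//     while (i != size) {
+//         dst = iterator->m_jsStrings[i++];
+//         if (base->structure() == iterator->m_cachedStructure && cachedChainStillMatches())
+//             goto target;                     // the cached name is guaranteed to still be present
+//         if (cti_has_property(base, dst))
+//             goto target;                     // slow re-check succeeded
+//     }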
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_scope);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ JITStubCall(this, cti_op_pop_scope).call();
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ isInt32.link(this);
+
+ if (src != dst)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_jsnumber);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_new_scope);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+ // cti_op_throw returns the callFrame for the handler.
+ move(regT0, callFrameRegister);
+
+ // Now store the exception returned by cti_op_throw.
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, globalData)), regT3);
+ load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ store32(TrustedImm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(TrustedImm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+ unsigned exception = currentInstruction[1].u.operand;
+ emitStore(exception, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_jmp_scopes);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+ addJump(jump(), currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_imm);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_char);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
+
+ JITStubCall stubCall(this, cti_op_switch_string);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
+{
+ unsigned message = currentInstruction[1].u.operand;
+
+ JITStubCall stubCall(this, cti_op_throw_reference_error);
+ stubCall.addArgument(m_codeBlock->getConstant(message));
+ stubCall.call();
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ UNUSED_PARAM(currentInstruction);
+ breakpoint();
+#else
+ JITStubCall stubCall(this, cti_op_debug);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call();
+#endif
+}
+
+
+void JIT::emit_op_enter(Instruction*)
+{
+ // Even though JIT code doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ for (int i = 0; i < m_codeBlock->m_numVars; ++i)
+ emitStore(i, jsUndefined());
+}
+
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
+{
+ unsigned activation = currentInstruction[1].u.operand;
+
+ Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
+ JITStubCall(this, cti_op_push_activation).call(activation);
+ activationCreated.link(this);
+}
+
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump argsCreated = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
+
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, cti_op_create_arguments).call();
+
+ emitStore(dst, regT1, regT0);
+ emitStore(unmodifiedArgumentsRegister(dst), regT1, regT0);
+
+ argsCreated.link(this);
+}
+
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ emitStore(dst, JSValue());
+}
+
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ emitStoreCell(dst, regT0);
+}
+
+void JIT::emit_op_create_this(Instruction* currentInstruction)
+{
+ unsigned protoRegister = currentInstruction[2].u.operand;
+ emitLoad(protoRegister, regT1, regT0);
+ JITStubCall stubCall(this, cti_op_create_this);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ emitLoad(thisRegister, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addSlowCase(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
+
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
+}
+
+void JIT::emit_op_convert_this_strict(Instruction* currentInstruction)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ emitLoad(thisRegister, regT1, regT0);
+
+ Jump notNull = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
+ emitStore(thisRegister, jsNull());
+ Jump setThis = jump();
+ notNull.link(this);
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump notAnObject = branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ addSlowCase(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
+ isImmediate.link(this);
+ notAnObject.link(this);
+ setThis.link(this);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this_strict), thisRegister, regT1, regT0);
+}
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_convert_this);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(thisRegister);
+}
+
+void JIT::emitSlow_op_convert_this_strict(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_convert_this_strict);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(thisRegister);
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_will_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_did_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ sub32(TrustedImm32(1), regT0);
+ emitStoreInt32(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
+ emitLoad(property, regT1, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ add32(TrustedImm32(1), regT2);
+ // regT2 now contains the integer index of the argument we want, counting the 'this' slot
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT3);
+ addSlowCase(branch32(AboveOrEqual, regT2, regT3));
+
+ Jump skipOutofLineParams;
+ int numArgs = m_codeBlock->m_numParameters;
+ if (numArgs) {
+ Jump notInInPlaceArgs = branch32(AboveOrEqual, regT2, Imm32(numArgs));
+ addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT1);
+ loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ skipOutofLineParams = jump();
+ notInInPlaceArgs.link(this);
+ }
+
+ addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT1);
+ mul32(TrustedImm32(sizeof(Register)), regT3, regT3);
+ subPtr(regT3, regT1);
+ loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ if (numArgs)
+ skipOutofLineParams.link(this);
+ emitStore(dst, regT1, regT0);
+}
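+
+// The two address computations above differ only in their base: an argument index that falls within
+// the declared parameter count is read from the in-place parameter slots relative to the call frame,
+// while a larger index is read from the copy located a further ArgumentCount registers below. In both
+// cases the tag and payload words of the selected Register slot are loaded and stored into dst.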
+
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ Jump skipArgumentsCreation = jump();
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, cti_op_create_arguments).call();
+
+ emitStore(arguments, regT1, regT0);
+ emitStore(unmodifiedArgumentsRegister(arguments), regT1, regT0);
+
+ skipArgumentsCreation.link(this);
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(arguments);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+#if ENABLE(JIT_USE_SOFT_MODULO)
+void JIT::softModulo()
+{
+ push(regT1);
+ push(regT3);
+ move(regT2, regT3);
+ move(regT0, regT2);
+ move(TrustedImm32(0), regT1);
+
+ // Check for a negative remainder result
+ Jump positiveRegT3 = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
+ neg32(regT3);
+ xor32(TrustedImm32(1), regT1);
+ positiveRegT3.link(this);
+
+ Jump positiveRegT2 = branch32(GreaterThanOrEqual, regT2, TrustedImm32(0));
+ neg32(regT2);
+ xor32(TrustedImm32(2), regT1);
+ positiveRegT2.link(this);
+
+ // Save the condition for a negative remainder
+ push(regT1);
+
+ Jump exitBranch = branch32(LessThan, regT2, regT3);
+
+ // Power of two fast case
+ move(regT3, regT0);
+ sub32(TrustedImm32(1), regT0);
+ Jump notPowerOfTwo = branchTest32(NonZero, regT0, regT3);
+ and32(regT0, regT2);
+ notPowerOfTwo.link(this);
+
+ and32(regT3, regT0);
+
+ Jump exitBranch2 = branchTest32(Zero, regT0);
+
+ countLeadingZeros32(regT2, regT0);
+ countLeadingZeros32(regT3, regT1);
+ sub32(regT0, regT1);
+
+ Jump useFullTable = branch32(Equal, regT1, TrustedImm32(31));
+
+ neg32(regT1);
+ add32(TrustedImm32(31), regT1);
+
+ int elementSizeByShift = -1;
+#if CPU(ARM)
+ elementSizeByShift = 3;
+#else
+#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
+#endif
+ relativeTableJump(regT1, elementSizeByShift);
+
+ useFullTable.link(this);
+ // Modulo table
+ for (int i = 31; i > 0; --i) {
+#if CPU(ARM_TRADITIONAL)
+ m_assembler.cmp_r(regT2, m_assembler.lsl(regT3, i));
+ m_assembler.sub_r(regT2, regT2, m_assembler.lsl(regT3, i), ARMAssembler::CS);
+#elif CPU(ARM_THUMB2)
+ ShiftTypeAndAmount shift(SRType_LSL, i);
+ m_assembler.sub_S(regT1, regT2, regT3, shift);
+ m_assembler.it(ARMv7Assembler::ConditionCS);
+ m_assembler.mov(regT2, regT1);
+#else
+#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
+#endif
+ }
+
+ Jump lower = branch32(Below, regT2, regT3);
+ sub32(regT3, regT2);
+ lower.link(this);
+
+ exitBranch.link(this);
+ exitBranch2.link(this);
+
+ // Check for a negative remainder
+ pop(regT1);
+ Jump positiveResult = branch32(Equal, regT1, TrustedImm32(0));
+ neg32(regT2);
+ positiveResult.link(this);
+
+ move(regT2, regT0);
+
+ pop(regT3);
+ pop(regT1);
+ ret();
+}
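+
+// For non-negative operands, the modulo table above is roughly equivalent to this sketch
+// (illustrative only; the sign fix-up and the power-of-two fast path are handled separately above,
+// and the countLeadingZeros logic jumps into the middle of the table so that shift amounts that
+// cannot possibly fit under the dividend are skipped):
+//
+//     uint32_t remainder(uint32_t a, uint32_t b)   // assumes b != 0
+//     {
+//         for (int i = 31; i > 0; --i) {
+//             if (a >= (b << i))
+//                 a -= (b << i);
+//         }
+//         if (a >= b)
+//             a -= b;
+//         return a;
+//     }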
+#endif // ENABLE(JIT_USE_SOFT_MODULO)
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
new file mode 100644
index 0000000..32e42af
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -0,0 +1,1038 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "GetterSetter.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "Interpreter.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+#if USE(JSVALUE64)
+
+JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ JSInterfaceJIT jit;
+ JumpList failures;
+ failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
+ failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+
+ // Load the string length into regT2, and start the process of loading the data pointer into regT0
+ jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
+ jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
+
+ // Load the character
+ jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
+
+ failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
+ jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+ jit.ret();
+
+ failures.link(&jit);
+ jit.move(TrustedImm32(0), regT0);
+ jit.ret();
+
+ LinkBuffer patchBuffer(&jit, pool);
+ return patchBuffer.finalizeCode().m_code;
+}
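+
+// This thunk returns the single-character JSString for string[index] in regT0, or 0 when the receiver
+// is not a simple (non-rope) JSString, the index is out of bounds, or the character is outside the
+// cached small-strings range (>= 0x100); the caller below tests regT0 for zero and falls back to the
+// generic cti_op_get_by_val path.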
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(base, regT0, property, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+ // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
+ // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation size is
+ // always less than 4GB). As such, zero-extending will have been correct (and extending the value to
+ // 64 bits is necessary since it's used in the address calculation). We zero-extend rather than
+ // sign-extend since it makes it easier to re-tag the value in the slow case.
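+ // For example, property = -1 zero-extends to 0xFFFFFFFF, which the AboveOrEqual length check below
+ // treats as an enormous index and routes to the slow case.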
+ zeroExtend32ToPtr(regT1, regT1);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
+
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
+
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
+ addSlowCase(branchTestPtr(Zero, regT0));
+
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ Jump nonCell = jump();
+ linkSlowCase(iter); // base array check
+ Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
+ emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
+ Jump failed = branchTestPtr(Zero, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+ failed.link(this);
+ notString.link(this);
+ nonCell.link(this);
+
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+}
+
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch)
+{
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), scratch);
+ loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
+}
+
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ unsigned iter = currentInstruction[5].u.operand;
+ unsigned i = currentInstruction[6].u.operand;
+
+ emitGetVirtualRegister(property, regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
+ emitGetVirtualRegisters(base, regT0, iter, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ // Test base's structure
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(TrustedImm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ compileGetDirectOffset(regT0, regT0, regT3, regT1);
+
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(base, regT0, property, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ // See comment in op_get_by_val.
+ zeroExtend32ToPtr(regT1, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
+
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
+ Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+
+ Label storeResult(this);
+ emitGetVirtualRegister(value, regT0);
+ storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ Jump end = jump();
+
+ empty.link(this);
+ add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+ move(regT1, regT0);
+ add32(TrustedImm32(1), regT0);
+ store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ jump().linkTo(storeResult, this);
+
+ end.link(this);
+}
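+
+// The 'empty' path above handles a write into a hole in the vector: it bumps m_numValuesInVector and,
+// when the index is at or beyond the current m_length, grows m_length to index + 1 before storing.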
+
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_put_by_index);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_put_getter);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_put_setter);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_del_by_id);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+
+#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
+#endif
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.call(resultVReg);
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ unsigned valueVReg = currentInstruction[3].u.operand;
+ unsigned direct = currentInstruction[8].u.operand;
+
+ emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
+
+ JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.addArgument(regT1);
+ stubCall.call();
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
+ Jump notCell = emitJumpIfNotJSCell(regT0);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+
+ // This will be repatched to materialize the cached function as an immediate, without doing a memory load.
+ DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ Jump match = jump();
+
+ ASSERT_JIT_OFFSET_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
+ ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
+ ASSERT_JIT_OFFSET_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
+
+ // Link the failure cases here.
+ notCell.link(this);
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+ // Do a regular(ish) get_by_id (the slow case will be linked to
+ // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
+
+ match.link(this);
+ emitPutVirtualRegister(resultVReg);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
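+
+// When the method check is later linked, the structure comparisons and the moveWithPatch above are
+// repatched with the cached structures and function, so that when the checks succeed at run time the
+// known function ends up in regT0 and the 'match' jump skips the inline get_by_id.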
+
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
+
+#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+
+#endif
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+ compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
+ emitPutVirtualRegister(resultVReg);
+}
+
+void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
+{
+ // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+ // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+ // to the array-length / prototype access trampolines), and finally we also use the property-map access offset as
+ // a label to jump back to if one of these trampolines finds a match.
+
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+
+ DataLabelPtr structureToCompare;
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ addSlowCase(structureCheck);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
+
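+ // Load the property storage, then load from a placeholder displacement (patchGetByIdDefaultOffset);
+ // patchGetByIdSelf() below rewrites this displacement with the real cached offset once the access is cached.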
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
+ DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
+ ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);
+
+ Label putResult(this);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
+}
+
+void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+{
+ // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
+ // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+ // of the call (which we can use to look up the patch information), but should an array-length or
+ // prototype access trampoline fail we want to bail back to here. To do so we can subtract the
+ // distance from the call back to the head of the slow case.
+
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCase(iter);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+#ifndef NDEBUG
+ Label coldPathBegin(this);
+#endif
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ Call call = stubCall.call(resultVReg);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+ ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ unsigned valueVReg = currentInstruction[3].u.operand;
+
+ unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
+
+ // In order to be able to patch both the Structure and the object offset, we store one pointer, 'hotPathBegin',
+ // just after the arguments have been loaded into registers, and we generate code such that the Structure &
+ // offset are always at the same distance from it.
+
+ emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
+
+ // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+
+ // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
+ DataLabelPtr structureToCompare;
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
+
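+ // Store through the property storage at a placeholder displacement; patchPutByIdReplace() rewrites
+ // both the Structure check above and this displacement once the put is cached.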
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
+ DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
+
+ END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ unsigned direct = currentInstruction[8].u.operand;
+
+ unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
+
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.addArgument(regT1);
+ Call call = stubCall.call();
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+}
+
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset * sizeof(JSValue);
+ if (structure->isUsingInlineStorage())
+ offset += JSObject::offsetOfInlineStorage();
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
+ storePtr(value, Address(base, offset));
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset * sizeof(JSValue);
+ if (structure->isUsingInlineStorage()) {
+ offset += JSObject::offsetOfInlineStorage();
+ loadPtr(Address(base, offset), result);
+ } else {
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), result);
+ loadPtr(Address(result, offset), result);
+ }
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
+{
+ loadPtr(static_cast<void*>(&base->m_propertyStorage[cachedOffset]), result);
+}
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+{
+ JumpList failureCases;
+ // Check eax is an object of the right Structure.
+ failureCases.append(emitJumpIfNotJSCell(regT0));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
+ testPrototype(oldStructure->storedPrototype(), failureCases);
+
+ if (!direct) {
+ // Verify that nothing in the prototype chain has a setter for this property.
+ for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
+ testPrototype((*it)->storedPrototype(), failureCases);
+ }
+
+ Call callTarget;
+
+ // emit a call only if storage realloc is needed
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (willNeedStorageRealloc) {
+ // This trampoline is called like a JIT stub; before we can call again we need to
+ // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
+ stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
+ stubCall.call(regT0);
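+ // Reload the value to be stored; the realloc stub call above may have clobbered regT1.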
+ emitGetJITStubArg(2, regT1);
+
+ restoreReturnAddressBeforeReturn(regT3);
+ }
+
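+ // The guards have all passed: switch the object over to the new Structure.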
+ storePtrWithWriteBarrier(TrustedImmPtr(newStructure), regT0, Address(regT0, JSCell::structureOffset()));
+
+ // write the value
+ compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
+
+ ret();
+
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
+
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ }
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+}
+
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
+}
+
+void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ ASSERT(!methodCallLinkInfo.cachedStructure);
+ methodCallLinkInfo.cachedStructure.set(globalData, codeBlock->ownerExecutable(), structure);
+
+ Structure* prototypeStructure = proto->structure();
+ methodCallLinkInfo.cachedPrototypeStructure.set(globalData, codeBlock->ownerExecutable(), prototypeStructure);
+
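+ // Patch the four constants planted by emit_op_method_check: the base object's Structure check,
+ // the prototype pointer, the prototype's Structure check, and the cached function.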
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
+}
+
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
+}
+
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+{
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+ // Check eax is an array
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
+ load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
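+ // A length with the sign bit set cannot be boxed as an int32 immediate, so treat it as a failure case.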
+ Jump failureCases2 = branch32(LessThan, regT2, TrustedImm32(0));
+
+ emitFastArithIntToImmNoCheck(regT2, regT0);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+ // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+}
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+ // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ // Check eax is an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure had not changed.
+ move(TrustedImmPtr(protoObject), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
+
+ bool needsStubLink = false;
+
+ // Checks out okay!
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ Jump success = jump();
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+ // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+{
+ Jump failureCase = checkStructure(regT0, structure);
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(regT0, regT1, structure, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ if (!lastProtoBegin)
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+
+ patchBuffer.link(failureCase, lastProtoBegin);
+
+ // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+{
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+ // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ // Check eax is an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure had not changed.
+ move(TrustedImmPtr(protoObject), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
+
+ // Checks out okay!
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ patchBuffer.link(failureCases1, lastProtoBegin);
+ patchBuffer.link(failureCases2, lastProtoBegin);
+
+ // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure, prototypeStructure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+{
+ ASSERT(count);
+ JumpList bucketsOfFail;
+
+ // Check eax is an object of the right Structure.
+ Jump baseObjectCheck = checkStructure(regT0, structure);
+ bucketsOfFail.append(baseObjectCheck);
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
+ }
+ ASSERT(protoObject);
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+
+ patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+ // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ // Track the stub we have created so that it will be deleted later.
+ prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+ // Check eax is an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
+ }
+ ASSERT(protoObject);
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+
+ // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+#endif // USE(JSVALUE64)
+
+void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
+{
+ if (prototype.isNull())
+ return;
+
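+ // The stub bakes in the prototype cell and its current Structure; if the Structure has since been
+ // replaced, the cached lookup may no longer be valid, so treat it as a failure case.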
+ ASSERT(prototype.isCell());
+ move(TrustedImmPtr(prototype.asCell()), regT3);
+ failureCases.append(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototype.asCell()->structure())));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
new file mode 100644
index 0000000..f18e277
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -0,0 +1,1112 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "Interpreter.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_index);
+ stubCall.addArgument(base);
+ stubCall.addArgument(Imm32(property));
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_getter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_setter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_del_by_id);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.call(dst);
+}
+
+
+#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
+#endif
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.call(dst);
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ m_propertyAccessInstructionIndex++;
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(value);
+ stubCall.call();
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ m_propertyAccessInstructionIndex++;
+ ASSERT_NOT_REACHED();
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT2);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+
+ // This will be relinked to load the function without doing a load.
+ DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ Jump match = jump();
+
+ ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
+ ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
+ ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
+
+ // Link the failure cases here.
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+ // Do a regular(ish) get_by_id (the slow case will be linked to
+ // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath();
+
+ match.link(this);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
+
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
+
+#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+
+#endif
+
+JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ JSInterfaceJIT jit;
+ JumpList failures;
+ failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
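+ // A non-zero fiber count means the string is a rope; this fast path only handles flat strings.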
+ failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+
+ // Load string length to regT1, and start the process of loading the data pointer into regT0
+ jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
+ jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ failures.append(jit.branch32(AboveOrEqual, regT2, regT1));
+
+ // Load the character
+ jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
+
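+ // Only single characters in the 0..0xFF range have entries in the small strings cache; anything larger
+ // falls back to the slow path.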
+ failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
+ jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+ jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
+ jit.ret();
+
+ failures.link(&jit);
+ jit.move(TrustedImm32(0), regT0);
+ jit.ret();
+
+ LinkBuffer patchBuffer(&jit, pool);
+ return patchBuffer.finalizeCode().m_code;
+}
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
+
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
+
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
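+ // An empty value tag marks a hole in the array storage; holes are handled by the slow case.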
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+
+ Jump nonCell = jump();
+ linkSlowCase(iter); // base array check
+ Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
+ emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
+ Jump failed = branchTestPtr(Zero, regT0);
+ emitStore(dst, regT1, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+ failed.link(this);
+ notString.link(this);
+ nonCell.link(this);
+
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
+
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
+
+ Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+
+ Label storeResult(this);
+ emitLoad(value, regT1, regT0);
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
+ Jump end = jump();
+
+ empty.link(this);
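+ // Writing into a hole: bump the count of values in the vector and, if the index is at or beyond the
+ // current length, grow m_length to index + 1 before storing.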
+ add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+ add32(TrustedImm32(1), regT2, regT0);
+ store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ jump().linkTo(storeResult, this);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base not array check
+ linkSlowCase(iter); // in vector check
+
+ JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+ stubPutByValCall.addArgument(base);
+ stubPutByValCall.addArgument(property);
+ stubPutByValCall.addArgument(value);
+ stubPutByValCall.call();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ compileGetByIdHotPath();
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
+}
+
+void JIT::compileGetByIdHotPath()
+{
+ // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+ // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+ // to the array-length / prototype access trampolines), and finally we also record the property-map access offset
+ // as a label to jump back to if one of these trampolines finds a match.
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+ m_propertyAccessInstructionIndex++;
+
+ DataLabelPtr structureToCompare;
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ addSlowCase(structureCheck);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT2);
+ DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel1), patchOffsetGetByIdPropertyMapOffset1);
+ DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel2), patchOffsetGetByIdPropertyMapOffset2);
+
+ Label putResult(this);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
+}
+
+void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+{
+ // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
+ // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+ // of the call (which we can use to look up the patch information), but should an array-length or
+ // prototype access trampoline fail we want to bail back to here. To do so we can subtract the
+ // distance from the call back to the head of the slow case.
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+#ifndef NDEBUG
+ Label coldPathBegin(this);
+#endif
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ Call call = stubCall.call(dst);
+
+ END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst);
+
+ ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ // In order to be able to patch both the Structure and the object offset, we store one pointer, 'hotPathBegin',
+ // just after the arguments have been loaded into registers, and we generate code such that the Structure &
+ // offset are always at the same distance from it.
+
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, value, regT3, regT2);
+
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+ m_propertyAccessInstructionIndex++;
+
+ // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
+ DataLabelPtr structureToCompare;
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
+ DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
+ DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
+
+ END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel1), patchOffsetPutByIdPropertyMapOffset1);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel2), patchOffsetPutByIdPropertyMapOffset2);
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+ int direct = currentInstruction[8].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(regT3, regT2);
+ Call call = stubCall.call();
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
+
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
+{
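+ // Note: cachedOffset is used in Register-sized units here (the inline-storage adjustment below divides
+ // by sizeof(Register)), unlike the byte offsets used by the JSVALUE64 path.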
+ int offset = cachedOffset;
+ if (structure->isUsingInlineStorage())
+ offset += JSObject::offsetOfInlineStorage() / sizeof(Register);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
+ emitStore(offset, valueTag, valuePayload, base);
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ if (structure->isUsingInlineStorage()) {
+ offset += JSObject::offsetOfInlineStorage() / sizeof(Register);
+ emitLoad(offset, resultTag, resultPayload, base);
+ } else {
+ RegisterID temp = resultPayload;
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), temp);
+ emitLoad(offset, resultTag, resultPayload, temp);
+ }
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
+{
+ load32(reinterpret_cast<char*>(&base->m_propertyStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), resultPayload);
+ load32(reinterpret_cast<char*>(&base->m_propertyStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), resultTag);
+}
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+{
+ // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
+
+ JumpList failureCases;
+ failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
+ testPrototype(oldStructure->storedPrototype(), failureCases);
+
+ if (!direct) {
+ // Verify that nothing in the prototype chain has a setter for this property.
+ for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
+ testPrototype((*it)->storedPrototype(), failureCases);
+ }
+
+ // Reallocate property storage if needed.
+ Call callTarget;
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (willNeedStorageRealloc) {
+ // This trampoline is called like a JIT stub; before we can call again we need to
+ // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
+ stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
+ stubCall.call(regT0);
+
+ restoreReturnAddressBeforeReturn(regT3);
+ }
+
+ storePtrWithWriteBarrier(TrustedImmPtr(newStructure), regT0, Address(regT0, JSCell::structureOffset()));
+
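+ // Reload the value to be stored from the JIT stack frame (stub argument slot args[2]); as noted above,
+ // it lives on the stack rather than in registers.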
+#if CPU(MIPS) || CPU(SH4)
+ // For MIPS and SH4, we don't add sizeof(void*) to the stack offset.
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
+#else
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
+#endif
+
+ // Write the value
+ compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
+
+ ret();
+
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
+
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ }
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+}
+
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
+}
+
+void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ ASSERT(!methodCallLinkInfo.cachedStructure);
+ methodCallLinkInfo.cachedStructure.set(globalData, codeBlock->ownerExecutable(), structure);
+ Structure* prototypeStructure = proto->structure();
+ methodCallLinkInfo.cachedPrototypeStructure.set(globalData, codeBlock->ownerExecutable(), prototypeStructure);
+
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
+}
+
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
+}
+
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+{
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+ // regT0 holds a JSCell*
+
+ // Check for array
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+
+ Jump failureCases2 = branch32(Above, regT2, TrustedImm32(INT_MAX));
+ move(regT2, regT0);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+}
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+    // Check that the prototype object's Structure has not changed.
+ move(TrustedImmPtr(protoObject), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
+
+ bool needsStubLink = false;
+ // Checks out okay!
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+{
+ // regT0 holds a JSCell*
+ Jump failureCase = checkStructure(regT0, structure);
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(regT0, regT2, regT1, structure, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ if (!lastProtoBegin)
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+
+ patchBuffer.link(failureCase, lastProtoBegin);
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+    // Check regT0 is an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+    // Check that the prototype object's Structure has not changed.
+ move(TrustedImmPtr(protoObject), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ patchBuffer.link(failureCases1, lastProtoBegin);
+ patchBuffer.link(failureCases2, lastProtoBegin);
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, prototypeStructure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+    // Check regT0 is an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
+ }
+ ASSERT(protoObject);
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+
+ patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ // Track the stub we have created so that it will be deleted later.
+ prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+    // Check regT0 is an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
+ }
+ ASSERT(protoObject);
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset)
+{
+ ASSERT(sizeof(JSValue) == 8);
+
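+    // 'offset' holds a property index; each JSValue in the property storage occupies eight bytes
+    // (payload + tag), hence the TimesEight scaling in the BaseIndex addresses below.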
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+}
+
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ unsigned iter = currentInstruction[5].u.operand;
+ unsigned i = currentInstruction[6].u.operand;
+
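+    // Fast path for property reads driven by for-in: the property name comes from a
+    // JSPropertyNameIterator, so if it still matches the expected value and the base still has
+    // the iterator's cached Structure, the slot can be loaded directly by index; otherwise we
+    // bail to the slow case.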
+ emitLoad2(property, regT1, regT0, base, regT3, regT2);
+ emitJumpSlowCaseIfNotJSCell(property, regT1);
+ addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
+ // Property registers are now available as the property is known
+ emitJumpSlowCaseIfNotJSCell(base, regT3);
+ emitLoadPayload(iter, regT1);
+
+ // Test base's structure
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(TrustedImm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ compileGetDirectOffset(regT2, regT1, regT0, regT3);
+
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, property);
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubCall.h b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubCall.h
new file mode 100644
index 0000000..a0341d6
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubCall.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubCall_h
+#define JITStubCall_h
+
+#include "MacroAssemblerCodeRef.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
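+    // JITStubCall is a small helper used while generating JIT code: it pokes the arguments for a
+    // C stub function (a cti_* entry point) into the JITStackFrame argument area, then emits the
+    // call and records it so the link phase can bind it to the stub's real address. The
+    // constructor overloads capture the stub's return type so that call(dst) knows how to store
+    // the result.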
+ class JITStubCall {
+ public:
+ JITStubCall(JIT* jit, JSObject* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Cell)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+ JITStubCall(JIT* jit, JSPropertyNameIterator* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Cell)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+ JITStubCall(JIT* jit, void* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(VoidPtr)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+ JITStubCall(JIT* jit, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Int)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+ JITStubCall(JIT* jit, bool (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Int)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+ JITStubCall(JIT* jit, void (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Void)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+
+#if USE(JSVALUE32_64)
+ JITStubCall(JIT* jit, EncodedJSValue (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_returnType(Value)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
+ {
+ }
+#endif
+
+ // Arguments are added first to last.
+
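+        // skipArgument() advances past a slot without writing it; callers use it when the slot
+        // already holds what the stub expects (for example the cti_op_put_by_id_transition_realloc
+        // call in the property-access JIT, which skips the base/ident/value slots).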
+ void skipArgument()
+ {
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(JIT::TrustedImm32 argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(JIT::TrustedImmPtr argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(JIT::RegisterID argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
+
+#if USE(JSVALUE32_64)
+ void addArgument(const JSValue& value)
+ {
+ m_jit->poke(JIT::Imm32(value.payload()), m_stackIndex);
+ m_jit->poke(JIT::Imm32(value.tag()), m_stackIndex + 1);
+ m_stackIndex += stackIndexStep;
+ }
+#endif
+
+ void addArgument(JIT::RegisterID tag, JIT::RegisterID payload)
+ {
+ m_jit->poke(payload, m_stackIndex);
+ m_jit->poke(tag, m_stackIndex + 1);
+ m_stackIndex += stackIndexStep;
+ }
+
+#if USE(JSVALUE32_64)
+ void addArgument(unsigned srcVirtualRegister)
+ {
+ if (m_jit->m_codeBlock->isConstantRegisterIndex(srcVirtualRegister)) {
+ addArgument(m_jit->getConstantOperand(srcVirtualRegister));
+ return;
+ }
+
+ m_jit->emitLoad(srcVirtualRegister, JIT::regT1, JIT::regT0);
+ addArgument(JIT::regT1, JIT::regT0);
+ }
+
+ void getArgument(size_t argumentNumber, JIT::RegisterID tag, JIT::RegisterID payload)
+ {
+ size_t stackIndex = JITSTACKFRAME_ARGS_INDEX + (argumentNumber * stackIndexStep);
+ m_jit->peek(payload, stackIndex);
+ m_jit->peek(tag, stackIndex + 1);
+ }
+#else
+ void addArgument(unsigned src, JIT::RegisterID scratchRegister) // src is a virtual register.
+ {
+ if (m_jit->m_codeBlock->isConstantRegisterIndex(src))
+ addArgument(JIT::ImmPtr(JSValue::encode(m_jit->m_codeBlock->getConstant(src))));
+ else {
+ m_jit->loadPtr(JIT::Address(JIT::callFrameRegister, src * sizeof(Register)), scratchRegister);
+ addArgument(scratchRegister);
+ }
+ m_jit->killLastResultRegister();
+ }
+#endif
+
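+        // call() re-establishes the argument pointer to the JITStackFrame, emits the call, and
+        // appends it to m_calls so that the link phase can later bind it to the stub's address;
+        // any cached result/value mappings are invalidated afterwards.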
+ JIT::Call call()
+ {
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_jit->m_bytecodeOffset != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
+#endif
+
+ m_jit->restoreArgumentReference();
+ JIT::Call call = m_jit->call();
+ m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeOffset, m_stub.value()));
+
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_jit->m_bytecodeOffset != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
+#endif
+
+#if USE(JSVALUE32_64)
+ m_jit->unmap();
+#else
+ m_jit->killLastResultRegister();
+#endif
+ return call;
+ }
+
+#if USE(JSVALUE32_64)
+ JIT::Call call(unsigned dst) // dst is a virtual register.
+ {
+ ASSERT(m_returnType == Value || m_returnType == Cell);
+ JIT::Call call = this->call();
+ if (m_returnType == Value)
+ m_jit->emitStore(dst, JIT::regT1, JIT::regT0);
+ else
+ m_jit->emitStoreCell(dst, JIT::returnValueRegister);
+ return call;
+ }
+#else
+ JIT::Call call(unsigned dst) // dst is a virtual register.
+ {
+ ASSERT(m_returnType == VoidPtr || m_returnType == Cell);
+ JIT::Call call = this->call();
+ m_jit->emitPutVirtualRegister(dst);
+ return call;
+ }
+#endif
+
+ JIT::Call call(JIT::RegisterID dst) // dst is a machine register.
+ {
+#if USE(JSVALUE32_64)
+ ASSERT(m_returnType == Value || m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
+#else
+ ASSERT(m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
+#endif
+ JIT::Call call = this->call();
+ if (dst != JIT::returnValueRegister)
+ m_jit->move(JIT::returnValueRegister, dst);
+ return call;
+ }
+
+ private:
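+        // Arguments are poked in units of machine words: on 32-bit targets an EncodedJSValue
+        // spans two words (payload + tag), so each argument advances the stack index by two
+        // slots; on 64-bit targets one slot suffices.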
+ static const size_t stackIndexStep = sizeof(EncodedJSValue) == 2 * sizeof(void*) ? 2 : 1;
+
+ JIT* m_jit;
+ FunctionPtr m_stub;
+ enum { Void, VoidPtr, Int, Value, Cell } m_returnType;
+ size_t m_stackIndex;
+ };
+}
+
+#endif // ENABLE(JIT)
+
+#endif // JITStubCall_h
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubs.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubs.cpp
new file mode 100644
index 0000000..a589e11
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -0,0 +1,3613 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#include "JITStubs.h"
+
+#include "Arguments.h"
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "Heap.h"
+#include "Debugger.h"
+#include "ExceptionHelpers.h"
+#include "GetterSetter.h"
+#include "Strong.h"
+#include "JIT.h"
+#include "JSActivation.h"
+#include "JSArray.h"
+#include "JSByteArray.h"
+#include "JSFunction.h"
+#include "JSGlobalObjectFunctions.h"
+#include "JSNotAnObject.h"
+#include "JSPropertyNameIterator.h"
+#include "JSStaticScopeObject.h"
+#include "JSString.h"
+#include "ObjectPrototype.h"
+#include "Operations.h"
+#include "Parser.h"
+#include "Profiler.h"
+#include "RegExpObject.h"
+#include "RegExpPrototype.h"
+#include "Register.h"
+#include "SamplingTool.h"
+#include <wtf/StdLibExtras.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+using namespace std;
+
+namespace JSC {
+
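+// The following macros adapt symbol naming, relocation and visibility for the hand-written
+// assembly trampolines below to the conventions of each platform's toolchain.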
+#if OS(DARWIN) || (OS(WINDOWS) && CPU(X86))
+#define SYMBOL_STRING(name) "_" #name
+#else
+#define SYMBOL_STRING(name) #name
+#endif
+
+#if OS(IOS)
+#define THUMB_FUNC_PARAM(name) SYMBOL_STRING(name)
+#else
+#define THUMB_FUNC_PARAM(name)
+#endif
+
+#if (OS(LINUX) || OS(FREEBSD)) && CPU(X86_64)
+#define SYMBOL_STRING_RELOCATION(name) #name "@plt"
+#elif OS(DARWIN) || (CPU(X86_64) && COMPILER(MINGW) && !GCC_VERSION_AT_LEAST(4, 5, 0))
+#define SYMBOL_STRING_RELOCATION(name) "_" #name
+#elif CPU(X86) && COMPILER(MINGW)
+#define SYMBOL_STRING_RELOCATION(name) "@" #name "@4"
+#else
+#define SYMBOL_STRING_RELOCATION(name) #name
+#endif
+
+#if OS(DARWIN)
+ // Mach-O platform
+#define HIDE_SYMBOL(name) ".private_extern _" #name
+#elif OS(AIX)
+ // IBM's own file format
+#define HIDE_SYMBOL(name) ".lglobl " #name
+#elif OS(LINUX) \
+ || OS(FREEBSD) \
+ || OS(OPENBSD) \
+ || OS(SOLARIS) \
+ || (OS(HPUX) && CPU(IA64)) \
+ || OS(SYMBIAN) \
+ || OS(NETBSD)
+ // ELF platform
+#define HIDE_SYMBOL(name) ".hidden " #name
+#else
+#define HIDE_SYMBOL(name)
+#endif
+
+#if USE(JSVALUE32_64)
+
+#if COMPILER(GCC) && CPU(X86)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
+
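+// In outline: the trampoline saves the callee-saved registers, reserves 0x3c bytes of stub
+// argument space, loads the timeout-check count (512) into esi and the CallFrame* (offset 0x58,
+// per the asserts above) into edi, then calls the JIT-generated code whose address lives at
+// offset 0x50.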
+asm (
+".text\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushl %ebp" "\n"
+ "movl %esp, %ebp" "\n"
+ "pushl %esi" "\n"
+ "pushl %edi" "\n"
+ "pushl %ebx" "\n"
+ "subl $0x3c, %esp" "\n"
+ "movl $512, %esi" "\n"
+ "movl 0x58(%esp), %edi" "\n"
+ "call *0x50(%esp)" "\n"
+ "addl $0x3c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "movl %esp, %ecx" "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
+ "int3" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addl $0x3c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(GCC) && CPU(X86_64)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 32 == 0x0, JITStackFrame_maintains_32byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
+
+asm (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushq %rbp" "\n"
+ "movq %rsp, %rbp" "\n"
+ "pushq %r12" "\n"
+ "pushq %r13" "\n"
+ "pushq %r14" "\n"
+ "pushq %r15" "\n"
+ "pushq %rbx" "\n"
+ "subq $0x48, %rsp" "\n"
+ "movq $512, %r12" "\n"
+ "movq $0xFFFF000000000000, %r14" "\n"
+ "movq $0xFFFF000000000002, %r15" "\n"
+ "movq 0x90(%rsp), %r13" "\n"
+ "call *0x80(%rsp)" "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "movq %rsp, %rdi" "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
+ "int3" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+#elif (COMPILER(GCC) || COMPILER(RVCT)) && CPU(ARM_THUMB2)
+
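+// JITStackFrame layout offsets for the ARMv7 (Thumb-2) trampolines; the JITThunks constructor
+// asserts that they match the real structure layout.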
+#define THUNK_RETURN_ADDRESS_OFFSET 0x38
+#define PRESERVED_RETURN_ADDRESS_OFFSET 0x3C
+#define PRESERVED_R4_OFFSET 0x40
+#define PRESERVED_R5_OFFSET 0x44
+#define PRESERVED_R6_OFFSET 0x48
+#define REGISTER_FILE_OFFSET 0x4C
+#define CALLFRAME_OFFSET 0x50
+#define EXCEPTION_OFFSET 0x54
+#define ENABLE_PROFILER_REFERENCE_OFFSET 0x58
+
+#elif (COMPILER(GCC) || COMPILER(MSVC) || COMPILER(RVCT)) && CPU(ARM_TRADITIONAL)
+
+// Also update the MSVC section (defined at DEFINE_STUB_FUNCTION)
+// when changing one of the following values.
+#define THUNK_RETURN_ADDRESS_OFFSET 64
+#define PRESERVEDR4_OFFSET 68
+
+#elif COMPILER(MSVC) && CPU(X86)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
+
+extern "C" {
+
+ __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*)
+ {
+ __asm {
+ push ebp;
+ mov ebp, esp;
+ push esi;
+ push edi;
+ push ebx;
+ sub esp, 0x3c;
+ mov esi, 512;
+ mov ecx, esp;
+ mov edi, [esp + 0x58];
+ call [esp + 0x50];
+ add esp, 0x3c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiVMThrowTrampoline()
+ {
+ __asm {
+ mov ecx, esp;
+ call cti_vm_throw;
+ add esp, 0x3c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiOpThrowNotCaught()
+ {
+ __asm {
+ add esp, 0x3c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+}
+
+#elif CPU(MIPS)
+
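+// JITStackFrame layout offsets used by the MIPS trampolines further down; they must stay in sync
+// with the OBJECT_OFFSETOF assertions in the JITThunks constructor.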
+#define PRESERVED_GP_OFFSET 60
+#define PRESERVED_S0_OFFSET 64
+#define PRESERVED_S1_OFFSET 68
+#define PRESERVED_S2_OFFSET 72
+#define PRESERVED_RETURN_ADDRESS_OFFSET 76
+#define THUNK_RETURN_ADDRESS_OFFSET 80
+#define REGISTER_FILE_OFFSET 84
+#define CALLFRAME_OFFSET 88
+#define EXCEPTION_OFFSET 92
+#define ENABLE_PROFILER_REFERENCE_OFFSET 96
+#define GLOBAL_DATA_OFFSET 100
+#define STACK_LENGTH 104
+#elif CPU(SH4)
+#define SYMBOL_STRING(name) #name
+/* code (r4), RegisterFile* (r5), CallFrame* (r6), JSValue* exception (r7), Profiler**(sp), JSGlobalData (sp)*/
+
+asm volatile (
+".text\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "mov.l r7, @-r15" "\n"
+ "mov.l r6, @-r15" "\n"
+ "mov.l r5, @-r15" "\n"
+ "mov.l r8, @-r15" "\n"
+ "mov #127, r8" "\n"
+ "mov.l r14, @-r15" "\n"
+ "sts.l pr, @-r15" "\n"
+ "mov.l r13, @-r15" "\n"
+ "mov.l r11, @-r15" "\n"
+ "mov.l r10, @-r15" "\n"
+ "add #-60, r15" "\n"
+ "mov r6, r14" "\n"
+ "jsr @r4" "\n"
+ "nop" "\n"
+ "add #60, r15" "\n"
+ "mov.l @r15+,r10" "\n"
+ "mov.l @r15+,r11" "\n"
+ "mov.l @r15+,r13" "\n"
+ "lds.l @r15+,pr" "\n"
+ "mov.l @r15+,r14" "\n"
+ "mov.l @r15+,r8" "\n"
+ "add #12, r15" "\n"
+ "rts" "\n"
+ "nop" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "mov.l .L2"SYMBOL_STRING(cti_vm_throw)",r0" "\n"
+ "mov r15, r4" "\n"
+ "mov.l @(r0,r12),r11" "\n"
+ "jsr @r11" "\n"
+ "nop" "\n"
+ "add #60, r15" "\n"
+ "mov.l @r15+,r10" "\n"
+ "mov.l @r15+,r11" "\n"
+ "mov.l @r15+,r13" "\n"
+ "lds.l @r15+,pr" "\n"
+ "mov.l @r15+,r14" "\n"
+ "mov.l @r15+,r8" "\n"
+ "add #12, r15" "\n"
+ "rts" "\n"
+ "nop" "\n"
+ ".align 2" "\n"
+ ".L2"SYMBOL_STRING(cti_vm_throw)":.long " SYMBOL_STRING(cti_vm_throw)"@GOT \n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "add #60, r15" "\n"
+ "mov.l @r15+,r10" "\n"
+ "mov.l @r15+,r11" "\n"
+ "mov.l @r15+,r13" "\n"
+ "lds.l @r15+,pr" "\n"
+ "mov.l @r15+,r14" "\n"
+ "mov.l @r15+,r8" "\n"
+ "add #12, r15" "\n"
+ "rts" "\n"
+ "nop" "\n"
+);
+#else
+ #error "JIT not supported on this platform."
+#endif
+
+#else // USE(JSVALUE32_64)
+
+#if COMPILER(GCC) && CPU(X86_64)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x48, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x78, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
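+// In outline: the six stub arguments arrive in registers here, so after saving the callee-saved
+// registers the trampoline pushes them to form the JITStackFrame, loads 512 (the timeout-check
+// count) into r12 and the two value-tagging constants into r14/r15, moves the CallFrame* (rdx)
+// into r13, and calls the JIT-generated code whose address arrived in rdi.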
+asm (
+".text\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushq %rbp" "\n"
+ "movq %rsp, %rbp" "\n"
+ "pushq %r12" "\n"
+ "pushq %r13" "\n"
+ "pushq %r14" "\n"
+ "pushq %r15" "\n"
+ "pushq %rbx" "\n"
+ // Form the JIT stubs area
+ "pushq %r9" "\n"
+ "pushq %r8" "\n"
+ "pushq %rcx" "\n"
+ "pushq %rdx" "\n"
+ "pushq %rsi" "\n"
+ "pushq %rdi" "\n"
+ "subq $0x48, %rsp" "\n"
+ "movq $512, %r12" "\n"
+ "movq $0xFFFF000000000000, %r14" "\n"
+ "movq $0xFFFF000000000002, %r15" "\n"
+ "movq %rdx, %r13" "\n"
+ "call *%rdi" "\n"
+ "addq $0x78, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "movq %rsp, %rdi" "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
+ "int3" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addq $0x78, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+#else
+ #error "JIT not supported on this platform."
+#endif
+
+#endif // USE(JSVALUE32_64)
+
+#if CPU(MIPS)
+asm (
+".text" "\n"
+".align 2" "\n"
+".set noreorder" "\n"
+".set nomacro" "\n"
+".set nomips16" "\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+".ent " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "addiu $29,$29,-" STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+ "sw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "sw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "sw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "sw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+#if WTF_MIPS_PIC
+ "sw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n"
+#endif
+ "move $16,$6 # set callFrameRegister" "\n"
+ "li $17,512 # set timeoutCheckRegister" "\n"
+ "move $25,$4 # move executableAddress to t9" "\n"
+ "sw $5," STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "($29) # store registerFile to current stack" "\n"
+ "sw $6," STRINGIZE_VALUE_OF(CALLFRAME_OFFSET) "($29) # store callFrame to curent stack" "\n"
+ "sw $7," STRINGIZE_VALUE_OF(EXCEPTION_OFFSET) "($29) # store exception to current stack" "\n"
+ "lw $8," STRINGIZE_VALUE_OF(STACK_LENGTH + 16) "($29) # load enableProfilerReference from previous stack" "\n"
+ "lw $9," STRINGIZE_VALUE_OF(STACK_LENGTH + 20) "($29) # load globalData from previous stack" "\n"
+ "sw $8," STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "($29) # store enableProfilerReference to current stack" "\n"
+ "jalr $25" "\n"
+ "sw $9," STRINGIZE_VALUE_OF(GLOBAL_DATA_OFFSET) "($29) # store globalData to current stack" "\n"
+ "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+ "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "jr $31" "\n"
+ "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+".set reorder" "\n"
+".set macro" "\n"
+".end " SYMBOL_STRING(ctiTrampoline) "\n"
+);
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".set noreorder" "\n"
+".set nomacro" "\n"
+".set nomips16" "\n"
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+".ent " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+#if WTF_MIPS_PIC
+ "lw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n"
+".set macro" "\n"
+ "la $25," SYMBOL_STRING(cti_vm_throw) "\n"
+".set nomacro" "\n"
+ "bal " SYMBOL_STRING(cti_vm_throw) "\n"
+ "move $4,$29" "\n"
+#else
+ "jal " SYMBOL_STRING(cti_vm_throw) "\n"
+ "move $4,$29" "\n"
+#endif
+ "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+ "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "jr $31" "\n"
+ "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+".set reorder" "\n"
+".set macro" "\n"
+".end " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+);
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".set noreorder" "\n"
+".set nomacro" "\n"
+".set nomips16" "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+".ent " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+ "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "jr $31" "\n"
+ "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+".set reorder" "\n"
+".set macro" "\n"
+".end " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+);
+#endif
+
+#if COMPILER(GCC) && CPU(ARM_THUMB2)
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "sub sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "str r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "str r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "str r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "str r1, [sp, #" STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "]" "\n"
+ "str r2, [sp, #" STRINGIZE_VALUE_OF(CALLFRAME_OFFSET) "]" "\n"
+ "str r3, [sp, #" STRINGIZE_VALUE_OF(EXCEPTION_OFFSET) "]" "\n"
+ "cpy r5, r2" "\n"
+ "mov r6, #512" "\n"
+ "blx r0" "\n"
+ "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
+ "bx lr" "\n"
+);
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "cpy r0, sp" "\n"
+ "bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
+ "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
+ "bx lr" "\n"
+);
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
+ "bx lr" "\n"
+);
+
+#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
+
+asm (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "stmdb sp!, {r1-r3}" "\n"
+ "stmdb sp!, {r4-r8, lr}" "\n"
+ "sub sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
+ "mov r4, r2" "\n"
+ "mov r5, #512" "\n"
+ // r0 contains the code
+ "mov lr, pc" "\n"
+ "mov pc, r0" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
+ "ldmia sp!, {r4-r8, lr}" "\n"
+ "add sp, sp, #12" "\n"
+ "mov pc, lr" "\n"
+);
+
+asm (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "mov r0, sp" "\n"
+ "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+
+// Both share the same return sequence
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
+ "ldmia sp!, {r4-r8, lr}" "\n"
+ "add sp, sp, #12" "\n"
+ "mov pc, lr" "\n"
+);
+
+#elif COMPILER(RVCT) && CPU(ARM_THUMB2)
+
+__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, JSValue*, Profiler**, JSGlobalData*)
+{
+ PRESERVE8
+ sub sp, sp, # ENABLE_PROFILER_REFERENCE_OFFSET
+ str lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
+ str r4, [sp, # PRESERVED_R4_OFFSET ]
+ str r5, [sp, # PRESERVED_R5_OFFSET ]
+ str r6, [sp, # PRESERVED_R6_OFFSET ]
+ str r1, [sp, # REGISTER_FILE_OFFSET ]
+ str r2, [sp, # CALLFRAME_OFFSET ]
+ str r3, [sp, # EXCEPTION_OFFSET ]
+ cpy r5, r2
+ mov r6, #512
+ blx r0
+ ldr r6, [sp, # PRESERVED_R6_OFFSET ]
+ ldr r5, [sp, # PRESERVED_R5_OFFSET ]
+ ldr r4, [sp, # PRESERVED_R4_OFFSET ]
+ ldr lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
+ add sp, sp, # ENABLE_PROFILER_REFERENCE_OFFSET
+ bx lr
+}
+
+__asm void ctiVMThrowTrampoline()
+{
+ PRESERVE8
+ cpy r0, sp
+ bl cti_vm_throw
+ ldr r6, [sp, # PRESERVED_R6_OFFSET ]
+ ldr r5, [sp, # PRESERVED_R5_OFFSET ]
+ ldr r4, [sp, # PRESERVED_R4_OFFSET ]
+ ldr lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
+ add sp, sp, # ENABLE_PROFILER_REFERENCE_OFFSET
+ bx lr
+}
+
+__asm void ctiOpThrowNotCaught()
+{
+ PRESERVE8
+ ldr r6, [sp, # PRESERVED_R6_OFFSET ]
+ ldr r5, [sp, # PRESERVED_R5_OFFSET ]
+ ldr r4, [sp, # PRESERVED_R4_OFFSET ]
+ ldr lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
+ add sp, sp, # ENABLE_PROFILER_REFERENCE_OFFSET
+ bx lr
+}
+
+#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
+
+__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*)
+{
+ ARM
+ stmdb sp!, {r1-r3}
+ stmdb sp!, {r4-r8, lr}
+ sub sp, sp, # PRESERVEDR4_OFFSET
+ mov r4, r2
+ mov r5, #512
+ mov lr, pc
+ bx r0
+ add sp, sp, # PRESERVEDR4_OFFSET
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+
+__asm void ctiVMThrowTrampoline()
+{
+ ARM
+ PRESERVE8
+ mov r0, sp
+ bl cti_vm_throw
+ add sp, sp, # PRESERVEDR4_OFFSET
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+
+__asm void ctiOpThrowNotCaught()
+{
+ ARM
+ add sp, sp, # PRESERVEDR4_OFFSET
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+ #define CTI_SAMPLER stackFrame.globalData->interpreter->sampler()
+#else
+ #define CTI_SAMPLER 0
+#endif
+
+JITThunks::JITThunks(JSGlobalData* globalData)
+ : m_hostFunctionStubMap(adoptPtr(new HostFunctionStubMap))
+{
+ if (!globalData->executableAllocator.isValid())
+ return;
+
+ JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_trampolineStructure);
+ ASSERT(m_executablePool);
+#if CPU(ARM_THUMB2)
+    // Unfortunately the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
+    // and the OBJECT_OFFSETOF macro does not appear sufficiently constant for it to be happy with its use in
+    // COMPILE_ASSERT macros.
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == PRESERVED_R4_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == PRESERVED_R5_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == PRESERVED_R6_OFFSET);
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == REGISTER_FILE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == CALLFRAME_OFFSET);
+ // The fifth argument is the first item already on the stack.
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == ENABLE_PROFILER_REFERENCE_OFFSET);
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
+
+#elif CPU(ARM_TRADITIONAL)
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == PRESERVEDR4_OFFSET);
+
+
+#elif CPU(MIPS)
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedGP) == PRESERVED_GP_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS0) == PRESERVED_S0_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS1) == PRESERVED_S1_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS2) == PRESERVED_S2_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == REGISTER_FILE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == CALLFRAME_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, unused1) == EXCEPTION_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == ENABLE_PROFILER_REFERENCE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, globalData) == GLOBAL_DATA_OFFSET);
+
+#endif
+}
+
+JITThunks::~JITThunks()
+{
+}
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo, bool direct)
+{
+ // The interpreter checks for recursion here; I do not believe this can occur in CTI.
+
+ if (!baseValue.isCell())
+ return;
+
+ // Uncacheable: give up.
+ if (!slot.isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+ return;
+ }
+
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+
+ if (structure->isUncacheableDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+ return;
+ }
+
+ // If baseCell != base, then baseCell must be a proxy for another object.
+ if (baseCell != slot.base()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+ return;
+ }
+
+ // Cache hit: Specialize instruction and ref Structures.
+
+ // Structure transition, cache transition info
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ if (structure->isDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+ return;
+ }
+
+ // put_by_id_transition checks the prototype chain for setters.
+ normalizePrototypeChain(callFrame, baseCell);
+
+ StructureChain* prototypeChain = structure->prototypeChain(callFrame);
+ stubInfo->initPutByIdTransition(callFrame->globalData(), codeBlock->ownerExecutable(), structure->previousID(), structure, prototypeChain);
+ JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress, direct);
+ return;
+ }
+
+ stubInfo->initPutByIdReplace(callFrame->globalData(), codeBlock->ownerExecutable(), structure);
+
+ JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress, direct);
+}
+
+NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
+{
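+    // Caching strategy: array and string length reads get dedicated trampolines; a hit on
+    // the base object itself is patched inline (get_by_id_self); a hit on the direct
+    // prototype compiles a proto stub; deeper hits compile a prototype-chain stub; any
+    // other case falls back to the generic stub.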
+ // FIXME: Write a test that proves we need to check for recursion here just
+ // like the interpreter does, then add a check for recursion.
+
+ // FIXME: Cache property access for immediates.
+ if (!baseValue.isCell()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
+ return;
+ }
+
+ JSGlobalData* globalData = &callFrame->globalData();
+
+ if (isJSArray(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
+ JIT::compilePatchGetArrayLength(callFrame->scopeChain()->globalData, codeBlock, returnAddress);
+ return;
+ }
+
+ if (isJSString(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
+        // The tradeoff of compiling a patched inline string length access routine does not seem
+ // to pay off, so we currently only do this for arrays.
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs->ctiStringLengthTrampoline());
+ return;
+ }
+
+ // Uncacheable: give up.
+ if (!slot.isCacheable()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
+ return;
+ }
+
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+
+ if (structure->isUncacheableDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
+ return;
+ }
+
+ // Cache hit: Specialize instruction and ref Structures.
+
+ if (slot.slotBase() == baseValue) {
+        // Set this up so derefStructures can do its job.
+ stubInfo->initGetByIdSelf(callFrame->globalData(), codeBlock->ownerExecutable(), structure);
+ if (slot.cachedPropertyType() != PropertySlot::Value)
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+ else
+ JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
+ return;
+ }
+
+ if (structure->isDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
+ return;
+ }
+
+ if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
+ ASSERT(slot.slotBase().isObject());
+
+ JSObject* slotBaseObject = asObject(slot.slotBase());
+ size_t offset = slot.cachedOffset();
+
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary()) {
+ slotBaseObject->flattenDictionaryObject(callFrame->globalData());
+ offset = slotBaseObject->structure()->get(callFrame->globalData(), propertyName);
+ }
+
+ stubInfo->initGetByIdProto(callFrame->globalData(), codeBlock->ownerExecutable(), structure, slotBaseObject->structure());
+
+ ASSERT(!structure->isDictionary());
+ ASSERT(!slotBaseObject->structure()->isDictionary());
+ JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), propertyName, slot, offset, returnAddress);
+ return;
+ }
+
+ size_t offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset);
+ if (!count) {
+ stubInfo->accessType = access_get_by_id_generic;
+ return;
+ }
+
+ StructureChain* prototypeChain = structure->prototypeChain(callFrame);
+ stubInfo->initGetByIdChain(callFrame->globalData(), codeBlock->ownerExecutable(), structure, prototypeChain);
+ JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, propertyName, slot, offset, returnAddress);
+}
+
+#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+#ifndef NDEBUG
+
+extern "C" {
+
+static void jscGeneratedNativeCode()
+{
+ // When executing a JIT stub function (which might do an allocation), we hack the return address
+ // to pretend to be executing this function, to keep stack logging tools from blowing out
+ // memory.
+}
+
+}
+
+struct StackHack {
+ ALWAYS_INLINE StackHack(JITStackFrame& stackFrame)
+ : stackFrame(stackFrame)
+ , savedReturnAddress(*stackFrame.returnAddressSlot())
+ {
+ *stackFrame.returnAddressSlot() = ReturnAddressPtr(FunctionPtr(jscGeneratedNativeCode));
+ }
+
+ ALWAYS_INLINE ~StackHack()
+ {
+ *stackFrame.returnAddressSlot() = savedReturnAddress;
+ }
+
+ JITStackFrame& stackFrame;
+ ReturnAddressPtr savedReturnAddress;
+};
+
+#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast_ptr<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
+#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = ReturnAddressPtr(returnAddress)
+#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress
+
+#else
+
+#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast_ptr<JITStackFrame*>(STUB_ARGS)
+#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = ReturnAddressPtr(returnAddress)
+#define STUB_RETURN_ADDRESS *stackFrame.returnAddressSlot()
+
+#endif
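+
+// STUB_INIT_STACK_FRAME recovers the JITStackFrame from the stub's arguments, and
+// STUB_RETURN_ADDRESS is the JIT return address of the call into the stub, which the
+// stubs below read and repatch. In debug builds StackHack temporarily swaps that
+// address for jscGeneratedNativeCode while the stub runs.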
+
+// The reason this is not inlined is to avoid having to do a PIC branch
+// to get the address of the ctiVMThrowTrampoline function. It's also
+// good to keep the code size down by leaving as much of the exception
+// handling code out of line as possible.
+static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
+{
+ ASSERT(globalData->exception);
+ globalData->exceptionLocation = exceptionLocation;
+ returnAddressSlot = ReturnAddressPtr(FunctionPtr(ctiVMThrowTrampoline));
+}
+
+static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
+{
+ globalData->exception = createStackOverflowError(callFrame);
+ returnToThrowTrampoline(globalData, exceptionLocation, returnAddressSlot);
+}
+
+#define VM_THROW_EXCEPTION() \
+ do { \
+ VM_THROW_EXCEPTION_AT_END(); \
+ return 0; \
+ } while (0)
+#define VM_THROW_EXCEPTION_AT_END() \
+ do {\
+ returnToThrowTrampoline(stackFrame.globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS);\
+ } while (0)
+
+#define CHECK_FOR_EXCEPTION() \
+ do { \
+ if (UNLIKELY(stackFrame.globalData->exception)) \
+ VM_THROW_EXCEPTION(); \
+ } while (0)
+#define CHECK_FOR_EXCEPTION_AT_END() \
+ do { \
+ if (UNLIKELY(stackFrame.globalData->exception)) \
+ VM_THROW_EXCEPTION_AT_END(); \
+ } while (0)
+#define CHECK_FOR_EXCEPTION_VOID() \
+ do { \
+ if (UNLIKELY(stackFrame.globalData->exception)) { \
+ VM_THROW_EXCEPTION_AT_END(); \
+ return; \
+ } \
+ } while (0)
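+
+// VM_THROW_EXCEPTION redirects the stub's return address to ctiVMThrowTrampoline and
+// returns 0 immediately; the *_AT_END variants only redirect, so the stub still returns
+// its value and the throw is taken when it exits.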
+
+struct ExceptionHandler {
+ void* catchRoutine;
+ CallFrame* callFrame;
+};
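+// jitThrow unwinds via the interpreter's exception machinery and returns the native
+// catch routine (or ctiOpThrowNotCaught when the exception is uncaught) together with
+// the call frame it should run in, which may differ from the throwing frame.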
+static ExceptionHandler jitThrow(JSGlobalData* globalData, CallFrame* callFrame, JSValue exceptionValue, ReturnAddressPtr faultLocation)
+{
+ ASSERT(exceptionValue);
+
+ unsigned vPCIndex = callFrame->codeBlock()->bytecodeOffset(faultLocation);
+ globalData->exception = JSValue();
+ HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex); // This may update callFrame & exceptionValue!
+ globalData->exception = exceptionValue;
+
+ void* catchRoutine = handler ? handler->nativeCode.executableAddress() : FunctionPtr(ctiOpThrowNotCaught).value();
+ ASSERT(catchRoutine);
+ ExceptionHandler exceptionHandler = { catchRoutine, callFrame };
+ return exceptionHandler;
+}
+
+#if CPU(ARM_THUMB2) && COMPILER(GCC)
+
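+// On the architectures below each cti_* entry point is a small assembly thunk that
+// spills the return address into the JITStackFrame's thunkReturnAddress slot before
+// calling the C++ stub, so STUB_RETURN_ADDRESS can be read and repatched from C++.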
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ HIDE_SYMBOL(cti_##op) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
+ "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
+ "bx lr" "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \
+
+#elif CPU(MIPS)
+#if WTF_MIPS_PIC
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".set noreorder" "\n" \
+ ".set nomacro" "\n" \
+ ".set nomips16" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ ".ent " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "lw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n" \
+ "sw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ ".set macro" "\n" \
+ "la $25," SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ ".set nomacro" "\n" \
+ "bal " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "nop" "\n" \
+ "lw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ "jr $31" "\n" \
+ "nop" "\n" \
+ ".set reorder" "\n" \
+ ".set macro" "\n" \
+ ".end " SYMBOL_STRING(cti_##op) "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+#else // WTF_MIPS_PIC
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".set noreorder" "\n" \
+ ".set nomacro" "\n" \
+ ".set nomips16" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ ".ent " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "sw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ "jal " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "nop" "\n" \
+ "lw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ "jr $31" "\n" \
+ "nop" "\n" \
+ ".set reorder" "\n" \
+ ".set macro" "\n" \
+ ".end " SYMBOL_STRING(cti_##op) "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+#endif
+
+#elif CPU(ARM_TRADITIONAL) && COMPILER(GCC)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm ( \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
+ "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
+ "mov pc, lr" "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+#elif (CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL)) && COMPILER(RVCT)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+/* The following is a workaround for the RVCT toolchain; preprocessor macros are not expanded before the code is passed to the assembler */
+
+/* The following section is a template to generate code for GeneratedJITStubs_RVCT.h */
+/* The pattern "#xxx#" will be replaced with "xxx" */
+
+/*
+RVCT(extern "C" #rtype# JITStubThunked_#op#(STUB_ARGS_DECLARATION);)
+RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
+RVCT({)
+RVCT( PRESERVE8)
+RVCT( IMPORT JITStubThunked_#op#)
+RVCT( str lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
+RVCT( bl JITStubThunked_#op#)
+RVCT( ldr lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
+RVCT( bx lr)
+RVCT(})
+RVCT()
+*/
+
+/* Include the generated file */
+#include "GeneratedJITStubs_RVCT.h"
+
+#elif CPU(ARM_TRADITIONAL) && COMPILER(MSVC)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) extern "C" rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+/* The following is a workaround for the MSVC toolchain; inline assembly is not supported */
+
+/* The following section is a template to generate code for GeneratedJITStubs_MSVC.asm */
+/* The pattern "#xxx#" will be replaced with "xxx" */
+
+/*
+MSVC_BEGIN( AREA Trampoline, CODE)
+MSVC_BEGIN()
+MSVC_BEGIN( EXPORT ctiTrampoline)
+MSVC_BEGIN( EXPORT ctiVMThrowTrampoline)
+MSVC_BEGIN( EXPORT ctiOpThrowNotCaught)
+MSVC_BEGIN()
+MSVC_BEGIN(ctiTrampoline PROC)
+MSVC_BEGIN( stmdb sp!, {r1-r3})
+MSVC_BEGIN( stmdb sp!, {r4-r8, lr})
+MSVC_BEGIN( sub sp, sp, #68 ; sync with PRESERVEDR4_OFFSET)
+MSVC_BEGIN( mov r4, r2)
+MSVC_BEGIN( mov r5, #512)
+MSVC_BEGIN( ; r0 contains the code)
+MSVC_BEGIN( mov lr, pc)
+MSVC_BEGIN( bx r0)
+MSVC_BEGIN( add sp, sp, #68 ; sync with PRESERVEDR4_OFFSET)
+MSVC_BEGIN( ldmia sp!, {r4-r8, lr})
+MSVC_BEGIN( add sp, sp, #12)
+MSVC_BEGIN( bx lr)
+MSVC_BEGIN(ctiTrampoline ENDP)
+MSVC_BEGIN()
+MSVC_BEGIN(ctiVMThrowTrampoline PROC)
+MSVC_BEGIN( mov r0, sp)
+MSVC_BEGIN( mov lr, pc)
+MSVC_BEGIN( bl cti_vm_throw)
+MSVC_BEGIN(ctiOpThrowNotCaught)
+MSVC_BEGIN( add sp, sp, #68 ; sync with PRESERVEDR4_OFFSET)
+MSVC_BEGIN( ldmia sp!, {r4-r8, lr})
+MSVC_BEGIN( add sp, sp, #12)
+MSVC_BEGIN( bx lr)
+MSVC_BEGIN(ctiVMThrowTrampoline ENDP)
+MSVC_BEGIN()
+
+MSVC( EXPORT cti_#op#)
+MSVC( IMPORT JITStubThunked_#op#)
+MSVC(cti_#op# PROC)
+MSVC( str lr, [sp, #64] ; sync with THUNK_RETURN_ADDRESS_OFFSET)
+MSVC( bl JITStubThunked_#op#)
+MSVC( ldr lr, [sp, #64] ; sync with THUNK_RETURN_ADDRESS_OFFSET)
+MSVC( bx lr)
+MSVC(cti_#op# ENDP)
+MSVC()
+
+MSVC_END( END)
+*/
+
+#elif CPU(SH4)
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm volatile( \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "sts pr, r11" "\n" \
+ "mov.l r11, @(0x38, r15)" "\n" \
+ "mov.l .L2"SYMBOL_STRING(JITStubThunked_##op)",r0" "\n" \
+ "mov.l @(r0,r12),r11" "\n" \
+ "jsr @r11" "\n" \
+ "nop" "\n" \
+ "mov.l @(0x38, r15), r11 " "\n" \
+ "lds r11, pr " "\n" \
+ "rts" "\n" \
+ "nop" "\n" \
+ ".align 2" "\n" \
+ ".L2"SYMBOL_STRING(JITStubThunked_##op)":.long " SYMBOL_STRING(JITStubThunked_##op)"@GOT \n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+#else
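+// On other platforms no assembly thunk is needed and cti_##op is defined as the stub
+// function itself.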
+#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
+#endif
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_this)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSFunction* constructor = asFunction(callFrame->callee());
+#if !ASSERT_DISABLED
+ ConstructData constructData;
+ ASSERT(constructor->getConstructData(constructData) == ConstructTypeJS);
+#endif
+
+ Structure* structure;
+ JSValue proto = stackFrame.args[0].jsValue();
+ if (proto.isObject())
+ structure = asObject(proto)->inheritorID(*stackFrame.globalData);
+ else
+ structure = constructor->scope()->globalObject->emptyObjectStructure();
+ JSValue result = constructEmptyObject(callFrame, structure);
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v1 = stackFrame.args[0].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSObject* result = v1.toThisObject(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this_strict)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v1 = stackFrame.args[0].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+ ASSERT(v1.asCell()->structure()->typeInfo().needsThisConversion());
+ JSValue result = v1.toStrictThisObject(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v1 = stackFrame.args[0].jsValue();
+ JSValue v2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ if (v1.isString()) {
+ JSValue result = v2.isString()
+ ? jsString(callFrame, asString(v1), asString(v2))
+ : jsString(callFrame, asString(v1), v2.toPrimitiveString(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+
+ double left = 0.0, right;
+ if (v1.getNumber(left) && v2.getNumber(right))
+ return JSValue::encode(jsNumber(left + right));
+
+ // All other cases are pretty uncommon
+ JSValue result = jsAddSlowCase(callFrame, v1, v2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_inc)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(v.toNumber(callFrame) + 1);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, timeout_check)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSGlobalData* globalData = stackFrame.globalData;
+ TimeoutChecker& timeoutChecker = globalData->timeoutChecker;
+
+ if (globalData->terminator.shouldTerminate()) {
+ globalData->exception = createTerminatedExecutionException(globalData);
+ VM_THROW_EXCEPTION_AT_END();
+ } else if (timeoutChecker.didTimeOut(stackFrame.callFrame)) {
+ globalData->exception = createInterruptedExecutionException(globalData);
+ VM_THROW_EXCEPTION_AT_END();
+ }
+
+ return timeoutChecker.ticksUntilNextCheck();
+}
+
+DEFINE_STUB_FUNCTION(void*, register_file_check)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ if (UNLIKELY(!stackFrame.registerFile->grow(&callFrame->registers()[callFrame->codeBlock()->m_numCalleeRegisters]))) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ CallFrame* oldCallFrame = callFrame->callerFrame();
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), ReturnAddressPtr(callFrame->returnPC()));
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ callFrame = handler.callFrame;
+ }
+
+ return callFrame;
+}
+
+DEFINE_STUB_FUNCTION(int, op_loop_if_lesseq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLessEq(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_object)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return constructEmptyObject(stackFrame.callFrame);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().putDirect(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, false);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_direct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().putDirect(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, true);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().putDirect(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ int32_t oldSize = stackFrame.args[3].int32();
+ int32_t newSize = stackFrame.args[4].int32();
+
+ ASSERT(baseValue.isObject());
+ JSObject* base = asObject(baseValue);
+ base->allocatePropertyStorage(oldSize, newSize);
+
+ return base;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+ CHECK_FOR_EXCEPTION();
+
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ MethodCallLinkInfo& methodCallLinkInfo = codeBlock->getMethodCallLinkInfo(STUB_RETURN_ADDRESS);
+
+ if (!methodCallLinkInfo.seenOnce()) {
+ methodCallLinkInfo.setSeen();
+ return JSValue::encode(result);
+ }
+
+ // If we successfully got something, then the base from which it is being accessed must
+ // be an object. (Assertion to ensure asObject() call below is safe, which comes after
+    // an isCacheable() check.)
+ ASSERT(!slot.isCacheableValue() || slot.slotBase().isObject());
+
+ // Check that:
+ // * We're dealing with a JSCell,
+    //   * the property is cacheable,
+    //   * it's not a dictionary,
+ // * there is a function cached.
+ Structure* structure;
+ JSCell* specific;
+ JSObject* slotBaseObject;
+ if (baseValue.isCell()
+ && slot.isCacheableValue()
+ && !(structure = baseValue.asCell()->structure())->isUncacheableDictionary()
+ && (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
+ && specific
+ ) {
+
+ JSFunction* callee = (JSFunction*)specific;
+
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary())
+ slotBaseObject->flattenDictionaryObject(callFrame->globalData());
+
+ // The result fetched should always be the callee!
+ ASSERT(result == JSValue(callee));
+
+ // Check to see if the function is on the object's prototype. Patch up the code to optimize.
+ if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
+ JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS);
+ return JSValue::encode(result);
+ }
+
+ // Check to see if the function is on the object itself.
+ // Since we generate the method-check to check both the structure and a prototype-structure (since this
+ // is the common case) we have a problem - we need to patch the prototype structure check to do something
+        // useful. We could try to nop it out altogether, but that's a little messy, so let's do something simpler
+        // for now: perform a check on a special object on the global object that is only used for this
+        // purpose. The object is in no way exposed, and as such the check will always pass.
+ if (slot.slotBase() == baseValue) {
+ JIT::patchMethodCallProto(callFrame->globalData(), codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject->methodCallDummy(), STUB_RETURN_ADDRESS);
+ return JSValue::encode(result);
+ }
+ }
+
+ // Revert the get_by_id op back to being a regular get_by_id - allow it to cache like normal, if it needs to.
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCacheGetByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, ident, slot, stubInfo);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ CHECK_FOR_EXCEPTION();
+
+ if (baseValue.isCell()
+ && slot.isCacheable()
+ && !baseValue.asCell()->structure()->isUncacheableDictionary()
+ && slot.slotBase() == baseValue) {
+
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+
+ ASSERT(slot.slotBase().isObject());
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex = 1;
+
+ if (stubInfo->accessType == access_get_by_id_self) {
+ ASSERT(!stubInfo->stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->globalData(), codeBlock->ownerExecutable(), CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure.get());
+ stubInfo->initGetByIdSelfList(polymorphicStructureList, 1);
+ } else {
+ polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
+ listIndex = stubInfo->u.getByIdSelfList.listSize;
+ }
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ stubInfo->u.getByIdSelfList.listSize++;
+ JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, baseValue.asCell()->structure(), ident, slot, slot.cachedOffset());
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ }
+ } else
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ return JSValue::encode(result);
+}
+
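+// Promotes a monomorphic proto or chain access into a polymorphic structure list (or
+// reuses the existing list), returning the list and the index at which the next stub
+// should be installed.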
+static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(JSGlobalData& globalData, ScriptExecutable* owner, StructureStubInfo* stubInfo, int& listIndex)
+{
+ PolymorphicAccessStructureList* prototypeStructureList = 0;
+ listIndex = 1;
+
+ switch (stubInfo->accessType) {
+ case access_get_by_id_proto:
+ prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure.get(), stubInfo->u.getByIdProto.prototypeStructure.get());
+ stubInfo->stubRoutine = CodeLocationLabel();
+ stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
+ break;
+ case access_get_by_id_chain:
+ prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure.get(), stubInfo->u.getByIdChain.chain.get());
+ stubInfo->stubRoutine = CodeLocationLabel();
+ stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
+ break;
+ case access_get_by_id_proto_list:
+ prototypeStructureList = stubInfo->u.getByIdProtoList.structureList;
+ listIndex = stubInfo->u.getByIdProtoList.listSize;
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE)
+ stubInfo->u.getByIdProtoList.listSize++;
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT(listIndex <= POLYMORPHIC_LIST_CACHE_SIZE);
+ return prototypeStructureList;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_getter_stub)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ GetterSetter* getterSetter = asGetterSetter(stackFrame.args[0].jsObject());
+ if (!getterSetter->getter())
+ return JSValue::encode(jsUndefined());
+ JSObject* getter = asObject(getterSetter->getter());
+ CallData callData;
+ CallType callType = getter->getCallData(callData);
+ JSValue result = call(callFrame, getter, callType, callData, stackFrame.args[1].jsObject(), ArgList());
+ if (callFrame->hadException())
+ returnToThrowTrampoline(&callFrame->globalData(), stackFrame.args[2].returnAddress(), STUB_RETURN_ADDRESS);
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_custom_stub)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSObject* slotBase = stackFrame.args[0].jsObject();
+ PropertySlot::GetValueFunc getter = reinterpret_cast<PropertySlot::GetValueFunc>(stackFrame.args[1].asPointer);
+ const Identifier& ident = stackFrame.args[2].identifier();
+ JSValue result = getter(callFrame, slotBase, ident);
+ if (callFrame->hadException())
+ returnToThrowTrampoline(&callFrame->globalData(), stackFrame.args[3].returnAddress(), STUB_RETURN_ADDRESS);
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ const Identifier& propertyName = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, propertyName, slot);
+
+ CHECK_FOR_EXCEPTION();
+
+ if (!baseValue.isCell() || !slot.isCacheable() || baseValue.asCell()->structure()->isDictionary()) {
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ return JSValue::encode(result);
+ }
+
+ Structure* structure = baseValue.asCell()->structure();
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+
+ ASSERT(slot.slotBase().isObject());
+ JSObject* slotBaseObject = asObject(slot.slotBase());
+
+ size_t offset = slot.cachedOffset();
+
+ if (slot.slotBase() == baseValue)
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ else if (slot.slotBase() == baseValue.asCell()->structure()->prototypeForLookup(callFrame)) {
+ ASSERT(!baseValue.asCell()->structure()->isDictionary());
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary()) {
+ slotBaseObject->flattenDictionaryObject(callFrame->globalData());
+ offset = slotBaseObject->structure()->get(callFrame->globalData(), propertyName);
+ }
+
+ int listIndex;
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->globalData(), codeBlock->ownerExecutable(), stubInfo, listIndex);
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), propertyName, slot, offset);
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ }
+ } else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) {
+ ASSERT(!baseValue.asCell()->structure()->isDictionary());
+ int listIndex;
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->globalData(), codeBlock->ownerExecutable(), stubInfo, listIndex);
+
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ StructureChain* protoChain = structure->prototypeChain(callFrame);
+ JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, propertyName, slot, offset);
+
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ }
+ } else
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list_full)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_array_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_string_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+DEFINE_STUB_FUNCTION(void, op_check_has_instance)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue baseVal = stackFrame.args[0].jsValue();
+
+ // ECMA-262 15.3.5.3:
+    // Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e. it is not a function).
+#ifndef NDEBUG
+ TypeInfo typeInfo(UnspecifiedType);
+ ASSERT(!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance());
+#endif
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal);
+ VM_THROW_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue value = stackFrame.args[0].jsValue();
+ JSValue baseVal = stackFrame.args[1].jsValue();
+ JSValue proto = stackFrame.args[2].jsValue();
+
+ // At least one of these checks must have failed to get to the slow case.
+ ASSERT(!value.isCell() || !baseVal.isCell() || !proto.isCell()
+ || !value.isObject() || !baseVal.isObject() || !proto.isObject()
+ || (asObject(baseVal)->structure()->typeInfo().flags() & (ImplementsHasInstance | OverridesHasInstance)) != ImplementsHasInstance);
+
+
+ // ECMA-262 15.3.5.3:
+    // Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e. it is not a function).
+ TypeInfo typeInfo(UnspecifiedType);
+ if (!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance()) {
+ stackFrame.globalData->exception = createInvalidParamError(stackFrame.callFrame, "instanceof", baseVal);
+ VM_THROW_EXCEPTION();
+ }
+ ASSERT(typeInfo.type() != UnspecifiedType);
+
+ if (!typeInfo.overridesHasInstance()) {
+ if (!value.isObject())
+ return JSValue::encode(jsBoolean(false));
+
+ if (!proto.isObject()) {
+ throwError(callFrame, createTypeError(callFrame, "instanceof called on an object with an invalid prototype property."));
+ VM_THROW_EXCEPTION();
+ }
+ }
+
+ JSValue result = jsBoolean(asObject(baseVal)->hasInstance(callFrame, value, proto));
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_id)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSObject* baseObj = stackFrame.args[0].jsValue().toObject(callFrame);
+
+ bool couldDelete = baseObj->deleteProperty(callFrame, stackFrame.args[1].identifier());
+ JSValue result = jsBoolean(couldDelete);
+ if (!couldDelete && callFrame->codeBlock()->isStrictMode())
+ stackFrame.globalData->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_mul)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ double left;
+ double right;
+ if (src1.getNumber(left) && src2.getNumber(right))
+ return JSValue::encode(jsNumber(left * right));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(src1.toNumber(callFrame) * src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_func)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(stackFrame.callFrame->codeBlock()->codeType() != FunctionCode || !stackFrame.callFrame->codeBlock()->needsFullScopeChain() || stackFrame.callFrame->uncheckedR(stackFrame.callFrame->codeBlock()->activationRegister()).jsValue());
+ return stackFrame.args[0].function()->make(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
+}
+
+DEFINE_STUB_FUNCTION(void*, op_call_jitCompile)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+#if !ASSERT_DISABLED
+ CallData callData;
+ ASSERT(stackFrame.callFrame->callee()->getCallData(callData) == CallTypeJS);
+#endif
+
+ JSFunction* function = asFunction(stackFrame.callFrame->callee());
+ ASSERT(!function->isHostFunction());
+ FunctionExecutable* executable = function->jsExecutable();
+ ScopeChainNode* callDataScopeChain = function->scope();
+ JSObject* error = executable->compileForCall(stackFrame.callFrame, callDataScopeChain);
+ if (error) {
+ stackFrame.callFrame->globalData().exception = error;
+ return 0;
+ }
+ return function;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_construct_jitCompile)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+#if !ASSERT_DISABLED
+ ConstructData constructData;
+ ASSERT(asFunction(stackFrame.callFrame->callee())->getConstructData(constructData) == ConstructTypeJS);
+#endif
+
+ JSFunction* function = asFunction(stackFrame.callFrame->callee());
+ ASSERT(!function->isHostFunction());
+ FunctionExecutable* executable = function->jsExecutable();
+ ScopeChainNode* callDataScopeChain = function->scope();
+ JSObject* error = executable->compileForConstruct(stackFrame.callFrame, callDataScopeChain);
+ if (error) {
+ stackFrame.callFrame->globalData().exception = error;
+ return 0;
+ }
+ return function;
+}
+
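+// op_call_arityCheck / op_construct_arityCheck are reached when the number of arguments
+// passed does not match the callee's declared parameter count. The call frame is moved
+// so the callee sees a correctly sized parameter area: with too many arguments the
+// declared parameters (and 'this') are copied above the full argument list, with too few
+// the missing slots are filled with jsUndefined(). The register file is grown first, and
+// on failure we rewind to the caller's frame and throw a stack overflow error.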
+DEFINE_STUB_FUNCTION(void*, op_call_arityCheck)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSFunction* callee = asFunction(callFrame->callee());
+ ASSERT(!callee->isHostFunction());
+ CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeForCall();
+ int argCount = callFrame->argumentCountIncludingThis();
+ ReturnAddressPtr pc = callFrame->returnPC();
+
+ ASSERT(argCount != newCodeBlock->m_numParameters);
+
+ CallFrame* oldCallFrame = callFrame->callerFrame();
+
+ Register* r;
+ if (argCount > newCodeBlock->m_numParameters) {
+ size_t numParameters = newCodeBlock->m_numParameters;
+ r = callFrame->registers() + numParameters;
+ Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
+ if (!stackFrame.registerFile->grow(newEnd)) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+ }
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - numParameters - argCount;
+ for (size_t i = 0; i < numParameters; ++i)
+ argv[i + argCount] = argv[i];
+ } else {
+ size_t omittedArgCount = newCodeBlock->m_numParameters - argCount;
+ r = callFrame->registers() + omittedArgCount;
+ Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
+ if (!stackFrame.registerFile->grow(newEnd)) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+ }
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount;
+ for (size_t i = 0; i < omittedArgCount; ++i)
+ argv[i] = jsUndefined();
+ }
+
+ callFrame = CallFrame::create(r);
+ callFrame->setCallerFrame(oldCallFrame);
+ callFrame->setArgumentCountIncludingThis(argCount);
+ callFrame->setCallee(callee);
+ callFrame->setScopeChain(callee->scope());
+ callFrame->setReturnPC(pc.value());
+
+ ASSERT((void*)callFrame <= stackFrame.registerFile->end());
+ return callFrame;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_construct_arityCheck)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSFunction* callee = asFunction(callFrame->callee());
+ ASSERT(!callee->isHostFunction());
+ CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeForConstruct();
+ int argCount = callFrame->argumentCountIncludingThis();
+ ReturnAddressPtr pc = callFrame->returnPC();
+
+ ASSERT(argCount != newCodeBlock->m_numParameters);
+
+ CallFrame* oldCallFrame = callFrame->callerFrame();
+
+ Register* r;
+ if (argCount > newCodeBlock->m_numParameters) {
+ size_t numParameters = newCodeBlock->m_numParameters;
+ r = callFrame->registers() + numParameters;
+ Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
+ if (!stackFrame.registerFile->grow(newEnd)) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+ }
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - numParameters - argCount;
+ for (size_t i = 0; i < numParameters; ++i)
+ argv[i + argCount] = argv[i];
+ } else {
+ size_t omittedArgCount = newCodeBlock->m_numParameters - argCount;
+ r = callFrame->registers() + omittedArgCount;
+ Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
+ if (!stackFrame.registerFile->grow(newEnd)) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+ }
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount;
+ for (size_t i = 0; i < omittedArgCount; ++i)
+ argv[i] = jsUndefined();
+ }
+
+ callFrame = CallFrame::create(r);
+ callFrame->setCallerFrame(oldCallFrame);
+ callFrame->setArgumentCountIncludingThis(argCount);
+ callFrame->setCallee(callee);
+ callFrame->setScopeChain(callee->scope());
+ callFrame->setReturnPC(pc.value());
+
+ ASSERT((void*)callFrame <= stackFrame.registerFile->end());
+ return callFrame;
+}
+
+#if ENABLE(JIT_OPTIMIZE_CALL)
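+// The lazy-link stubs compile the callee if necessary, pick either its direct entry
+// point or its arity-check entry point, and, once the call site has been executed more
+// than once, link the caller's call directly to that code so later calls bypass this stub.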
+DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSFunction* callee = asFunction(callFrame->callee());
+ ExecutableBase* executable = callee->executable();
+
+ MacroAssemblerCodePtr codePtr;
+ CodeBlock* codeBlock = 0;
+ if (executable->isHostFunction())
+ codePtr = executable->generatedJITCodeForCall().addressForCall();
+ else {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+ JSObject* error = functionExecutable->compileForCall(callFrame, callee->scope());
+ if (error) {
+ callFrame->globalData().exception = createStackOverflowError(callFrame);
+ return 0;
+ }
+ codeBlock = &functionExecutable->generatedBytecodeForCall();
+ if (callFrame->argumentCountIncludingThis() == static_cast<size_t>(codeBlock->m_numParameters))
+ codePtr = functionExecutable->generatedJITCodeForCall().addressForCall();
+ else
+ codePtr = functionExecutable->generatedJITCodeForCallWithArityCheck();
+ }
+ CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(callFrame->returnPC());
+
+ if (!callLinkInfo->seenOnce())
+ callLinkInfo->setSeen();
+ else
+ JIT::linkCall(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, codePtr, callLinkInfo, callFrame->argumentCountIncludingThis(), stackFrame.globalData);
+
+ return codePtr.executableAddress();
+}
+
+DEFINE_STUB_FUNCTION(void*, vm_lazyLinkConstruct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSFunction* callee = asFunction(callFrame->callee());
+ ExecutableBase* executable = callee->executable();
+
+ MacroAssemblerCodePtr codePtr;
+ CodeBlock* codeBlock = 0;
+ if (executable->isHostFunction())
+ codePtr = executable->generatedJITCodeForConstruct().addressForCall();
+ else {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+ JSObject* error = functionExecutable->compileForConstruct(callFrame, callee->scope());
+ if (error) {
+ throwStackOverflowError(callFrame, stackFrame.globalData, ReturnAddressPtr(callFrame->returnPC()), STUB_RETURN_ADDRESS);
+ return 0;
+ }
+ codeBlock = &functionExecutable->generatedBytecodeForConstruct();
+ if (callFrame->argumentCountIncludingThis() == static_cast<size_t>(codeBlock->m_numParameters))
+ codePtr = functionExecutable->generatedJITCodeForConstruct().addressForCall();
+ else
+ codePtr = functionExecutable->generatedJITCodeForConstructWithArityCheck();
+ }
+ CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(callFrame->returnPC());
+
+ if (!callLinkInfo->seenOnce())
+ callLinkInfo->setSeen();
+ else
+ JIT::linkConstruct(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, codePtr, callLinkInfo, callFrame->argumentCountIncludingThis(), stackFrame.globalData);
+
+ return codePtr.executableAddress();
+}
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
+
+DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSActivation* activation = new (stackFrame.globalData) JSActivation(stackFrame.callFrame, static_cast<FunctionExecutable*>(stackFrame.callFrame->codeBlock()->ownerExecutable()));
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->push(activation));
+ return activation;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue funcVal = stackFrame.args[0].jsValue();
+
+ CallData callData;
+ CallType callType = getCallData(funcVal, callData);
+
+ ASSERT(callType != CallTypeJS);
+
+ if (callType == CallTypeHost) {
+ int registerOffset = stackFrame.args[1].int32();
+ int argCount = stackFrame.args[2].int32();
+ CallFrame* previousCallFrame = stackFrame.callFrame;
+ CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset);
+ if (!stackFrame.registerFile->grow(callFrame->registers())) {
+ throwStackOverflowError(previousCallFrame, stackFrame.globalData, callFrame->returnPC(), STUB_RETURN_ADDRESS);
+ VM_THROW_EXCEPTION();
+ }
+
+ callFrame->init(0, static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()), previousCallFrame->scopeChain(), previousCallFrame, argCount, asObject(funcVal));
+
+ EncodedJSValue returnValue;
+ {
+ SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
+ returnValue = callData.native.function(callFrame);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return returnValue;
+ }
+
+ ASSERT(callType == CallTypeNone);
+
+ stackFrame.globalData->exception = createNotAFunctionError(stackFrame.callFrame, funcVal);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame);
+ return JSValue::encode(JSValue(arguments));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments_no_params)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame, Arguments::NoParameters);
+ return JSValue::encode(JSValue(arguments));
+}
+
+DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
+ JSValue activationValue = stackFrame.args[0].jsValue();
+ if (!activationValue) {
+ if (JSValue v = stackFrame.args[1].jsValue()) {
+ if (!stackFrame.callFrame->codeBlock()->isStrictMode())
+ asArguments(v)->copyRegisters(*stackFrame.globalData);
+ }
+ return;
+ }
+ JSActivation* activation = asActivation(stackFrame.args[0].jsValue());
+ activation->copyRegisters(*stackFrame.globalData);
+ if (JSValue v = stackFrame.args[1].jsValue()) {
+ if (!stackFrame.callFrame->codeBlock()->isStrictMode())
+ asArguments(v)->setActivation(*stackFrame.globalData, activation);
+ }
+}
+
+DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(stackFrame.callFrame->codeBlock()->usesArguments() && !stackFrame.callFrame->codeBlock()->needsFullScopeChain());
+ asArguments(stackFrame.args[0].jsValue())->copyRegisters(*stackFrame.globalData);
+}
+
+DEFINE_STUB_FUNCTION(void, op_profile_will_call)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(*stackFrame.enabledProfilerReference);
+ (*stackFrame.enabledProfilerReference)->willExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
+}
+
+DEFINE_STUB_FUNCTION(void, op_profile_did_call)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ASSERT(*stackFrame.enabledProfilerReference);
+ (*stackFrame.enabledProfilerReference)->didExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_array)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ ArgList argList(&stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32());
+ return constructArray(stackFrame.callFrame, argList);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+ ASSERT(iter != end);
+
+ Identifier& ident = stackFrame.args[0].identifier();
+ do {
+ JSObject* o = iter->get();
+ PropertySlot slot(o);
+ if (o->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+ } while (++iter != end);
+
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue constrVal = stackFrame.args[0].jsValue();
+
+ ConstructData constructData;
+ ConstructType constructType = getConstructData(constrVal, constructData);
+
+ ASSERT(constructType != ConstructTypeJS);
+
+ if (constructType == ConstructTypeHost) {
+ int registerOffset = stackFrame.args[1].int32();
+ int argCount = stackFrame.args[2].int32();
+ CallFrame* previousCallFrame = stackFrame.callFrame;
+ CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset);
+ if (!stackFrame.registerFile->grow(callFrame->registers())) {
+ throwStackOverflowError(previousCallFrame, stackFrame.globalData, callFrame->returnPC(), STUB_RETURN_ADDRESS);
+ VM_THROW_EXCEPTION();
+ }
+
+ callFrame->init(0, static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()), previousCallFrame->scopeChain(), previousCallFrame, argCount, asObject(constrVal));
+
+ EncodedJSValue returnValue;
+ {
+ SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
+ returnValue = constructData.native.function(callFrame);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return returnValue;
+ }
+
+ ASSERT(constructType == ConstructTypeNone);
+
+ stackFrame.globalData->exception = createNotAConstructorError(stackFrame.callFrame, constrVal);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ if (LIKELY(baseValue.isCell() && subscript.isString())) {
+ Identifier propertyName(callFrame, asString(subscript)->value(callFrame));
+ PropertySlot slot(baseValue.asCell());
+ // JSString::value may have thrown, but we shouldn't find a property with a null identifier,
+ // so we should miss this case and wind up in the CHECK_FOR_EXCEPTION_AT_END, below.
+ if (baseValue.asCell()->fastGetOwnPropertySlot(callFrame, propertyName, slot)) {
+ JSValue result = slot.getValue(callFrame, propertyName);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+ }
+ }
+
+ if (subscript.isUInt32()) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) {
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
+ JSValue result = asString(baseValue)->getIndex(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+ }
+ if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array));
+ return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
+ }
+ JSValue result = baseValue.get(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+ }
+
+ Identifier property(callFrame, subscript.toString(callFrame));
+ JSValue result = baseValue.get(callFrame, property);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ JSValue result;
+
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i))
+ result = asString(baseValue)->getIndex(callFrame, i);
+ else {
+ result = baseValue.get(callFrame, i);
+ if (!isJSString(globalData, baseValue))
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
+ }
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ result = baseValue.get(callFrame, property);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ JSValue result;
+
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
+ }
+
+ result = baseValue.get(callFrame, i);
+ if (!isJSByteArray(globalData, baseValue))
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ result = baseValue.get(callFrame, property);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ double left;
+ double right;
+ if (src1.getNumber(left) && src2.getNumber(right))
+ return JSValue::encode(jsNumber(left - right));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(src1.toNumber(callFrame) - src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_val)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue value = stackFrame.args[2].jsValue();
+
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSArray(globalData, baseValue)) {
+ JSArray* jsArray = asArray(baseValue);
+ if (jsArray->canSetIndex(i))
+ jsArray->setIndex(*globalData, i, value);
+ else
+ jsArray->JSArray::put(callFrame, i, value);
+ } else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ JSByteArray* jsByteArray = asByteArray(baseValue);
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_byte_array));
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ if (value.isInt32()) {
+ jsByteArray->setIndex(i, value.asInt32());
+ return;
+ } else {
+ double dValue = 0;
+ if (value.getNumber(dValue)) {
+ jsByteArray->setIndex(i, dValue);
+ return;
+ }
+ }
+
+ baseValue.put(callFrame, i, value);
+ } else
+ baseValue.put(callFrame, i, value);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ baseValue.put(callFrame, property, value, slot);
+ }
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue value = stackFrame.args[2].jsValue();
+
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ JSByteArray* jsByteArray = asByteArray(baseValue);
+
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ if (value.isInt32()) {
+ jsByteArray->setIndex(i, value.asInt32());
+ return;
+ } else {
+ double dValue = 0;
+ if (value.getNumber(dValue)) {
+ jsByteArray->setIndex(i, dValue);
+ return;
+ }
+ }
+ }
+
+ if (!isJSByteArray(globalData, baseValue))
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val));
+ baseValue.put(callFrame, i, value);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ baseValue.put(callFrame, property, value, slot);
+ }
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_lesseq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(jsLessEq(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, op_load_varargs)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ RegisterFile* registerFile = stackFrame.registerFile;
+ int argsOffset = stackFrame.args[0].int32();
+ JSValue arguments = callFrame->registers()[argsOffset].jsValue();
+ uint32_t argCount = 0;
+ if (!arguments) {
+ int providedParams = callFrame->registers()[RegisterFile::ArgumentCount].i() - 1;
+ argCount = providedParams;
+ argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ int32_t expectedParams = asFunction(callFrame->callee())->jsExecutable()->parameterCount();
+ int32_t inplaceArgs = min(providedParams, expectedParams);
+
+ Register* inplaceArgsDst = callFrame->registers() + argsOffset;
+
+ Register* inplaceArgsEnd = inplaceArgsDst + inplaceArgs;
+ Register* inplaceArgsEnd2 = inplaceArgsDst + providedParams;
+
+ Register* inplaceArgsSrc = callFrame->registers() - RegisterFile::CallFrameHeaderSize - expectedParams;
+ Register* inplaceArgsSrc2 = inplaceArgsSrc - providedParams - 1 + inplaceArgs;
+
+ // First step is to copy the "expected" parameters from their normal location relative to the callframe
+ while (inplaceArgsDst < inplaceArgsEnd)
+ *inplaceArgsDst++ = *inplaceArgsSrc++;
+
+ // Then we copy any additional arguments that may be further up the stack ('-1' to account for 'this')
+ while (inplaceArgsDst < inplaceArgsEnd2)
+ *inplaceArgsDst++ = *inplaceArgsSrc2++;
+
+ } else if (!arguments.isUndefinedOrNull()) {
+ if (!arguments.isObject()) {
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments);
+ VM_THROW_EXCEPTION();
+ }
+ if (asObject(arguments)->classInfo() == &Arguments::s_info) {
+ Arguments* argsObject = asArguments(arguments);
+ argCount = argsObject->numProvidedArguments(callFrame);
+ argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ argsObject->copyToRegisters(callFrame, callFrame->registers() + argsOffset, argCount);
+ } else if (isJSArray(&callFrame->globalData(), arguments)) {
+ JSArray* array = asArray(arguments);
+ argCount = array->length();
+ argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ array->copyToRegisters(callFrame, callFrame->registers() + argsOffset, argCount);
+ } else if (asObject(arguments)->inherits(&JSArray::s_info)) {
+ JSObject* argObject = asObject(arguments);
+ argCount = argObject->get(callFrame, callFrame->propertyNames().length).toUInt32(callFrame);
+ argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ Register* argsBuffer = callFrame->registers() + argsOffset;
+ for (unsigned i = 0; i < argCount; ++i) {
+ argsBuffer[i] = asObject(arguments)->get(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ }
+ } else {
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments);
+ VM_THROW_EXCEPTION();
+ }
+ }
+
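+ // The count handed back to the JIT includes the 'this' slot in addition to the copied
+ // arguments, hence the + 1.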
+ return argCount + 1;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_negate)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+
+ double v;
+ if (src.getNumber(v))
+ return JSValue::encode(jsNumber(-v));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(-src.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain(), false));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base_strict_put)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ JSValue base = JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain(), true);
+ if (!base) {
+ stackFrame.globalData->exception = createErrorForInvalidGlobalAssignment(stackFrame.callFrame, stackFrame.args[0].identifier().ustring());
+ VM_THROW_EXCEPTION();
+ }
+ return JSValue::encode(base);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_ensure_property_exists)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ JSValue base = stackFrame.callFrame->r(stackFrame.args[0].int32()).jsValue();
+ JSObject* object = asObject(base);
+ PropertySlot slot(object);
+ ASSERT(stackFrame.callFrame->codeBlock()->isStrictMode());
+ if (!object->getPropertySlot(stackFrame.callFrame, stackFrame.args[1].identifier(), slot)) {
+ stackFrame.globalData->exception = createErrorForInvalidGlobalAssignment(stackFrame.callFrame, stackFrame.args[1].identifier().ustring());
+ VM_THROW_EXCEPTION();
+ }
+
+ return JSValue::encode(base);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ int skip = stackFrame.args[1].int32();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+ ASSERT(iter != end);
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ bool checkTopLevel = codeBlock->codeType() == FunctionCode && codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ if (callFrame->uncheckedR(codeBlock->activationRegister()).jsValue())
+ ++iter;
+ }
+ while (skip--) {
+ ++iter;
+ ASSERT(iter != end);
+ }
+ Identifier& ident = stackFrame.args[0].identifier();
+ do {
+ JSObject* o = iter->get();
+ PropertySlot slot(o);
+ if (o->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+ } while (++iter != end);
+
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ JSGlobalObject* globalObject = codeBlock->globalObject();
+ Identifier& ident = stackFrame.args[0].identifier();
+ unsigned globalResolveInfoIndex = stackFrame.args[1].int32();
+ ASSERT(globalObject->isGlobalObject());
+
+ PropertySlot slot(globalObject);
+ if (globalObject->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
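+ // Cache a successful global lookup: record the global object's structure and the slot offset
+ // so the JIT fast path can load this property directly until the structure changes.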
+ if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
+ GlobalResolveInfo& globalResolveInfo = codeBlock->globalResolveInfo(globalResolveInfoIndex);
+ globalResolveInfo.structure.set(callFrame->globalData(), codeBlock->ownerExecutable(), globalObject->structure());
+ globalResolveInfo.offset = slot.cachedOffset();
+ return JSValue::encode(result);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
+ VM_THROW_EXCEPTION();
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ double left;
+ double right;
+ if (src1.getNumber(left) && src2.getNumber(right))
+ return JSValue::encode(jsNumber(left / right));
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(src1.toNumber(callFrame) / src2.toNumber(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_dec)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(v.toNumber(callFrame) - 1);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, op_jless)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLess(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(int, op_jlesseq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLessEq(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_not)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = jsBoolean(!src.toBoolean(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(int, op_jtrue)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = src1.toBoolean(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue number = v.toJSNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(number.uncheckedGetNumber() + 1);
+ return JSValue::encode(number);
+}
+
+DEFINE_STUB_FUNCTION(int, op_eq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+#if USE(JSVALUE32_64)
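+ // Slow-path abstract equality (ES5 11.9.3) for the 32/64 value representation: primitive
+ // combinations are handled directly; when an object is involved it is converted with
+ // toPrimitive and the comparison restarts from 'start' with the converted operand.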
+ start:
+ if (src2.isUndefined()) {
+ return src1.isNull() ||
+ (src1.isCell() && src1.asCell()->structure()->typeInfo().masqueradesAsUndefined())
+ || src1.isUndefined();
+ }
+
+ if (src2.isNull()) {
+ return src1.isUndefined() ||
+ (src1.isCell() && src1.asCell()->structure()->typeInfo().masqueradesAsUndefined())
+ || src1.isNull();
+ }
+
+ if (src1.isInt32()) {
+ if (src2.isDouble())
+ return src1.asInt32() == src2.asDouble();
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return src1.asInt32() == d;
+ }
+
+ if (src1.isDouble()) {
+ if (src2.isInt32())
+ return src1.asDouble() == src2.asInt32();
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return src1.asDouble() == d;
+ }
+
+ if (src1.isTrue()) {
+ if (src2.isFalse())
+ return false;
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return d == 1.0;
+ }
+
+ if (src1.isFalse()) {
+ if (src2.isTrue())
+ return false;
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return d == 0.0;
+ }
+
+ if (src1.isUndefined())
+ return src2.isCell() && src2.asCell()->structure()->typeInfo().masqueradesAsUndefined();
+
+ if (src1.isNull())
+ return src2.isCell() && src2.asCell()->structure()->typeInfo().masqueradesAsUndefined();
+
+ JSCell* cell1 = src1.asCell();
+
+ if (cell1->isString()) {
+ if (src2.isInt32())
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asInt32();
+
+ if (src2.isDouble())
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asDouble();
+
+ if (src2.isTrue())
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == 1.0;
+
+ if (src2.isFalse())
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == 0.0;
+
+ JSCell* cell2 = src2.asCell();
+ if (cell2->isString())
+ return static_cast<JSString*>(cell1)->value(stackFrame.callFrame) == static_cast<JSString*>(cell2)->value(stackFrame.callFrame);
+
+ src2 = asObject(cell2)->toPrimitive(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ goto start;
+ }
+
+ if (src2.isObject())
+ return asObject(cell1) == asObject(src2);
+ src1 = asObject(cell1)->toPrimitive(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ goto start;
+
+#else // USE(JSVALUE32_64)
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = JSValue::equalSlowCaseInline(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+#endif // USE(JSVALUE32_64)
+}
+
+DEFINE_STUB_FUNCTION(int, op_eq_strings)
+{
+#if USE(JSVALUE32_64)
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSString* string1 = stackFrame.args[0].jsString();
+ JSString* string2 = stackFrame.args[1].jsString();
+
+ ASSERT(string1->isString());
+ ASSERT(string2->isString());
+ return string1->value(stackFrame.callFrame) == string2->value(stackFrame.callFrame);
+#else
+ UNUSED_PARAM(args);
+ ASSERT_NOT_REACHED();
+ return 0;
+#endif
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber((val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitand)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ ASSERT(!src1.isInt32() || !src2.isInt32());
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(src1.toInt32(callFrame) & src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_rshift)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber((val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitnot)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+
+ ASSERT(!src.isInt32());
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(~src.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ ScopeChainNode* scopeChain = callFrame->scopeChain();
+
+ ScopeChainIterator iter = scopeChain->begin();
+ ScopeChainIterator end = scopeChain->end();
+
+ // FIXME: add scopeDepthIsZero optimization
+
+ ASSERT(iter != end);
+
+ Identifier& ident = stackFrame.args[0].identifier();
+ JSObject* base;
+ do {
+ base = iter->get();
+ PropertySlot slot(base);
+ if (base->getPropertySlot(callFrame, ident, slot)) {
+ JSValue result = slot.getValue(callFrame, ident);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ callFrame->registers()[stackFrame.args[1].int32()] = JSValue(base);
+ return JSValue::encode(result);
+ }
+ ++iter;
+ } while (iter != end);
+
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
+ VM_THROW_EXCEPTION_AT_END();
+ return JSValue::encode(JSValue());
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_func_exp)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ FunctionExecutable* function = stackFrame.args[0].function();
+ JSFunction* func = function->make(callFrame, callFrame->scopeChain());
+ ASSERT(callFrame->codeBlock()->codeType() != FunctionCode || !callFrame->codeBlock()->needsFullScopeChain() || callFrame->uncheckedR(callFrame->codeBlock()->activationRegister()).jsValue());
+
+ /*
+ The Identifier in a FunctionExpression can be referenced from inside
+ the FunctionExpression's FunctionBody to allow the function to call
+ itself recursively. However, unlike in a FunctionDeclaration, the
+ Identifier in a FunctionExpression cannot be referenced from and
+ does not affect the scope enclosing the FunctionExpression.
+ */
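+ // For example: var f = function fact(n) { return n < 2 ? 1 : n * fact(n - 1); };
+ // 'fact' is visible inside the function body but adds no binding to the enclosing scope.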
+ if (!function->name().isNull()) {
+ JSStaticScopeObject* functionScopeObject = new (callFrame) JSStaticScopeObject(callFrame, function->name(), func, ReadOnly | DontDelete);
+ func->setScope(callFrame->globalData(), func->scope()->push(functionScopeObject));
+ }
+
+ return func;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_mod)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue dividendValue = stackFrame.args[0].jsValue();
+ JSValue divisorValue = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ double d = dividendValue.toNumber(callFrame);
+ JSValue result = jsNumber(fmod(d, divisorValue.toNumber(callFrame)));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_less)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(jsLess(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_dec)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue number = v.toJSNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+
+ callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(number.uncheckedGetNumber() - 1);
+ return JSValue::encode(number);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_urshift)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber((val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitxor)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = jsNumber(src1.toInt32(callFrame) ^ src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ RegExp* regExp = stackFrame.args[0].regExp();
+ if (!regExp->isValid()) {
+ stackFrame.globalData->exception = createSyntaxError(callFrame, "Invalid flags supplied to RegExp constructor.");
+ VM_THROW_EXCEPTION();
+ }
+
+ return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject(), stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), regExp);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = jsNumber(src1.toInt32(callFrame) | src2.toInt32(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ ASSERT(stackFrame.callFrame->codeBlock()->codeType() != FunctionCode || !stackFrame.callFrame->codeBlock()->needsFullScopeChain() || stackFrame.callFrame->uncheckedR(stackFrame.callFrame->codeBlock()->activationRegister()).jsValue());
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ RegisterFile* registerFile = stackFrame.registerFile;
+
+ Interpreter* interpreter = stackFrame.globalData->interpreter;
+
+ JSValue funcVal = stackFrame.args[0].jsValue();
+ int registerOffset = stackFrame.args[1].int32();
+ int argCount = stackFrame.args[2].int32();
+
+ Register* newCallFrame = callFrame->registers() + registerOffset;
+ Register* argv = newCallFrame - RegisterFile::CallFrameHeaderSize - argCount;
+ JSValue baseValue = argv[0].jsValue();
+ JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject.get();
+
+ if (baseValue == globalObject && funcVal == globalObject->evalFunction()) {
+ JSValue result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+ }
+
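+ // Not a call to the genuine global eval: return the empty value so the calling JIT code can
+ // fall back to an ordinary call sequence.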
+ return JSValue::encode(JSValue());
+}
+
+DEFINE_STUB_FUNCTION(void*, op_throw)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, stackFrame.callFrame, stackFrame.args[0].jsValue(), STUB_RETURN_ADDRESS);
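+ // jitThrow unwinds to the nearest handler; redirect this stub's return address into the
+ // handler's catch routine and hand back the call frame it should resume in.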
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+}
+
+DEFINE_STUB_FUNCTION(JSPropertyNameIterator*, op_get_pnames)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSObject* o = stackFrame.args[0].jsObject();
+ Structure* structure = o->structure();
+ JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache();
+ if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(callFrame))
+ jsPropertyNameIterator = JSPropertyNameIterator::create(callFrame, o);
+ return jsPropertyNameIterator;
+}
+
+DEFINE_STUB_FUNCTION(int, has_property)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSObject* base = stackFrame.args[0].jsObject();
+ JSString* property = stackFrame.args[1].jsString();
+ int result = base->hasProperty(stackFrame.callFrame, Identifier(stackFrame.callFrame, property->value(stackFrame.callFrame)));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_push_scope)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSObject* o = stackFrame.args[0].jsValue().toObject(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->push(o));
+ return o;
+}
+
+DEFINE_STUB_FUNCTION(void, op_pop_scope)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->pop());
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_typeof)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsTypeStringForValue(stackFrame.callFrame, stackFrame.args[0].jsValue()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_undefined)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v = stackFrame.args[0].jsValue();
+ return JSValue::encode(jsBoolean(v.isCell() ? v.asCell()->structure()->typeInfo().masqueradesAsUndefined() : v.isUndefined()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_boolean)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isBoolean()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_number)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isNumber()));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_string)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(isJSString(stackFrame.globalData, stackFrame.args[0].jsValue())));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_object)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(jsIsObjectType(stackFrame.args[0].jsValue())));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_function)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(jsBoolean(jsIsFunctionType(stackFrame.args[0].jsValue())));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_stricteq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ bool result = JSValue::strictEqual(stackFrame.callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsBoolean(result));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_primitive)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(stackFrame.args[0].jsValue().toPrimitive(stackFrame.callFrame));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_strcat)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue result = jsString(stackFrame.callFrame, &stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32());
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ bool result = !JSValue::strictEqual(stackFrame.callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsBoolean(result));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_jsnumber)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = src.toJSNumber(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_in)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue baseVal = stackFrame.args[1].jsValue();
+
+ if (!baseVal.isObject()) {
+ stackFrame.globalData->exception = createInvalidParamError(stackFrame.callFrame, "in", baseVal);
+ VM_THROW_EXCEPTION();
+ }
+
+ JSValue propName = stackFrame.args[0].jsValue();
+ JSObject* baseObj = asObject(baseVal);
+
+ uint32_t i;
+ if (propName.getUInt32(i))
+ return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, i)));
+
+ Identifier property(callFrame, propName.toString(callFrame));
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, property)));
+}
+
+DEFINE_STUB_FUNCTION(JSObject*, op_push_new_scope)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSObject* scope = new (stackFrame.globalData) JSStaticScopeObject(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].jsValue(), DontDelete);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ callFrame->setScopeChain(callFrame->scopeChain()->push(scope));
+ return scope;
+}
+
+DEFINE_STUB_FUNCTION(void, op_jmp_scopes)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ unsigned count = stackFrame.args[0].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ScopeChainNode* tmp = callFrame->scopeChain();
+ while (count--)
+ tmp = tmp->pop();
+ callFrame->setScopeChain(tmp);
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_index)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ unsigned property = stackFrame.args[1].int32();
+
+ stackFrame.args[0].jsValue().put(callFrame, property, stackFrame.args[2].jsValue());
+}
+
+DEFINE_STUB_FUNCTION(void*, op_switch_imm)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
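+ // Look up the scrutinee in the code block's immediate-value jump table and return the
+ // machine-code address the JIT should jump to; values without an entry take ctiDefault.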
+ if (scrutinee.isInt32())
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.asInt32()).executableAddress();
+ else {
+ double value;
+ int32_t intValue;
+ if (scrutinee.getNumber(value) && ((intValue = static_cast<int32_t>(value)) == value))
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(intValue).executableAddress();
+ else
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
+ }
+}
+
+DEFINE_STUB_FUNCTION(void*, op_switch_char)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
+
+ if (scrutinee.isString()) {
+ StringImpl* value = asString(scrutinee)->value(callFrame).impl();
+ if (value->length() == 1)
+ result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->characters()[0]).executableAddress();
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_switch_string)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+
+ void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
+
+ if (scrutinee.isString()) {
+ StringImpl* value = asString(scrutinee)->value(callFrame).impl();
+ result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).executableAddress();
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_val)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSObject* baseObj = baseValue.toObject(callFrame); // may throw
+
+ JSValue subscript = stackFrame.args[1].jsValue();
+ bool result;
+ uint32_t i;
+ if (subscript.getUInt32(i))
+ result = baseObj->deleteProperty(callFrame, i);
+ else {
+ CHECK_FOR_EXCEPTION();
+ Identifier property(callFrame, subscript.toString(callFrame));
+ CHECK_FOR_EXCEPTION();
+ result = baseObj->deleteProperty(callFrame, property);
+ }
+
+ if (!result && callFrame->codeBlock()->isStrictMode())
+ stackFrame.globalData->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsBoolean(result));
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_getter)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ASSERT(stackFrame.args[0].jsValue().isObject());
+ JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
+ ASSERT(stackFrame.args[2].jsValue().isObject());
+ baseObj->defineGetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_setter)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ ASSERT(stackFrame.args[0].jsValue().isObject());
+ JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
+ ASSERT(stackFrame.args[2].jsValue().isObject());
+ baseObj->defineSetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
+}
+
+DEFINE_STUB_FUNCTION(void, op_throw_reference_error)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ UString message = stackFrame.args[0].jsValue().toString(callFrame);
+ stackFrame.globalData->exception = createReferenceError(callFrame, message);
+ VM_THROW_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_debug)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ int debugHookID = stackFrame.args[0].int32();
+ int firstLine = stackFrame.args[1].int32();
+ int lastLine = stackFrame.args[2].int32();
+
+ stackFrame.globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine);
+}
+
+DEFINE_STUB_FUNCTION(void*, vm_throw)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ JSGlobalData* globalData = stackFrame.globalData;
+ ExceptionHandler handler = jitThrow(globalData, stackFrame.callFrame, globalData->exception, globalData->exceptionLocation);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ return JSValue::encode(stackFrame.args[0].jsValue().toObject(callFrame));
+}
+
+MacroAssemblerCodePtr JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator)
+{
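+ // Thunks are memoized per generator: the first request runs the generator and caches the
+ // resulting code pointer; later requests reuse it.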
+ std::pair<CTIStubMap::iterator, bool> entry = m_ctiStubMap.add(generator, MacroAssemblerCodePtr());
+ if (entry.second)
+ entry.first->second = generator(globalData, m_executablePool.get());
+ return entry.first->second;
+}
+
+NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function)
+{
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, Weak<NativeExecutable>());
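+ // Entries are Weak<>, so the GC may clear a stub; recreate the NativeExecutable here when
+ // the cached entry is empty or has been collected.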
+ if (!*entry.first->second)
+ entry.first->second.set(*globalData, NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, m_executablePool, function), function, ctiNativeConstruct(), callHostFunctionAsConstructor));
+ return entry.first->second.get();
+}
+
+NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator)
+{
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, Weak<NativeExecutable>());
+ if (!*entry.first->second) {
+ MacroAssemblerCodePtr code = globalData->canUseJIT() ? generator(globalData, m_executablePool.get()) : MacroAssemblerCodePtr();
+ entry.first->second.set(*globalData, NativeExecutable::create(*globalData, code, function, ctiNativeConstruct(), callHostFunctionAsConstructor));
+ }
+ return entry.first->second.get();
+}
+
+void JITThunks::clearHostFunctionStubs()
+{
+ m_hostFunctionStubMap.clear();
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubs.h b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubs.h
new file mode 100644
index 0000000..624b6be
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JITStubs.h
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) Research In Motion Limited 2010. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubs_h
+#define JITStubs_h
+
+#include "CallData.h"
+#include "MacroAssemblerCodeRef.h"
+#include "Register.h"
+#include "ThunkGenerators.h"
+#include <wtf/HashMap.h>
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+ struct StructureStubInfo;
+
+ class CodeBlock;
+ class ExecutablePool;
+ class FunctionExecutable;
+ class Identifier;
+ class JSGlobalData;
+ class JSGlobalObject;
+ class JSObject;
+ class JSPropertyNameIterator;
+ class JSValue;
+ class JSValueEncodedAsPointer;
+ class NativeExecutable;
+ class Profiler;
+ class PropertySlot;
+ class PutPropertySlot;
+ class RegisterFile;
+ class RegExp;
+
+ template <typename T> class Weak;
+
+ union JITStubArg {
+ void* asPointer;
+ EncodedJSValue asEncodedJSValue;
+ int32_t asInt32;
+
+ JSValue jsValue() { return JSValue::decode(asEncodedJSValue); }
+ JSObject* jsObject() { return static_cast<JSObject*>(asPointer); }
+ Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
+ int32_t int32() { return asInt32; }
+ CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
+ FunctionExecutable* function() { return static_cast<FunctionExecutable*>(asPointer); }
+ RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
+ JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
+ JSGlobalObject* globalObject() { return static_cast<JSGlobalObject*>(asPointer); }
+ JSString* jsString() { return static_cast<JSString*>(asPointer); }
+ ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
+ };
+
+ struct TrampolineStructure {
+ MacroAssemblerCodePtr ctiStringLengthTrampoline;
+ MacroAssemblerCodePtr ctiVirtualCallLink;
+ MacroAssemblerCodePtr ctiVirtualConstructLink;
+ MacroAssemblerCodePtr ctiVirtualCall;
+ MacroAssemblerCodePtr ctiVirtualConstruct;
+ MacroAssemblerCodePtr ctiNativeCall;
+ MacroAssemblerCodePtr ctiNativeConstruct;
+ MacroAssemblerCodePtr ctiSoftModulo;
+ };
+
+#if CPU(X86_64)
+ struct JITStackFrame {
+ void* reserved; // Unused
+ JITStubArg args[6];
+ void* padding[2]; // Maintain 32-byte stack alignment (possibly overkill).
+
+ void* code;
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ void* unused1;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ void* savedRBX;
+ void* savedR15;
+ void* savedR14;
+ void* savedR13;
+ void* savedR12;
+ void* savedRBP;
+ void* savedRIP;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+ };
+#elif CPU(X86)
+#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
+ struct JITStackFrame {
+ void* reserved; // Unused
+ JITStubArg args[6];
+#if USE(JSVALUE32_64)
+ void* padding[2]; // Maintain 16-byte stack alignment.
+#endif
+
+ void* savedEBX;
+ void* savedEDI;
+ void* savedESI;
+ void* savedEBP;
+ void* savedEIP;
+
+ void* code;
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ void* unused1;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+ };
+#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
+#pragma pack(pop)
+#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
+#elif CPU(ARM_THUMB2)
+ struct JITStackFrame {
+ JITStubArg reserved; // Unused
+ JITStubArg args[6];
+#if USE(JSVALUE64)
+ void* padding; // Maintain 16-byte stack alignment.
+#endif
+
+ ReturnAddressPtr thunkReturnAddress;
+
+ void* preservedReturnAddress;
+ void* preservedR4;
+ void* preservedR5;
+ void* preservedR6;
+
+ // These arguments are passed in r1..r3 (r0 contained the entry code pointer, which is not preserved)
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ void* unused1;
+
+ // These arguments are passed on the stack.
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#elif CPU(ARM_TRADITIONAL)
+#if COMPILER(MSVC)
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[7];
+
+ ReturnAddressPtr thunkReturnAddress;
+
+ void* preservedR4;
+ void* preservedR5;
+ void* preservedR6;
+ void* preservedR7;
+ void* preservedR8;
+ void* preservedLink;
+
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ void* unused1;
+
+ // These arguments are passed on the stack.
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#if COMPILER(MSVC)
+#pragma pack(pop)
+#endif // COMPILER(MSVC)
+#elif CPU(MIPS)
+ struct JITStackFrame {
+ JITStubArg reserved; // Unused
+ JITStubArg args[6];
+
+#if USE(JSVALUE32_64)
+ void* padding; // Make the overall stack length 8-byte aligned.
+#endif
+
+ void* preservedGP; // Stores GP when using PIC code.
+ void* preservedS0;
+ void* preservedS1;
+ void* preservedS2;
+ void* preservedReturnAddress;
+
+ ReturnAddressPtr thunkReturnAddress;
+
+ // These arguments are passed in a1..a3 (a0 contained the entry code pointer, which is not preserved)
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ void* unused1;
+
+ // These arguments are passed on the stack.
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#elif CPU(SH4)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[6];
+
+ ReturnAddressPtr thunkReturnAddress;
+ void* savedR10;
+ void* savedR11;
+ void* savedR13;
+ void* savedRPR;
+ void* savedR14;
+ void* savedTimeoutReg;
+
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#else
+#error "JITStackFrame not defined for this platform."
+#endif
+
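+// Word index of the 'args' array within JITStackFrame; JIT-generated code uses it when poking
+// stub arguments onto the stack before calling a cti_ stub.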
+#define JITSTACKFRAME_ARGS_INDEX (OBJECT_OFFSETOF(JITStackFrame, args) / sizeof(void*))
+
+#define STUB_ARGS_DECLARATION void** args
+#define STUB_ARGS (args)
+
+#if CPU(X86)
+ #if COMPILER(MSVC)
+ #define JIT_STUB __fastcall
+ #elif COMPILER(GCC)
+ #define JIT_STUB __attribute__ ((fastcall))
+ #else
+ #error "JIT_STUB function calls require fastcall conventions on x86, add appropriate directive/attribute here for your compiler!"
+ #endif
+#else
+ #define JIT_STUB
+#endif
+
+ extern "C" void ctiVMThrowTrampoline();
+ extern "C" void ctiOpThrowNotCaught();
+ extern "C" EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*);
+
+ class JITThunks {
+ public:
+ JITThunks(JSGlobalData*);
+ ~JITThunks();
+
+ static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&, StructureStubInfo* stubInfo);
+ static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo, bool direct);
+
+ MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_trampolineStructure.ctiStringLengthTrampoline; }
+ MacroAssemblerCodePtr ctiVirtualCallLink() { return m_trampolineStructure.ctiVirtualCallLink; }
+ MacroAssemblerCodePtr ctiVirtualConstructLink() { return m_trampolineStructure.ctiVirtualConstructLink; }
+ MacroAssemblerCodePtr ctiVirtualCall() { return m_trampolineStructure.ctiVirtualCall; }
+ MacroAssemblerCodePtr ctiVirtualConstruct() { return m_trampolineStructure.ctiVirtualConstruct; }
+ MacroAssemblerCodePtr ctiNativeCall() { return m_trampolineStructure.ctiNativeCall; }
+ MacroAssemblerCodePtr ctiNativeConstruct() { return m_trampolineStructure.ctiNativeConstruct; }
+ MacroAssemblerCodePtr ctiSoftModulo() { return m_trampolineStructure.ctiSoftModulo; }
+
+ MacroAssemblerCodePtr ctiStub(JSGlobalData* globalData, ThunkGenerator generator);
+
+ NativeExecutable* hostFunctionStub(JSGlobalData*, NativeFunction);
+ NativeExecutable* hostFunctionStub(JSGlobalData*, NativeFunction, ThunkGenerator);
+
+ void clearHostFunctionStubs();
+
+ private:
+ typedef HashMap<ThunkGenerator, MacroAssemblerCodePtr> CTIStubMap;
+ CTIStubMap m_ctiStubMap;
+ typedef HashMap<NativeFunction, Weak<NativeExecutable> > HostFunctionStubMap;
+ OwnPtr<HostFunctionStubMap> m_hostFunctionStubMap;
+ RefPtr<ExecutablePool> m_executablePool;
+
+ TrampolineStructure m_trampolineStructure;
+ };
+
+extern "C" {
+ EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitnot(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitor(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitxor(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_create_this(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_convert_this_strict(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_custom_stub(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_getter_stub(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_boolean(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_function(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_number(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_object(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_string(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_undefined(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_less(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_lesseq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_lshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_inc(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_pre_dec(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_pre_inc(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_base_strict_put(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_ensure_property_exists(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_global_dynamic(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_sub(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_to_jsnumber(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_to_primitive(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_to_object(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
+ JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_has_property(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_check_has_instance(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_direct(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_direct_fail(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_direct_generic(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_throw_reference_error(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_construct_arityCheck(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_call_jitCompile(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_construct_jitCompile(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_lazyLinkConstruct(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
+} // extern "C"
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITStubs_h
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JSInterfaceJIT.h
new file mode 100644
index 0000000..e1d9353
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JSInterfaceJIT_h
+#define JSInterfaceJIT_h
+
+#include "JITCode.h"
+#include "JITStubs.h"
+#include "JSValue.h"
+#include "MacroAssembler.h"
+#include "RegisterFile.h"
+#include <wtf/AlwaysInline.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+ class JSInterfaceJIT : public MacroAssembler {
+ public:
+ // NOTES:
+ //
+ // regT0 has two special meanings. The return value from a stub
+ // call will always be in regT0, and by default (unless
+ // a register is specified) emitPutVirtualRegister() will store
+ // the value from regT0.
+ //
+ // regT3 is required to be callee-preserved.
+ //
+        // tempRegister2 has no such dependencies. It is important that
+ // on x86/x86-64 it is ecx for performance reasons, since the
+ // MacroAssembler will need to plant register swaps if it is not -
+ // however the code will still function correctly.
+#if CPU(X86_64)
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
+ static const RegisterID firstArgumentRegister = X86Registers::edi;
+
+ static const RegisterID timeoutCheckRegister = X86Registers::r12;
+ static const RegisterID callFrameRegister = X86Registers::r13;
+ static const RegisterID tagTypeNumberRegister = X86Registers::r14;
+ static const RegisterID tagMaskRegister = X86Registers::r15;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+ static const FPRegisterID fpRegT3 = X86Registers::xmm3;
+#elif CPU(X86)
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
+        // On x86 we always use fastcall conventions - but on
+        // OS X it might make more sense to just use regparm.
+ static const RegisterID firstArgumentRegister = X86Registers::ecx;
+
+ static const RegisterID timeoutCheckRegister = X86Registers::esi;
+ static const RegisterID callFrameRegister = X86Registers::edi;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+ static const FPRegisterID fpRegT3 = X86Registers::xmm3;
+#elif CPU(ARM_THUMB2)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ static const RegisterID regT3 = ARMRegisters::r4;
+
+ static const RegisterID callFrameRegister = ARMRegisters::r5;
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
+ static const FPRegisterID fpRegT3 = ARMRegisters::d3;
+#elif CPU(ARM_TRADITIONAL)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
+ static const RegisterID callFrameRegister = ARMRegisters::r4;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ // Callee preserved
+ static const RegisterID regT3 = ARMRegisters::r7;
+
+ static const RegisterID regS0 = ARMRegisters::S0;
+ // Callee preserved
+ static const RegisterID regS1 = ARMRegisters::S1;
+
+ static const RegisterID regStackPtr = ARMRegisters::sp;
+ static const RegisterID regLink = ARMRegisters::lr;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
+ static const FPRegisterID fpRegT3 = ARMRegisters::d3;
+#elif CPU(MIPS)
+ static const RegisterID returnValueRegister = MIPSRegisters::v0;
+ static const RegisterID cachedResultRegister = MIPSRegisters::v0;
+ static const RegisterID firstArgumentRegister = MIPSRegisters::a0;
+
+ // regT0 must be v0 for returning a 32-bit value.
+ static const RegisterID regT0 = MIPSRegisters::v0;
+
+        // regT1 must be v1 for returning a pair of 32-bit values.
+ static const RegisterID regT1 = MIPSRegisters::v1;
+
+ static const RegisterID regT2 = MIPSRegisters::t4;
+
+        // regT3 must be callee-saved, so use an S register.
+ static const RegisterID regT3 = MIPSRegisters::s2;
+
+ static const RegisterID callFrameRegister = MIPSRegisters::s0;
+ static const RegisterID timeoutCheckRegister = MIPSRegisters::s1;
+
+ static const FPRegisterID fpRegT0 = MIPSRegisters::f4;
+ static const FPRegisterID fpRegT1 = MIPSRegisters::f6;
+ static const FPRegisterID fpRegT2 = MIPSRegisters::f8;
+ static const FPRegisterID fpRegT3 = MIPSRegisters::f10;
+#elif CPU(SH4)
+ static const RegisterID timeoutCheckRegister = SH4Registers::r8;
+ static const RegisterID callFrameRegister = SH4Registers::fp;
+
+ static const RegisterID regT0 = SH4Registers::r0;
+ static const RegisterID regT1 = SH4Registers::r1;
+ static const RegisterID regT2 = SH4Registers::r2;
+ static const RegisterID regT3 = SH4Registers::r10;
+ static const RegisterID regT4 = SH4Registers::r4;
+ static const RegisterID regT5 = SH4Registers::r5;
+ static const RegisterID regT6 = SH4Registers::r6;
+ static const RegisterID regT7 = SH4Registers::r7;
+        static const RegisterID firstArgumentRegister = regT4;
+
+ static const RegisterID returnValueRegister = SH4Registers::r0;
+ static const RegisterID cachedResultRegister = SH4Registers::r0;
+
+ static const FPRegisterID fpRegT0 = SH4Registers::fr0;
+ static const FPRegisterID fpRegT1 = SH4Registers::fr2;
+ static const FPRegisterID fpRegT2 = SH4Registers::fr4;
+ static const FPRegisterID fpRegT3 = SH4Registers::fr6;
+ static const FPRegisterID fpRegT4 = SH4Registers::fr8;
+ static const FPRegisterID fpRegT5 = SH4Registers::fr10;
+ static const FPRegisterID fpRegT6 = SH4Registers::fr12;
+ static const FPRegisterID fpRegT7 = SH4Registers::fr14;
+#else
+#error "JIT not supported on this platform."
+#endif
+
+#if USE(JSVALUE32_64)
+        // Can't just propagate JSValue::Int32Tag as Visual Studio doesn't like it
+ static const unsigned Int32Tag = 0xffffffff;
+ COMPILE_ASSERT(Int32Tag == JSValue::Int32Tag, Int32Tag_out_of_sync);
+#else
+ static const unsigned Int32Tag = TagTypeNumber >> 32;
+#endif
+ inline Jump emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload);
+ inline Jump emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst);
+ inline Jump emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch);
+
+ inline void storePtrWithWriteBarrier(TrustedImmPtr ptr, RegisterID /* owner */, Address dest)
+ {
+ storePtr(ptr, dest);
+ }
+
+#if USE(JSVALUE32_64)
+ inline Jump emitJumpIfNotJSCell(unsigned virtualRegisterIndex);
+ inline Address tagFor(int index, RegisterID base = callFrameRegister);
+#endif
+
+#if USE(JSVALUE64)
+ Jump emitJumpIfImmediateNumber(RegisterID reg);
+ Jump emitJumpIfNotImmediateNumber(RegisterID reg);
+ void emitFastArithImmToInt(RegisterID reg);
+#endif
+
+ inline Address payloadFor(int index, RegisterID base = callFrameRegister);
+ inline Address intPayloadFor(int index, RegisterID base = callFrameRegister);
+ inline Address intTagFor(int index, RegisterID base = callFrameRegister);
+ inline Address addressFor(int index, RegisterID base = callFrameRegister);
+ };
+
+ struct ThunkHelpers {
+ static unsigned stringImplDataOffset() { return StringImpl::dataOffset(); }
+ static unsigned jsStringLengthOffset() { return OBJECT_OFFSETOF(JSString, m_length); }
+ static unsigned jsStringValueOffset() { return OBJECT_OFFSETOF(JSString, m_value); }
+ };
+
+#if USE(JSVALUE32_64)
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload)
+ {
+ loadPtr(payloadFor(virtualRegisterIndex), payload);
+ return emitJumpIfNotJSCell(virtualRegisterIndex);
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotJSCell(unsigned virtualRegisterIndex)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return branch32(NotEqual, tagFor(virtualRegisterIndex), TrustedImm32(JSValue::CellTag));
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ loadPtr(payloadFor(virtualRegisterIndex), dst);
+ return branch32(NotEqual, tagFor(static_cast<int>(virtualRegisterIndex)), TrustedImm32(JSValue::Int32Tag));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::tagFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intPayloadFor(int virtualRegisterIndex, RegisterID base)
+ {
+ return payloadFor(virtualRegisterIndex, base);
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intTagFor(int virtualRegisterIndex, RegisterID base)
+ {
+ return tagFor(virtualRegisterIndex, base);
+ }
+
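+    // Loads the double value of a virtual register into dst, converting an
+    // int32 payload on the fly. The returned jump is taken when the register
+    // holds neither an int32 nor a double (its tag is not below LowestTag and
+    // is not Int32Tag), so callers should treat it as the failure case.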
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ loadPtr(tagFor(virtualRegisterIndex), scratch);
+ Jump isDouble = branch32(Below, scratch, TrustedImm32(JSValue::LowestTag));
+ Jump notInt = branch32(NotEqual, scratch, TrustedImm32(JSValue::Int32Tag));
+ loadPtr(payloadFor(virtualRegisterIndex), scratch);
+ convertInt32ToDouble(scratch, dst);
+ Jump done = jump();
+ isDouble.link(this);
+ loadDouble(addressFor(virtualRegisterIndex), dst);
+ done.link(this);
+ return notInt;
+ }
+#endif
+
+#if USE(JSVALUE64)
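+    // In the JSVALUE64 encoding, tagTypeNumberRegister holds the TagTypeNumber
+    // mask: any value with at least one of those bits set is a number, so a
+    // NonZero test against it identifies immediate numbers and a Zero test
+    // identifies everything else.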
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
+ {
+ return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
+ }
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotImmediateNumber(RegisterID reg)
+ {
+ return branchTestPtr(Zero, reg, tagTypeNumberRegister);
+ }
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), dst);
+ return branchTestPtr(NonZero, dst, tagMaskRegister);
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), dst);
+ Jump result = branchPtr(Below, dst, tagTypeNumberRegister);
+ zeroExtend32ToPtr(dst, dst);
+ return result;
+ }
+
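+    // JSVALUE64 double load: values at or above tagTypeNumberRegister are
+    // immediate int32s and are converted directly; other numbers are boxed
+    // doubles, unboxed by adding the TagTypeNumber offset back and moving the
+    // raw bits into the FP register. The returned jump fires when the register
+    // does not hold a number at all.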
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), scratch);
+ Jump notNumber = emitJumpIfNotImmediateNumber(scratch);
+ Jump notInt = branchPtr(Below, scratch, tagTypeNumberRegister);
+ convertInt32ToDouble(scratch, dst);
+ Jump done = jump();
+ notInt.link(this);
+ addPtr(tagTypeNumberRegister, scratch);
+ movePtrToDouble(scratch, dst);
+ done.link(this);
+ return notNumber;
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::emitFastArithImmToInt(RegisterID)
+ {
+ }
+
+#endif
+
+#if USE(JSVALUE64)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return addressFor(virtualRegisterIndex, base);
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intPayloadFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intTagFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ }
+#endif
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::addressFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)));
+ }
+
+}
+
+#endif // JSInterfaceJIT_h
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/src/3rdparty/webkit/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
new file mode 100644
index 0000000..73d0848
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SpecializedThunkJIT_h
+#define SpecializedThunkJIT_h
+
+#if ENABLE(JIT)
+
+#include "Executable.h"
+#include "JSInterfaceJIT.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+ class SpecializedThunkJIT : public JSInterfaceJIT {
+ public:
+ static const int ThisArgument = -1;
+ SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData, ExecutablePool* pool)
+ : m_expectedArgCount(expectedArgCount)
+ , m_globalData(globalData)
+ , m_pool(pool)
+ {
+            // Check that we have the expected number of arguments (the recorded
+            // count includes 'this', hence the +1).
+ m_failures.append(branch32(NotEqual, Address(callFrameRegister, RegisterFile::ArgumentCount * (int)sizeof(Register)), TrustedImm32(expectedArgCount + 1)));
+ }
+
+ void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
+ {
+ unsigned src = argumentToVirtualRegister(argument);
+ m_failures.append(emitLoadDouble(src, dst, scratch));
+ }
+
+ void loadCellArgument(int argument, RegisterID dst)
+ {
+ unsigned src = argumentToVirtualRegister(argument);
+ m_failures.append(emitLoadJSCell(src, dst));
+ }
+
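+        // Loads a cell argument and bails out to the slow case unless it is a
+        // JSString (checked via the vptr) with a zero fiber count, i.e. not a rope.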
+ void loadJSStringArgument(int argument, RegisterID dst)
+ {
+ loadCellArgument(argument, dst);
+ m_failures.append(branchPtr(NotEqual, Address(dst, 0), TrustedImmPtr(m_globalData->jsStringVPtr)));
+ m_failures.append(branchTest32(NonZero, Address(dst, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+ }
+
+ void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
+ {
+ unsigned src = argumentToVirtualRegister(argument);
+ failTarget = emitLoadInt32(src, dst);
+ }
+
+ void loadInt32Argument(int argument, RegisterID dst)
+ {
+ Jump conversionFailed;
+ loadInt32Argument(argument, dst, conversionFailed);
+ m_failures.append(conversionFailed);
+ }
+
+ void appendFailure(const Jump& failure)
+ {
+ m_failures.append(failure);
+ }
+
+ void returnJSValue(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ ret();
+ }
+
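+        // Boxes a double result for return: on JSVALUE64 the raw bits are moved
+        // into regT0 and the TagTypeNumber offset is subtracted; on JSVALUE32_64
+        // the double is spilled just below the stack pointer and reloaded as a
+        // tag/payload pair in regT1/regT0.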
+ void returnDouble(FPRegisterID src)
+ {
+#if USE(JSVALUE64)
+ moveDoubleToPtr(src, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+#else
+ storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
+#endif
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ ret();
+ }
+
+ void returnInt32(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ tagReturnAsInt32();
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ ret();
+ }
+
+ void returnJSCell(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ tagReturnAsJSCell();
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ ret();
+ }
+
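+        // Links every recorded failure jump to the generic fallback (the callers
+        // in ThunkGenerators.cpp pass ctiNativeCall) and finalizes the generated
+        // code into the executable pool.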
+ MacroAssemblerCodePtr finalize(MacroAssemblerCodePtr fallback)
+ {
+ LinkBuffer patchBuffer(this, m_pool.get());
+ patchBuffer.link(m_failures, CodeLocationLabel(fallback));
+ return patchBuffer.finalizeCode().m_code;
+ }
+
+ private:
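+        // Arguments live in the registers immediately below the call frame
+        // header: argument i maps to the virtual register at offset
+        // -(CallFrameHeaderSize + (expectedArgCount - i)), so ThisArgument (-1)
+        // maps to the slot immediately before argument 0.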
+ int argumentToVirtualRegister(unsigned argument)
+ {
+ return -static_cast<int>(RegisterFile::CallFrameHeaderSize + (m_expectedArgCount - argument));
+ }
+
+ void tagReturnAsInt32()
+ {
+#if USE(JSVALUE64)
+ orPtr(tagTypeNumberRegister, regT0);
+#else
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+#endif
+ }
+
+ void tagReturnAsJSCell()
+ {
+#if USE(JSVALUE32_64)
+ move(TrustedImm32(JSValue::CellTag), regT1);
+#endif
+ }
+
+ int m_expectedArgCount;
+ JSGlobalData* m_globalData;
+ RefPtr<ExecutablePool> m_pool;
+ MacroAssembler::JumpList m_failures;
+ };
+
+}
+
+#endif // ENABLE(JIT)
+
+#endif // SpecializedThunkJIT_h
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/src/3rdparty/webkit/Source/JavaScriptCore/jit/ThunkGenerators.cpp
new file mode 100644
index 0000000..1201696
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ThunkGenerators.h"
+
+#include "CodeBlock.h"
+#include <wtf/text/StringImpl.h>
+#include "SpecializedThunkJIT.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+static void stringCharLoad(SpecializedThunkJIT& jit)
+{
+ // load string
+ jit.loadJSStringArgument(SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);
+ // regT0 now contains this, and is a non-rope JSString*
+
+ // Load string length to regT2, and start the process of loading the data pointer into regT0
+ jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::stringImplDataOffset()), SpecializedThunkJIT::regT0);
+
+ // load index
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));
+
+ // Load the character
+ jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
+}
+
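+// Converts a character code in src (which must be < 0x100) into the
+// corresponding single-character JSString by indexing the SmallStrings cache;
+// fails if the code is out of range or the cached string has not been created
+// yet (a null entry).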
+static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
+{
+ jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
+ jit.move(MacroAssembler::TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), scratch);
+ jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
+ jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
+}
+
+MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ stringCharLoad(jit);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ stringCharLoad(jit);
+ charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ // load char code
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
+ charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ if (!jit.supportsFloatingPointSqrt())
+ return globalData->jitStubs->ctiNativeCall();
+
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
+}
+
+static const double oneConstant = 1.0;
+static const double negativeHalfConstant = -0.5;
+
+MacroAssemblerCodePtr powThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(2, globalData, pool);
+ if (!jit.supportsFloatingPoint())
+ return globalData->jitStubs->ctiNativeCall();
+
+ jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ MacroAssembler::Jump nonIntExponent;
+ jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
+ jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));
+
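+    // Integer exponent path: exponentiation by squaring. fpRegT1 starts at 1.0
+    // and accumulates the result; on each iteration the base (fpRegT0) is
+    // multiplied in when the low bit of the exponent is set, then the base is
+    // squared and the exponent shifted right until it reaches zero.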
+ MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
+ MacroAssembler::Label startLoop(jit.label());
+
+ MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
+ jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
+ exponentIsEven.link(&jit);
+ jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
+ jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);
+
+ exponentIsZero.link(&jit);
+
+ {
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT1);
+ }
+
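+    // Non-integer exponent: only pow(x, -0.5) with x > 1 is handled inline,
+    // computed as 1 / sqrt(x); anything else (or missing sqrt support) falls
+    // back to the generic native call.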
+ if (jit.supportsFloatingPointSqrt()) {
+ nonIntExponent.link(&jit);
+ jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
+ jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
+ jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
+ jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
+ jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
+
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT1);
+ } else
+ jit.appendFailure(nonIntExponent);
+
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
+}
+
+}
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/Source/JavaScriptCore/jit/ThunkGenerators.h b/src/3rdparty/webkit/Source/JavaScriptCore/jit/ThunkGenerators.h
new file mode 100644
index 0000000..15261f7
--- /dev/null
+++ b/src/3rdparty/webkit/Source/JavaScriptCore/jit/ThunkGenerators.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ThunkGenerators_h
+#define ThunkGenerators_h
+
+#if ENABLE(JIT)
+namespace JSC {
+ class ExecutablePool;
+ class JSGlobalData;
+ class NativeExecutable;
+ class MacroAssemblerCodePtr;
+
+ typedef MacroAssemblerCodePtr (*ThunkGenerator)(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr powThunkGenerator(JSGlobalData*, ExecutablePool*);
+}
+#endif
+
+#endif // ThunkGenerators_h