summaryrefslogtreecommitdiffstats
path: root/src/3rdparty/webkit/JavaScriptCore/jit
diff options
context:
space:
mode:
Diffstat (limited to 'src/3rdparty/webkit/JavaScriptCore/jit')
-rw-r--r--src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.cpp38
-rw-r--r--src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.h179
-rw-r--r--src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp56
-rw-r--r--src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocatorWin.cpp56
-rw-r--r--src/3rdparty/webkit/JavaScriptCore/jit/JIT.cpp1907
-rw-r--r--src/3rdparty/webkit/JavaScriptCore/jit/JIT.h530
-rw-r--r--src/3rdparty/webkit/JavaScriptCore/jit/JITArithmetic.cpp769
-rw-r--r--src/3rdparty/webkit/JavaScriptCore/jit/JITCall.cpp353
-rw-r--r--src/3rdparty/webkit/JavaScriptCore/jit/JITInlineMethods.h406
-rw-r--r--src/3rdparty/webkit/JavaScriptCore/jit/JITPropertyAccess.cpp704
10 files changed, 4998 insertions, 0 deletions
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.cpp
new file mode 100644
index 0000000..f6b27ec
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+size_t ExecutableAllocator::pageSize = 0;
+
+}
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.h b/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.h
new file mode 100644
index 0000000..1541256
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExecutableAllocator_h
+#define ExecutableAllocator_h
+
+#if ENABLE(ASSEMBLER)
+
+#include <wtf/Assertions.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
+
+#include <limits>
+
+#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
+#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
+
+namespace JSC {
+
+class ExecutablePool : public RefCounted<ExecutablePool> {
+private:
+ struct Allocation {
+ char* pages;
+ size_t size;
+ };
+ typedef Vector<Allocation, 2> AllocationList;
+
+public:
+ static PassRefPtr<ExecutablePool> create(size_t n)
+ {
+ return adoptRef(new ExecutablePool(n));
+ }
+
+ void* alloc(size_t n)
+ {
+ ASSERT(m_freePtr <= m_end);
+
+ // Round 'n' up to a multiple of word size; if all allocations are of
+ // word sized quantities, then all subsequent allocations will be aligned.
+ n = roundUpAllocationSize(n, sizeof(void*));
+
+ if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
+ void* result = m_freePtr;
+ m_freePtr += n;
+ return result;
+ }
+
+ // Insufficient space to allocate in the existing pool
+ // so we need allocate into a new pool
+ return poolAllocate(n);
+ }
+
+ ~ExecutablePool()
+ {
+ AllocationList::const_iterator end = m_pools.end();
+ for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
+ ExecutablePool::systemRelease(*ptr);
+ }
+
+ size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
+
+private:
+ static Allocation systemAlloc(size_t n);
+ static void systemRelease(const Allocation& alloc);
+
+ inline size_t roundUpAllocationSize(size_t request, size_t granularity)
+ {
+ if ((std::numeric_limits<size_t>::max() - granularity) <= request)
+ CRASH(); // Allocation is too large
+
+ // Round up to next page boundary
+ size_t size = request + (granularity - 1);
+ size = size & ~(granularity - 1);
+ ASSERT(size >= request);
+ return size;
+ }
+
+ ExecutablePool(size_t n);
+
+ void* poolAllocate(size_t n);
+
+ char* m_freePtr;
+ char* m_end;
+ AllocationList m_pools;
+};
+
+class ExecutableAllocator {
+public:
+ static size_t pageSize;
+ ExecutableAllocator()
+ {
+ if (!pageSize)
+ intializePageSize();
+ m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+ }
+
+ PassRefPtr<ExecutablePool> poolForSize(size_t n)
+ {
+ // Try to fit in the existing small allocator
+ if (n < m_smallAllocationPool->available())
+ return m_smallAllocationPool;
+
+ // If the request is large, we just provide a unshared allocator
+ if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
+ return ExecutablePool::create(n);
+
+ // Create a new allocator
+ RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+
+ // If the new allocator will result in more free space than in
+ // the current small allocator, then we will use it instead
+ if ((pool->available() - n) > m_smallAllocationPool->available())
+ m_smallAllocationPool = pool;
+ return pool.release();
+ }
+
+private:
+ RefPtr<ExecutablePool> m_smallAllocationPool;
+ static void intializePageSize();
+};
+
+inline ExecutablePool::ExecutablePool(size_t n)
+{
+ size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
+ Allocation mem = systemAlloc(allocSize);
+ m_pools.append(mem);
+ m_freePtr = mem.pages;
+ if (!m_freePtr)
+ CRASH(); // Failed to allocate
+ m_end = m_freePtr + allocSize;
+}
+
+inline void* ExecutablePool::poolAllocate(size_t n)
+{
+ size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
+
+ Allocation result = systemAlloc(allocSize);
+ if (!result.pages)
+ CRASH(); // Failed to allocate
+
+ ASSERT(m_end >= m_freePtr);
+ if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
+ // Replace allocation pool
+ m_freePtr = result.pages + n;
+ m_end = result.pages + allocSize;
+ }
+
+ m_pools.append(result);
+ return result.pages;
+}
+
+}
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // !defined(ExecutableAllocator_h)
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
new file mode 100644
index 0000000..21955d7
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(ASSEMBLER)
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+namespace JSC {
+
+void ExecutableAllocator::intializePageSize()
+{
+ ExecutableAllocator::pageSize = getpagesize();
+}
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
+{
+ ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(mmap(NULL, n, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0)), n};
+ return alloc;
+}
+
+void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
+{
+ int result = munmap(alloc.pages, alloc.size);
+ ASSERT_UNUSED(result, !result);
+}
+
+}
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocatorWin.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
new file mode 100644
index 0000000..7467f81
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(ASSEMBLER)
+
+#include "windows.h"
+
+namespace JSC {
+
+void ExecutableAllocator::intializePageSize()
+{
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ ExecutableAllocator::pageSize = system_info.dwPageSize;
+}
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
+{
+ ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(VirtualAlloc(0, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE)), n};
+ return alloc;
+}
+
+void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
+{
+ VirtualFree(alloc.pages, 0, MEM_RELEASE);
+}
+
+}
+
+#endif // ENABLE(ASSEMBLER)
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JIT.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JIT.cpp
new file mode 100644
index 0000000..0ce9bc1
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JIT.cpp
@@ -0,0 +1,1907 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+COMPILE_ASSERT(STUB_ARGS_code == 0xC, STUB_ARGS_code_is_C);
+COMPILE_ASSERT(STUB_ARGS_callFrame == 0xE, STUB_ARGS_callFrame_is_E);
+
+#if COMPILER(GCC) && PLATFORM(X86)
+
+#if PLATFORM(DARWIN)
+#define SYMBOL_STRING(name) "_" #name
+#else
+#define SYMBOL_STRING(name) #name
+#endif
+
+asm(
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushl %ebp" "\n"
+ "movl %esp, %ebp" "\n"
+ "pushl %esi" "\n"
+ "pushl %edi" "\n"
+ "pushl %ebx" "\n"
+ "subl $0x1c, %esp" "\n"
+ "movl $512, %esi" "\n"
+    "movl 0x38(%esp), %edi" "\n" // 0x38 = 0x0E * 4, 0x0E = STUB_ARGS_callFrame (see assertion above)
+    "call *0x30(%esp)" "\n" // 0x30 = 0x0C * 4, 0x0C = STUB_ARGS_code (see assertion above)
+ "addl $0x1c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+asm(
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+ "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPvz) "\n"
+#else
+#if USE(JIT_STUB_ARGUMENT_REGISTER)
+ "movl %esp, %ecx" "\n"
+#else // JIT_STUB_ARGUMENT_STACK
+ "movl %esp, 0(%esp)" "\n"
+#endif
+ "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPPv) "\n"
+#endif
+ "addl $0x1c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(GCC) && PLATFORM(X86_64)
+
+#if PLATFORM(DARWIN)
+#define SYMBOL_STRING(name) "_" #name
+#else
+#define SYMBOL_STRING(name) #name
+#endif
+
+asm(
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushq %rbp" "\n"
+ "movq %rsp, %rbp" "\n"
+ "pushq %r12" "\n"
+ "pushq %r13" "\n"
+ "pushq %rbx" "\n"
+ "subq $0x38, %rsp" "\n"
+ "movq $512, %r12" "\n"
+    "movq 0x70(%rsp), %r13" "\n" // 0x70 = 0x0E * 8, 0x0E = STUB_ARGS_callFrame (see assertion above)
+    "call *0x60(%rsp)" "\n" // 0x60 = 0x0C * 8, 0x0C = STUB_ARGS_code (see assertion above)
+ "addq $0x38, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm(
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+#if USE(JIT_STUB_ARGUMENT_REGISTER)
+ "movq %rsp, %rdi" "\n"
+ "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPPv) "\n"
+#else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK
+#error "JIT_STUB_ARGUMENT configuration not supported."
+#endif
+ "addq $0x38, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(MSVC)
+
+extern "C" {
+
+ __declspec(naked) JSValueEncodedAsPointer* ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValuePtr* exception, Profiler**, JSGlobalData*)
+ {
+ __asm {
+ push ebp;
+ mov ebp, esp;
+ push esi;
+ push edi;
+ push ebx;
+ sub esp, 0x1c;
+ mov esi, 512;
+ mov ecx, esp;
+ mov edi, [esp + 0x38];
+            call [esp + 0x30]; // 0x30 = 0x0C * 4, 0x0C = STUB_ARGS_code (see assertion above)
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiVMThrowTrampoline()
+ {
+ __asm {
+#if USE(JIT_STUB_ARGUMENT_REGISTER)
+ mov ecx, esp;
+#else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK
+#error "JIT_STUB_ARGUMENT configuration not supported."
+#endif
+ call JSC::Interpreter::cti_vm_throw;
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+}
+
+#endif
+
+void ctiSetReturnAddress(void** where, void* what)
+{
+ *where = what;
+}
+
+void ctiPatchCallByReturnAddress(void* where, void* what)
+{
+ MacroAssembler::Jump::patch(where, what);
+}
+
+JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
+ : m_interpreter(globalData->interpreter)
+ , m_globalData(globalData)
+ , m_codeBlock(codeBlock)
+ , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
+ , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
+ , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
+ , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
+ , m_jumpTargetsPosition(0)
+{
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ bool negated = (type == OpNStrictEq);
+
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);
+
+    // Check that both are immediates, if so check if they're equal
+ Jump firstNotImmediate = emitJumpIfJSCell(X86::eax);
+ Jump secondNotImmediate = emitJumpIfJSCell(X86::edx);
+ Jump bothWereImmediatesButNotEqual = jne32(X86::edx, X86::eax);
+
+ // They are equal - set the result to true. (Or false, if negated).
+ move(ImmPtr(JSValuePtr::encode(jsBoolean(!negated))), X86::eax);
+ Jump bothWereImmediatesAndEqual = jump();
+
+ // eax was not an immediate, we haven't yet checked edx.
+ // If edx is also a JSCell, or is 0, then jump to a slow case,
+ // otherwise these values are not equal.
+ firstNotImmediate.link(this);
+ emitJumpSlowCaseIfJSCell(X86::edx);
+ addSlowCase(jePtr(X86::edx, ImmPtr(JSValuePtr::encode(JSImmediate::zeroImmediate()))));
+ Jump firstWasNotImmediate = jump();
+
+ // eax was an immediate, but edx wasn't.
+ // If eax is 0 jump to a slow case, otherwise these values are not equal.
+ secondNotImmediate.link(this);
+ addSlowCase(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(JSImmediate::zeroImmediate()))));
+
+ // We get here if the two values are different immediates, or one is 0 and the other is a JSCell.
+    // Values are not equal, set the result to false.
+ bothWereImmediatesButNotEqual.link(this);
+ firstWasNotImmediate.link(this);
+ move(ImmPtr(JSValuePtr::encode(jsBoolean(negated))), X86::eax);
+
+ bothWereImmediatesAndEqual.link(this);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emitSlowScriptCheck()
+{
+ Jump skipTimeout = jnzSub32(Imm32(1), timeoutCheckRegister);
+ emitCTICall(Interpreter::cti_timeout_check);
+ move(X86::eax, timeoutCheckRegister);
+ skipTimeout.link(this);
+
+ killLastResultRegister();
+}
+
+
+#define NEXT_OPCODE(name) \
+ m_bytecodeIndex += OPCODE_LENGTH(name); \
+ break;
+
+#define CTI_COMPILE_BINARY_OP(name) \
+ case name: { \
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); \
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx); \
+ emitCTICall(Interpreter::cti_##name); \
+ emitPutVirtualRegister(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#define CTI_COMPILE_UNARY_OP(name) \
+ case name: { \
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); \
+ emitCTICall(Interpreter::cti_##name); \
+ emitPutVirtualRegister(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+void JIT::privateCompileMainPass()
+{
+ Instruction* instructionsBegin = m_codeBlock->instructions().begin();
+ unsigned instructionCount = m_codeBlock->instructions().size();
+ unsigned propertyAccessInstructionIndex = 0;
+ unsigned globalResolveInfoIndex = 0;
+ unsigned callLinkInfoIndex = 0;
+
+ for (m_bytecodeIndex = 0; m_bytecodeIndex < instructionCount; ) {
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
+ ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeIndex);
+
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_bytecodeIndex > 0) // Avoid the overhead of sampling op_enter twice.
+ store32(m_interpreter->sampler()->encodeSample(currentInstruction), m_interpreter->sampler()->sampleSlot());
+#endif
+
+ m_labels[m_bytecodeIndex] = label();
+ OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);
+
+ switch (opcodeID) {
+ case op_mov: {
+ emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_mov);
+ }
+ case op_add: {
+ compileFastArith_op_add(currentInstruction);
+ NEXT_OPCODE(op_add);
+ }
+ case op_end: {
+ if (m_codeBlock->needsFullScopeChain())
+ emitCTICall(Interpreter::cti_op_end);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+ push(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+ NEXT_OPCODE(op_end);
+ }
+ case op_jmp: {
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target + 1);
+ NEXT_OPCODE(op_jmp);
+ }
+ case op_pre_inc: {
+ compileFastArith_op_pre_inc(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_pre_inc);
+ }
+ case op_loop: {
+ emitSlowScriptCheck();
+
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target + 1);
+ NEXT_OPCODE(op_end);
+ }
+ case op_loop_if_less: {
+ emitSlowScriptCheck();
+
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = JSImmediate::intValue(getConstantOperand(op2));
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(jl32(X86::eax, Imm32(op2imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::edx);
+ addJump(jl32(X86::eax, X86::edx), target + 3);
+ }
+ NEXT_OPCODE(op_loop_if_less);
+ }
+ case op_loop_if_lesseq: {
+ emitSlowScriptCheck();
+
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = JSImmediate::intValue(getConstantOperand(op2));
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(jle32(X86::eax, Imm32(op2imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::edx);
+ addJump(jle32(X86::eax, X86::edx), target + 3);
+ }
+ NEXT_OPCODE(op_loop_if_less);
+ }
+ case op_new_object: {
+ emitCTICall(Interpreter::cti_op_new_object);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_new_object);
+ }
+ case op_put_by_id: {
+ compilePutByIdHotPath(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, propertyAccessInstructionIndex++);
+ NEXT_OPCODE(op_put_by_id);
+ }
+ case op_get_by_id: {
+ compileGetByIdHotPath(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), propertyAccessInstructionIndex++);
+ NEXT_OPCODE(op_get_by_id);
+ }
+ case op_instanceof: {
+ emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax); // value
+ emitGetVirtualRegister(currentInstruction[3].u.operand, X86::ecx); // baseVal
+ emitGetVirtualRegister(currentInstruction[4].u.operand, X86::edx); // proto
+
+ // check if any are immediates
+ move(X86::eax, X86::ebx);
+ orPtr(X86::ecx, X86::ebx);
+ orPtr(X86::edx, X86::ebx);
+ emitJumpSlowCaseIfNotJSCell(X86::ebx);
+
+ // check that all are object type - this is a bit of a bithack to avoid excess branching;
+ // we check that the sum of the three type codes from Structures is exactly 3 * ObjectType,
+ // this works because NumberType and StringType are smaller
+ move(Imm32(3 * ObjectType), X86::ebx);
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::eax);
+ loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ loadPtr(Address(X86::edx, FIELD_OFFSET(JSCell, m_structure)), X86::edx);
+ sub32(Address(X86::eax, FIELD_OFFSET(Structure, m_typeInfo.m_type)), X86::ebx);
+ sub32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_type)), X86::ebx);
+ addSlowCase(jne32(Address(X86::edx, FIELD_OFFSET(Structure, m_typeInfo.m_type)), X86::ebx));
+
+ // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
+ load32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), X86::ecx);
+ and32(Imm32(ImplementsHasInstance | OverridesHasInstance), X86::ecx);
+ addSlowCase(jne32(X86::ecx, Imm32(ImplementsHasInstance)));
+
+ emitGetVirtualRegister(currentInstruction[2].u.operand, X86::ecx); // reload value
+ emitGetVirtualRegister(currentInstruction[4].u.operand, X86::edx); // reload proto
+
+ // optimistically load true result
+ move(ImmPtr(JSValuePtr::encode(jsBoolean(true))), X86::eax);
+
+ Label loop(this);
+
+ // load value's prototype
+ loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
+
+ Jump exit = jePtr(X86::ecx, X86::edx);
+
+ jnePtr(X86::ecx, ImmPtr(JSValuePtr::encode(jsNull())), loop);
+
+ move(ImmPtr(JSValuePtr::encode(jsBoolean(false))), X86::eax);
+
+ exit.link(this);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+
+ NEXT_OPCODE(op_instanceof);
+ }
+ case op_del_by_id: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+ emitPutJITStubArgConstant(ident, 2);
+ emitCTICall(Interpreter::cti_op_del_by_id);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_del_by_id);
+ }
+ case op_mul: {
+ compileFastArith_op_mul(currentInstruction);
+ NEXT_OPCODE(op_mul);
+ }
+ case op_new_func: {
+ FuncDeclNode* func = m_codeBlock->function(currentInstruction[2].u.operand);
+ emitPutJITStubArgConstant(func, 1);
+ emitCTICall(Interpreter::cti_op_new_func);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_new_func);
+ }
+ case op_call: {
+ compileOpCall(opcodeID, currentInstruction, callLinkInfoIndex++);
+ NEXT_OPCODE(op_call);
+ }
+ case op_call_eval: {
+ compileOpCall(opcodeID, currentInstruction, callLinkInfoIndex++);
+ NEXT_OPCODE(op_call_eval);
+ }
+ case op_construct: {
+ compileOpCall(opcodeID, currentInstruction, callLinkInfoIndex++);
+ NEXT_OPCODE(op_construct);
+ }
+ case op_get_global_var: {
+ JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
+ move(ImmPtr(globalObject), X86::eax);
+ emitGetVariableObjectRegister(X86::eax, currentInstruction[3].u.operand, X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_get_global_var);
+ }
+ case op_put_global_var: {
+ emitGetVirtualRegister(currentInstruction[3].u.operand, X86::edx);
+ JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
+ move(ImmPtr(globalObject), X86::eax);
+ emitPutVariableObjectRegister(X86::edx, X86::eax, currentInstruction[2].u.operand);
+ NEXT_OPCODE(op_put_global_var);
+ }
+ case op_get_scoped_var: {
+ int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
+
+ emitGetFromCallFrameHeader(RegisterFile::ScopeChain, X86::eax);
+ while (skip--)
+ loadPtr(Address(X86::eax, FIELD_OFFSET(ScopeChainNode, next)), X86::eax);
+
+ loadPtr(Address(X86::eax, FIELD_OFFSET(ScopeChainNode, object)), X86::eax);
+ emitGetVariableObjectRegister(X86::eax, currentInstruction[2].u.operand, X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_get_scoped_var);
+ }
+ case op_put_scoped_var: {
+ int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+
+ emitGetFromCallFrameHeader(RegisterFile::ScopeChain, X86::edx);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, X86::eax);
+ while (skip--)
+ loadPtr(Address(X86::edx, FIELD_OFFSET(ScopeChainNode, next)), X86::edx);
+
+ loadPtr(Address(X86::edx, FIELD_OFFSET(ScopeChainNode, object)), X86::edx);
+ emitPutVariableObjectRegister(X86::eax, X86::edx, currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_put_scoped_var);
+ }
+ case op_tear_off_activation: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
+ emitCTICall(Interpreter::cti_op_tear_off_activation);
+ NEXT_OPCODE(op_tear_off_activation);
+ }
+ case op_tear_off_arguments: {
+ emitCTICall(Interpreter::cti_op_tear_off_arguments);
+ NEXT_OPCODE(op_tear_off_arguments);
+ }
+ case op_ret: {
+ // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain())
+ emitCTICall(Interpreter::cti_op_ret_scopeChain);
+
+ // Return the result in %eax.
+ emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeader(RegisterFile::ReturnPC, X86::edx);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeader(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ push(X86::edx);
+ ret();
+
+ NEXT_OPCODE(op_ret);
+ }
+ case op_new_array: {
+ emitPutJITStubArgConstant(currentInstruction[2].u.operand, 1);
+ emitPutJITStubArgConstant(currentInstruction[3].u.operand, 2);
+ emitCTICall(Interpreter::cti_op_new_array);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_new_array);
+ }
+ case op_resolve: {
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ emitPutJITStubArgConstant(ident, 1);
+ emitCTICall(Interpreter::cti_op_resolve);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_resolve);
+ }
+ case op_construct_verify: {
+ emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+
+ emitJumpSlowCaseIfNotJSCell(X86::eax);
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ addSlowCase(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+
+ NEXT_OPCODE(op_construct_verify);
+ }
+ case op_get_by_val: {
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx);
+ emitJumpSlowCaseIfNotImmNum(X86::edx);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+ // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
+ // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
+        // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
+        // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
+ // extending since it makes it easier to re-tag the value in the slow case.
+ zeroExtend32ToPtr(X86::edx, X86::edx);
+#else
+ emitFastArithImmToInt(X86::edx);
+#endif
+ emitJumpSlowCaseIfNotJSCell(X86::eax);
+ addSlowCase(jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr)));
+
+ // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx);
+ addSlowCase(jae32(X86::edx, Address(X86::eax, FIELD_OFFSET(JSArray, m_fastAccessCutoff))));
+
+ // Get the value from the vector
+ loadPtr(BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_get_by_val);
+ }
+ case op_resolve_func: {
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+ emitPutJITStubArgConstant(ident, 1);
+ emitCTICall(Interpreter::cti_op_resolve_func);
+ emitPutVirtualRegister(currentInstruction[2].u.operand, X86::edx);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_resolve_func);
+ }
+ case op_sub: {
+ compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
+ NEXT_OPCODE(op_sub);
+ }
+ case op_put_by_val: {
+ emitGetVirtualRegisters(currentInstruction[1].u.operand, X86::eax, currentInstruction[2].u.operand, X86::edx);
+ emitJumpSlowCaseIfNotImmNum(X86::edx);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ // See comment in op_get_by_val.
+ zeroExtend32ToPtr(X86::edx, X86::edx);
+#else
+ emitFastArithImmToInt(X86::edx);
+#endif
+ emitJumpSlowCaseIfNotJSCell(X86::eax);
+ addSlowCase(jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr)));
+
+ // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx);
+ Jump inFastVector = jb32(X86::edx, Address(X86::eax, FIELD_OFFSET(JSArray, m_fastAccessCutoff)));
+        // No; oh well, check if the access is within the vector - if so, we may still be okay.
+ addSlowCase(jae32(X86::edx, Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_vectorLength))));
+
+ // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
+        // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
+ addSlowCase(jzPtr(BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))));
+
+ // All good - put the value into the array.
+ inFastVector.link(this);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, X86::eax);
+ storePtr(X86::eax, BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])));
+ NEXT_OPCODE(op_put_by_val);
+ }
+ CTI_COMPILE_BINARY_OP(op_lesseq)
+ case op_loop_if_true: {
+ emitSlowScriptCheck();
+
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+
+ Jump isZero = jePtr(X86::eax, ImmPtr(JSValuePtr::encode(JSImmediate::zeroImmediate())));
+ addJump(emitJumpIfImmNum(X86::eax), target + 2);
+
+ addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2);
+ addSlowCase(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(false)))));
+
+ isZero.link(this);
+ NEXT_OPCODE(op_loop_if_true);
+ };
+ case op_resolve_base: {
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ emitPutJITStubArgConstant(ident, 1);
+ emitCTICall(Interpreter::cti_op_resolve_base);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_resolve_base);
+ }
+ case op_negate: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
+ emitCTICall(Interpreter::cti_op_negate);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_negate);
+ }
+ case op_resolve_skip: {
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ emitPutJITStubArgConstant(ident, 1);
+ emitPutJITStubArgConstant(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain(), 2);
+ emitCTICall(Interpreter::cti_op_resolve_skip);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_resolve_skip);
+ }
+ case op_resolve_global: {
+ // Fast case
+ void* globalObject = currentInstruction[2].u.jsCell;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ unsigned currentIndex = globalResolveInfoIndex++;
+ void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
+ void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
+
+ // Check Structure of global object
+ move(ImmPtr(globalObject), X86::eax);
+ loadPtr(structureAddress, X86::edx);
+ Jump noMatch = jnePtr(X86::edx, Address(X86::eax, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match
+
+ // Load cached property
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSGlobalObject, m_propertyStorage)), X86::eax);
+ load32(offsetAddr, X86::edx);
+ loadPtr(BaseIndex(X86::eax, X86::edx, ScalePtr), X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ Jump end = jump();
+
+ // Slow case
+ noMatch.link(this);
+ emitPutJITStubArgConstant(globalObject, 1);
+ emitPutJITStubArgConstant(ident, 2);
+ emitPutJITStubArgConstant(currentIndex, 3);
+ emitCTICall(Interpreter::cti_op_resolve_global);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ end.link(this);
+ NEXT_OPCODE(op_resolve_global);
+ }
+ CTI_COMPILE_BINARY_OP(op_div)
+ case op_pre_dec: {
+ compileFastArith_op_pre_dec(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_pre_dec);
+ }
+ case op_jnless: {
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = JSImmediate::intValue(getConstantOperand(op2));
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(jge32(X86::eax, Imm32(op2imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::edx);
+ addJump(jge32(X86::eax, X86::edx), target + 3);
+ }
+ NEXT_OPCODE(op_jnless);
+ }
+ case op_not: {
+ emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax);
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), X86::eax);
+ addSlowCase(jnzPtr(X86::eax, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_not);
+ }
+ case op_jfalse: {
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+
+ addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(JSImmediate::zeroImmediate()))), target + 2);
+ Jump isNonZero = emitJumpIfImmNum(X86::eax);
+
+ addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(false)))), target + 2);
+ addSlowCase(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(true)))));
+
+ isNonZero.link(this);
+ NEXT_OPCODE(op_jfalse);
+ };
+ case op_jeq_null: {
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, X86::eax);
+ Jump isImmediate = emitJumpIfNotJSCell(X86::eax);
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ addJump(jnz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+ and32(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax);
+ addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2);
+
+ wasNotImmediate.link(this);
+ NEXT_OPCODE(op_jeq_null);
+ };
+ case op_jneq_null: {
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, X86::eax);
+ Jump isImmediate = emitJumpIfNotJSCell(X86::eax);
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ addJump(jz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+ and32(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax);
+ addJump(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2);
+
+ wasNotImmediate.link(this);
+ NEXT_OPCODE(op_jneq_null);
+ }
+ case op_post_inc: {
+ compileFastArith_op_post_inc(currentInstruction[1].u.operand, currentInstruction[2].u.operand);
+ NEXT_OPCODE(op_post_inc);
+ }
+ case op_unexpected_load: {
+ JSValuePtr v = m_codeBlock->unexpectedConstant(currentInstruction[2].u.operand);
+ move(ImmPtr(JSValuePtr::encode(v)), X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_unexpected_load);
+ }
+ case op_jsr: {
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
+ DataLabelPtr storeLocation = storePtrWithPatch(Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target + 2);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+ NEXT_OPCODE(op_jsr);
+ }
+ case op_sret: {
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+ NEXT_OPCODE(op_sret);
+ }
+ case op_eq: {
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx);
+ emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx);
+ sete32(X86::edx, X86::eax);
+ emitTagAsBoolImmediate(X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_eq);
+ }
+ case op_lshift: {
+ compileFastArith_op_lshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
+ NEXT_OPCODE(op_lshift);
+ }
+ case op_bitand: {
+ compileFastArith_op_bitand(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
+ NEXT_OPCODE(op_bitand);
+ }
+ case op_rshift: {
+ compileFastArith_op_rshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
+ NEXT_OPCODE(op_rshift);
+ }
+ case op_bitnot: {
+ emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ not32(X86::eax);
+ emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+#else
+ xorPtr(Imm32(~JSImmediate::TagTypeInteger), X86::eax);
+#endif
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_bitnot);
+ }
+ case op_resolve_with_base: {
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+ emitPutJITStubArgConstant(ident, 1);
+ emitCTICall(Interpreter::cti_op_resolve_with_base);
+ emitPutVirtualRegister(currentInstruction[2].u.operand, X86::edx);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_resolve_with_base);
+ }
+ case op_new_func_exp: {
+ FuncExprNode* func = m_codeBlock->functionExpression(currentInstruction[2].u.operand);
+ emitPutJITStubArgConstant(func, 1);
+ emitCTICall(Interpreter::cti_op_new_func_exp);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_new_func_exp);
+ }
+ case op_mod: {
+ compileFastArith_op_mod(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
+ NEXT_OPCODE(op_mod);
+ }
+ case op_jtrue: {
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+
+ Jump isZero = jePtr(X86::eax, ImmPtr(JSValuePtr::encode(JSImmediate::zeroImmediate())));
+ addJump(emitJumpIfImmNum(X86::eax), target + 2);
+
+ addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2);
+ addSlowCase(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(false)))));
+
+ isZero.link(this);
+ NEXT_OPCODE(op_jtrue);
+ }
+ CTI_COMPILE_BINARY_OP(op_less)
+ case op_neq: {
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx);
+ emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx);
+ setne32(X86::edx, X86::eax);
+ emitTagAsBoolImmediate(X86::eax);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+
+ NEXT_OPCODE(op_neq);
+ }
+ case op_post_dec: {
+ compileFastArith_op_post_dec(currentInstruction[1].u.operand, currentInstruction[2].u.operand);
+ NEXT_OPCODE(op_post_dec);
+ }
+ CTI_COMPILE_BINARY_OP(op_urshift)
+ case op_bitxor: {
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx);
+ emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx);
+ xorPtr(X86::edx, X86::eax);
+ emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_bitxor);
+ }
+ case op_new_regexp: {
+ RegExp* regExp = m_codeBlock->regexp(currentInstruction[2].u.operand);
+ emitPutJITStubArgConstant(regExp, 1);
+ emitCTICall(Interpreter::cti_op_new_regexp);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_new_regexp);
+ }
+ case op_bitor: {
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx);
+ emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx);
+ orPtr(X86::edx, X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_bitor);
+ }
+ case op_throw: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
+ emitCTICall(Interpreter::cti_op_throw);
+#if PLATFORM(X86_64)
+ addPtr(Imm32(0x38), X86::esp);
+ pop(X86::ebx);
+ pop(X86::r13);
+ pop(X86::r12);
+ pop(X86::ebp);
+ ret();
+#else
+ addPtr(Imm32(0x1c), X86::esp);
+ pop(X86::ebx);
+ pop(X86::edi);
+ pop(X86::esi);
+ pop(X86::ebp);
+ ret();
+#endif
+ NEXT_OPCODE(op_throw);
+ }
+ case op_get_pnames: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
+ emitCTICall(Interpreter::cti_op_get_pnames);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_get_pnames);
+ }
+ case op_next_pname: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
+ unsigned target = currentInstruction[3].u.operand;
+ emitCTICall(Interpreter::cti_op_next_pname);
+ Jump endOfIter = jzPtr(X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ addJump(jump(), target + 3);
+ endOfIter.link(this);
+ NEXT_OPCODE(op_next_pname);
+ }
+ case op_push_scope: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
+ emitCTICall(Interpreter::cti_op_push_scope);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_push_scope);
+ }
+ case op_pop_scope: {
+ emitCTICall(Interpreter::cti_op_pop_scope);
+ NEXT_OPCODE(op_pop_scope);
+ }
+ CTI_COMPILE_UNARY_OP(op_typeof)
+ CTI_COMPILE_UNARY_OP(op_is_undefined)
+ CTI_COMPILE_UNARY_OP(op_is_boolean)
+ CTI_COMPILE_UNARY_OP(op_is_number)
+ CTI_COMPILE_UNARY_OP(op_is_string)
+ CTI_COMPILE_UNARY_OP(op_is_object)
+ CTI_COMPILE_UNARY_OP(op_is_function)
+ case op_stricteq: {
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+ NEXT_OPCODE(op_stricteq);
+ }
+ case op_nstricteq: {
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+ NEXT_OPCODE(op_nstricteq);
+ }
+ case op_to_jsnumber: {
+ int srcVReg = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(srcVReg, X86::eax);
+
+ Jump wasImmediate = emitJumpIfImmNum(X86::eax);
+
+ emitJumpSlowCaseIfNotJSCell(X86::eax, srcVReg);
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ addSlowCase(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
+
+ wasImmediate.link(this);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_to_jsnumber);
+ }
+ CTI_COMPILE_BINARY_OP(op_in)
+ case op_push_new_scope: {
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ emitPutJITStubArgConstant(ident, 1);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx);
+ emitCTICall(Interpreter::cti_op_push_new_scope);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_push_new_scope);
+ }
+ case op_catch: {
+ emitGetCTIParam(STUB_ARGS_callFrame, callFrameRegister);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_catch);
+ }
+ case op_jmp_scopes: {
+ unsigned count = currentInstruction[1].u.operand;
+ emitPutJITStubArgConstant(count, 1);
+ emitCTICall(Interpreter::cti_op_jmp_scopes);
+ unsigned target = currentInstruction[2].u.operand;
+ addJump(jump(), target + 2);
+ NEXT_OPCODE(op_jmp_scopes);
+ }
+ case op_put_by_index: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
+ emitPutJITStubArgConstant(currentInstruction[2].u.operand, 2);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, X86::ecx);
+ emitCTICall(Interpreter::cti_op_put_by_index);
+ NEXT_OPCODE(op_put_by_index);
+ }
+ case op_switch_imm: {
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ emitPutJITStubArgFromVirtualRegister(scrutinee, 1, X86::ecx);
+ emitPutJITStubArgConstant(tableIndex, 2);
+ emitCTICall(Interpreter::cti_op_switch_imm);
+ jump(X86::eax);
+ NEXT_OPCODE(op_switch_imm);
+ }
+ case op_switch_char: {
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ emitPutJITStubArgFromVirtualRegister(scrutinee, 1, X86::ecx);
+ emitPutJITStubArgConstant(tableIndex, 2);
+ emitCTICall(Interpreter::cti_op_switch_char);
+ jump(X86::eax);
+ NEXT_OPCODE(op_switch_char);
+ }
+ case op_switch_string: {
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+
+ emitPutJITStubArgFromVirtualRegister(scrutinee, 1, X86::ecx);
+ emitPutJITStubArgConstant(tableIndex, 2);
+ emitCTICall(Interpreter::cti_op_switch_string);
+ jump(X86::eax);
+ NEXT_OPCODE(op_switch_string);
+ }
+ case op_del_by_val: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx);
+ emitCTICall(Interpreter::cti_op_del_by_val);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_del_by_val);
+ }
+ case op_put_getter: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ emitPutJITStubArgConstant(ident, 2);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, X86::ecx);
+ emitCTICall(Interpreter::cti_op_put_getter);
+ NEXT_OPCODE(op_put_getter);
+ }
+ case op_put_setter: {
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx);
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ emitPutJITStubArgConstant(ident, 2);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, X86::ecx);
+ emitCTICall(Interpreter::cti_op_put_setter);
+ NEXT_OPCODE(op_put_setter);
+ }
+ case op_new_error: {
+ JSValuePtr message = m_codeBlock->unexpectedConstant(currentInstruction[3].u.operand);
+ emitPutJITStubArgConstant(currentInstruction[2].u.operand, 1);
+ emitPutJITStubArgConstant(JSValuePtr::encode(message), 2);
+ emitPutJITStubArgConstant(m_bytecodeIndex, 3);
+ emitCTICall(Interpreter::cti_op_new_error);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_new_error);
+ }
+ case op_debug: {
+ emitPutJITStubArgConstant(currentInstruction[1].u.operand, 1);
+ emitPutJITStubArgConstant(currentInstruction[2].u.operand, 2);
+ emitPutJITStubArgConstant(currentInstruction[3].u.operand, 3);
+ emitCTICall(Interpreter::cti_op_debug);
+ NEXT_OPCODE(op_debug);
+ }
+ case op_eq_null: {
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src1, X86::eax);
+ Jump isImmediate = emitJumpIfNotJSCell(X86::eax);
+
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ setnz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), X86::eax);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ and32(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax);
+ sete32(Imm32(JSImmediate::FullTagTypeNull), X86::eax);
+
+ wasNotImmediate.link(this);
+
+ emitTagAsBoolImmediate(X86::eax);
+ emitPutVirtualRegister(dst);
+
+ NEXT_OPCODE(op_eq_null);
+ }
+ case op_neq_null: {
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src1, X86::eax);
+ Jump isImmediate = emitJumpIfNotJSCell(X86::eax);
+
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ setz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), X86::eax);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ and32(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax);
+ setne32(Imm32(JSImmediate::FullTagTypeNull), X86::eax);
+
+ wasNotImmediate.link(this);
+
+ emitTagAsBoolImmediate(X86::eax);
+ emitPutVirtualRegister(dst);
+
+ NEXT_OPCODE(op_neq_null);
+ }
+ case op_enter: {
+ // Even though CTI doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
+ for (size_t j = 0; j < count; ++j)
+ emitInitRegister(j);
+
+ NEXT_OPCODE(op_enter);
+ }
+ case op_enter_with_activation: {
+ // Even though CTI doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
+ for (size_t j = 0; j < count; ++j)
+ emitInitRegister(j);
+
+ emitCTICall(Interpreter::cti_op_push_activation);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+
+ NEXT_OPCODE(op_enter_with_activation);
+ }
+ case op_create_arguments: {
+ if (m_codeBlock->m_numParameters == 1)
+ emitCTICall(Interpreter::cti_op_create_arguments_no_params);
+ else
+ emitCTICall(Interpreter::cti_op_create_arguments);
+ NEXT_OPCODE(op_create_arguments);
+ }
+ case op_convert_this: {
+ emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+
+ emitJumpSlowCaseIfNotJSCell(X86::eax);
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::edx);
+ addSlowCase(jnz32(Address(X86::edx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+
+ NEXT_OPCODE(op_convert_this);
+ }
+ case op_profile_will_call: {
+ emitGetCTIParam(STUB_ARGS_profilerReference, X86::eax);
+ Jump noProfiler = jzPtr(Address(X86::eax));
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::eax);
+ emitCTICall(Interpreter::cti_op_profile_will_call);
+ noProfiler.link(this);
+
+ NEXT_OPCODE(op_profile_will_call);
+ }
+ case op_profile_did_call: {
+ emitGetCTIParam(STUB_ARGS_profilerReference, X86::eax);
+ Jump noProfiler = jzPtr(Address(X86::eax));
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::eax);
+ emitCTICall(Interpreter::cti_op_profile_did_call);
+ noProfiler.link(this);
+
+ NEXT_OPCODE(op_profile_did_call);
+ }
+ case op_get_array_length:
+ case op_get_by_id_chain:
+ case op_get_by_id_generic:
+ case op_get_by_id_proto:
+ case op_get_by_id_proto_list:
+ case op_get_by_id_self:
+ case op_get_by_id_self_list:
+ case op_get_string_length:
+ case op_put_by_id_generic:
+ case op_put_by_id_replace:
+ case op_put_by_id_transition:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ ASSERT(propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
+ ASSERT(callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+
+#ifndef NDEBUG
+    // reset this, in order to guard its use with asserts
+ m_bytecodeIndex = (unsigned)-1;
+#endif
+}
+
+
+// Link pass: bind the intra-function jumps recorded during the main compile pass.
+// Each m_jmpTable entry pairs an emitted jump (.from) with the bytecode index it
+// targets (.toBytecodeIndex); link every jump to the label that was generated for
+// that bytecode index, then clear the table since all entries are now resolved.
+void JIT::privateCompileLinkPass()
+{
+    unsigned jmpTableCount = m_jmpTable.size();
+    for (unsigned i = 0; i < jmpTableCount; ++i)
+        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeIndex], this);
+    m_jmpTable.clear();
+}
+
+void JIT::privateCompileSlowCases()
+{
+ Instruction* instructionsBegin = m_codeBlock->instructions().begin();
+ unsigned propertyAccessInstructionIndex = 0;
+ unsigned callLinkInfoIndex = 0;
+
+ for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
+ // FIXME: enable peephole optimizations for slow cases when applicable
+ killLastResultRegister();
+
+ m_bytecodeIndex = iter->to;
+#ifndef NDEBUG
+ unsigned firstTo = m_bytecodeIndex;
+#endif
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
+
+ switch (OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+ case op_convert_this: {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_convert_this);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_convert_this);
+ }
+ case op_add: {
+ compileFastArithSlow_op_add(currentInstruction, iter);
+ NEXT_OPCODE(op_add);
+ }
+ case op_construct_verify: {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+
+ NEXT_OPCODE(op_construct_verify);
+ }
+ case op_get_by_val: {
+ // The slow case that handles accesses to arrays (below) may jump back up to here.
+ Label beginGetByValSlow(this);
+
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitFastArithIntToImmNoCheck(X86::edx, X86::edx);
+ notImm.link(this);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitCTICall(Interpreter::cti_op_get_by_val);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+
+ // This is slow case that handles accesses to arrays above the fast cut-off.
+ // First, check if this is an access to the vector
+ linkSlowCase(iter);
+ jae32(X86::edx, Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow);
+
+ // okay, missed the fast region, but it is still in the vector. Get the value.
+ loadPtr(BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), X86::ecx);
+ // Check whether the value loaded is zero; if so we need to return undefined.
+ jzPtr(X86::ecx, beginGetByValSlow);
+ move(X86::ecx, X86::eax);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, X86::eax);
+
+ NEXT_OPCODE(op_get_by_val);
+ }
+ case op_sub: {
+ compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
+ NEXT_OPCODE(op_sub);
+ }
+ case op_rshift: {
+ compileFastArithSlow_op_rshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
+ NEXT_OPCODE(op_rshift);
+ }
+ case op_lshift: {
+ compileFastArithSlow_op_lshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
+ NEXT_OPCODE(op_lshift);
+ }
+ case op_loop_if_less: {
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
+ emitCTICall(Interpreter::cti_op_loop_if_less);
+ emitJumpSlowToHot(jnz32(X86::eax), target + 3);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitCTICall(Interpreter::cti_op_loop_if_less);
+ emitJumpSlowToHot(jnz32(X86::eax), target + 3);
+ }
+ NEXT_OPCODE(op_loop_if_less);
+ }
+ case op_put_by_id: {
+ compilePutByIdSlowCase(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, iter, propertyAccessInstructionIndex++);
+ NEXT_OPCODE(op_put_by_id);
+ }
+ case op_get_by_id: {
+ compileGetByIdSlowCase(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), iter, propertyAccessInstructionIndex++);
+ NEXT_OPCODE(op_get_by_id);
+ }
+ case op_loop_if_lesseq: {
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, X86::ecx);
+ emitCTICall(Interpreter::cti_op_loop_if_lesseq);
+ emitJumpSlowToHot(jnz32(X86::eax), target + 3);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitCTICall(Interpreter::cti_op_loop_if_lesseq);
+ emitJumpSlowToHot(jnz32(X86::eax), target + 3);
+ }
+ NEXT_OPCODE(op_loop_if_lesseq);
+ }
+ case op_pre_inc: {
+ compileFastArithSlow_op_pre_inc(currentInstruction[1].u.operand, iter);
+ NEXT_OPCODE(op_pre_inc);
+ }
+ case op_put_by_val: {
+ // Normal slow cases - either is not an immediate imm, or is an array.
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitFastArithIntToImmNoCheck(X86::edx, X86::edx);
+ notImm.link(this);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, X86::ecx);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitPutJITStubArg(X86::ecx, 3);
+ emitCTICall(Interpreter::cti_op_put_by_val);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
+
+ // slow cases for immediate int accesses to arrays
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, X86::ecx);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitPutJITStubArg(X86::ecx, 3);
+ emitCTICall(Interpreter::cti_op_put_by_val_array);
+
+ NEXT_OPCODE(op_put_by_val);
+ }
+ case op_loop_if_true: {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_jtrue);
+ unsigned target = currentInstruction[2].u.operand;
+ emitJumpSlowToHot(jnz32(X86::eax), target + 2);
+ NEXT_OPCODE(op_loop_if_true);
+ }
+ case op_pre_dec: {
+ compileFastArithSlow_op_pre_dec(currentInstruction[1].u.operand, iter);
+ NEXT_OPCODE(op_pre_dec);
+ }
+ case op_jnless: {
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, X86::ecx);
+ emitCTICall(Interpreter::cti_op_jless);
+ emitJumpSlowToHot(jz32(X86::eax), target + 3);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitCTICall(Interpreter::cti_op_jless);
+ emitJumpSlowToHot(jz32(X86::eax), target + 3);
+ }
+ NEXT_OPCODE(op_jnless);
+ }
+ case op_not: {
+ linkSlowCase(iter);
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), X86::eax);
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_not);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_not);
+ }
+ case op_jfalse: {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_jtrue);
+ unsigned target = currentInstruction[2].u.operand;
+ emitJumpSlowToHot(jz32(X86::eax), target + 2); // inverted!
+ NEXT_OPCODE(op_jfalse);
+ }
+ case op_post_inc: {
+ compileFastArithSlow_op_post_inc(currentInstruction[1].u.operand, currentInstruction[2].u.operand, iter);
+ NEXT_OPCODE(op_post_inc);
+ }
+ case op_bitnot: {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_bitnot);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_bitnot);
+ }
+ case op_bitand: {
+ compileFastArithSlow_op_bitand(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
+ NEXT_OPCODE(op_bitand);
+ }
+ case op_jtrue: {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_jtrue);
+ unsigned target = currentInstruction[2].u.operand;
+ emitJumpSlowToHot(jnz32(X86::eax), target + 2);
+ NEXT_OPCODE(op_jtrue);
+ }
+ case op_post_dec: {
+ compileFastArithSlow_op_post_dec(currentInstruction[1].u.operand, currentInstruction[2].u.operand, iter);
+ NEXT_OPCODE(op_post_dec);
+ }
+ case op_bitxor: {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitCTICall(Interpreter::cti_op_bitxor);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_bitxor);
+ }
+ case op_bitor: {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitCTICall(Interpreter::cti_op_bitor);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_bitor);
+ }
+ case op_eq: {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitCTICall(Interpreter::cti_op_eq);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_eq);
+ }
+ case op_neq: {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitCTICall(Interpreter::cti_op_neq);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_neq);
+ }
+ case op_stricteq: {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitCTICall(Interpreter::cti_op_stricteq);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_stricteq);
+ }
+ case op_nstricteq: {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 2);
+ emitCTICall(Interpreter::cti_op_nstricteq);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_nstricteq);
+ }
+ case op_instanceof: {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(currentInstruction[4].u.operand, 3, X86::ecx);
+ emitCTICall(Interpreter::cti_op_instanceof);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_instanceof);
+ }
+ case op_mod: {
+ compileFastArithSlow_op_mod(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
+ NEXT_OPCODE(op_mod);
+ }
+ case op_mul: {
+ compileFastArithSlow_op_mul(currentInstruction, iter);
+ NEXT_OPCODE(op_mul);
+ }
+
+ case op_call: {
+ compileOpCallSlowCase(currentInstruction, iter, callLinkInfoIndex++, opcodeID);
+ NEXT_OPCODE(op_call);
+ }
+ case op_call_eval: {
+ compileOpCallSlowCase(currentInstruction, iter, callLinkInfoIndex++, opcodeID);
+ NEXT_OPCODE(op_call_eval);
+ }
+ case op_construct: {
+ compileOpCallSlowCase(currentInstruction, iter, callLinkInfoIndex++, opcodeID);
+ NEXT_OPCODE(op_construct);
+ }
+ case op_to_jsnumber: {
+ linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
+ linkSlowCase(iter);
+
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_to_jsnumber);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ NEXT_OPCODE(op_to_jsnumber);
+ }
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to,"Not enough jumps linked in slow case codegen.");
+ ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
+
+ emitJumpSlowToHot(jump(), 0);
+ }
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ ASSERT(propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
+#endif
+ ASSERT(callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+
+#ifndef NDEBUG
+ // reset this, in order to guard it's use with asserts
+ m_bytecodeIndex = (unsigned)-1;
+#endif
+}
+
+// Top-level compilation driver for m_codeBlock.  Emits the prologue
+// (capture of the return PC, and — for function code — a register-file
+// overflow check), runs the main/link/slow-case code generation passes,
+// copies the generated code into an ExecutablePool, and then resolves
+// everything recorded during generation (switch tables, exception
+// handlers, call records, jsr sites, property-access and call-link stub
+// info) against the final executable addresses.
+void JIT::privateCompile()
+{
+#if ENABLE(CODEBLOCK_SAMPLING)
+    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
+#endif
+#if ENABLE(OPCODE_SAMPLING)
+    store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions().begin())), m_interpreter->sampler()->sampleSlot());
+#endif
+
+    // Could use a pop_m, but would need to offset the following instruction if so.
+    pop(X86::ecx);
+    emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC);
+
+    Jump slowRegisterFileCheck;
+    Label afterRegisterFileCheck;
+    if (m_codeBlock->codeType() == FunctionCode) {
+        // In the case of a fast linked call, we do not set this up in the caller.
+        emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+
+        // Compute the new frame end (callFrame + callee register count) and
+        // branch to the slow path if it would overflow RegisterFile::m_end.
+        emitGetCTIParam(STUB_ARGS_registerFile, X86::eax);
+        addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, X86::edx);
+
+        slowRegisterFileCheck = jg32(X86::edx, Address(X86::eax, FIELD_OFFSET(RegisterFile, m_end)));
+        afterRegisterFileCheck = label();
+    }
+
+    privateCompileMainPass();
+    privateCompileLinkPass();
+    privateCompileSlowCases();
+
+    if (m_codeBlock->codeType() == FunctionCode) {
+        // Out-of-line slow path for the register-file check above: call into
+        // the interpreter to grow the register file, then resume.
+        slowRegisterFileCheck.link(this);
+        m_bytecodeIndex = 0; // emitCTICall will add to the map, but doesn't actually need this...
+        emitCTICall(Interpreter::cti_register_file_check);
+#ifndef NDEBUG
+        // reset this, in order to guard its use with asserts
+        m_bytecodeIndex = (unsigned)-1;
+#endif
+        jump(afterRegisterFileCheck);
+    }
+
+    ASSERT(m_jmpTable.isEmpty());
+
+    // Copy the assembled code into executable memory; all patching below is
+    // performed on this final copy via the PatchBuffer.
+    RefPtr<ExecutablePool> allocator = m_globalData->poolForSize(m_assembler.size());
+    void* code = m_assembler.executableCopy(allocator.get());
+    JITCodeRef codeRef(code, allocator);
+
+    PatchBuffer patchBuffer(code);
+
+    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
+    for (unsigned i = 0; i < m_switches.size(); ++i) {
+        SwitchRecord record = m_switches[i];
+        unsigned bytecodeIndex = record.bytecodeIndex;
+
+        if (record.type != SwitchRecord::String) {
+            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
+            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
+
+            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]);
+
+            // A zero branch offset means "no case here"; fall back to the default target.
+            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
+                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
+                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
+            }
+        } else {
+            ASSERT(record.type == SwitchRecord::String);
+
+            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]);
+
+            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
+            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
+                unsigned offset = it->second.branchOffset;
+                it->second.ctiOffset = offset ? patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+            }
+        }
+    }
+
+    // Resolve exception handler targets to native code addresses.
+    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
+        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
+        handler.nativeCode = patchBuffer.addressOf(m_labels[handler.target]);
+    }
+
+    // Link every recorded call site and build the CodeBlock's native-PC to
+    // bytecode-index map (used to recover the vPC from a return address).
+    m_codeBlock->pcVector().reserveCapacity(m_calls.size());
+    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+        if (iter->to)
+            patchBuffer.link(iter->from, iter->to);
+        m_codeBlock->pcVector().append(PC(reinterpret_cast<void**>(patchBuffer.addressOf(iter->from)) - reinterpret_cast<void**>(code), iter->bytecodeIndex));
+    }
+
+    // Link absolute addresses for jsr
+    for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
+        patchBuffer.setPtr(iter->storeLocation, patchBuffer.addressOf(iter->target));
+
+    // Record final addresses for property-access stubs (or zero them when
+    // the optimization is compiled out).
+    for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
+        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+        info.callReturnLocation = patchBuffer.addressOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
+        info.hotPathBegin = patchBuffer.addressOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
+#else
+        info.callReturnLocation = 0;
+        info.hotPathBegin = 0;
+#endif
+    }
+    // Likewise for call-link stubs.
+    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
+        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+#if ENABLE(JIT_OPTIMIZE_CALL)
+        info.callReturnLocation = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].callReturnLocation);
+        info.hotPathBegin = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
+        info.hotPathOther = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].hotPathOther);
+        info.coldPathOther = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].coldPathOther);
+#else
+        info.callReturnLocation = 0;
+        info.hotPathBegin = 0;
+        info.hotPathOther = 0;
+        info.coldPathOther = 0;
+#endif
+    }
+
+    m_codeBlock->setJITCode(codeRef);
+}
+
+// Generates the shared (per-VM, not per-CodeBlock) trampolines: fast-path
+// accessors for array length and string length (when property-access
+// optimization is enabled), and three virtual-call trampolines used by
+// op_call / op_call_eval / op_construct.  The three call trampolines are
+// near-identical; they differ only in which linking stub they invoke at
+// the end (dontLazyLinkCall, lazyLinkCall, or jumping straight to the
+// callee's already-compiled JIT code).  The results are stored on the
+// Interpreter (m_ctiArrayLengthTrampoline etc.).
+void JIT::privateCompileCTIMachineTrampolines()
+{
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+    // (1) The first function provides fast property access for array length
+    Label arrayLengthBegin = align();
+
+    // Check eax is an array
+    Jump array_failureCases1 = emitJumpIfNotJSCell(X86::eax);
+    Jump array_failureCases2 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr));
+
+    // Checks out okay! - get the length from the storage
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::eax);
+    load32(Address(X86::eax, FIELD_OFFSET(ArrayStorage, m_length)), X86::eax);
+
+    // Lengths that cannot be represented as an immediate int fall back to the slow path.
+    Jump array_failureCases3 = ja32(X86::eax, Imm32(JSImmediate::maxImmediateInt));
+
+    // X86::eax contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
+    emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+
+    ret();
+
+    // (2) The second function provides fast property access for string length
+    Label stringLengthBegin = align();
+
+    // Check eax is a string
+    Jump string_failureCases1 = emitJumpIfNotJSCell(X86::eax);
+    Jump string_failureCases2 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsStringVptr));
+
+    // Checks out okay! - get the length from the Ustring.
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep)), X86::eax);
+    load32(Address(X86::eax, FIELD_OFFSET(UString::Rep, len)), X86::eax);
+
+    Jump string_failureCases3 = ja32(X86::eax, Imm32(JSImmediate::maxImmediateInt));
+
+    // X86::eax contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
+    emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+
+    ret();
+#endif
+
+    // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+
+    // (3a) First-call trampoline: compile the callee if needed, check arity,
+    // set up the call frame, then call the don't-lazy-link stub.
+    Label virtualCallPreLinkBegin = align();
+
+    // Load the callee CodeBlock* into eax
+    loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_body)), X86::eax);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(FunctionBodyNode, m_code)), X86::eax);
+    Jump hasCodeBlock1 = jnzPtr(X86::eax);
+    // No CodeBlock yet: pop our return address, call out to compile the
+    // function body, then restore the callee/argCount stub arguments.
+    pop(X86::ebx);
+    restoreArgumentReference();
+    Jump callJSFunction1 = call();
+    emitGetJITStubArg(1, X86::ecx);
+    emitGetJITStubArg(3, X86::edx);
+    push(X86::ebx);
+    hasCodeBlock1.link(this);
+
+    // Check argCount matches callee arity.
+    Jump arityCheckOkay1 = je32(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_numParameters)), X86::edx);
+    pop(X86::ebx);
+    emitPutJITStubArg(X86::ebx, 2);
+    emitPutJITStubArg(X86::eax, 4);
+    restoreArgumentReference();
+    Jump callArityCheck1 = call();
+    move(X86::edx, callFrameRegister);
+    emitGetJITStubArg(1, X86::ecx);
+    emitGetJITStubArg(3, X86::edx);
+    push(X86::ebx);
+    arityCheckOkay1.link(this);
+
+    compileOpCallInitializeCallFrame();
+
+    pop(X86::ebx);
+    emitPutJITStubArg(X86::ebx, 2);
+    restoreArgumentReference();
+    Jump callDontLazyLinkCall = call();
+    push(X86::ebx);
+
+    // Jump into the callee's code (address returned in eax by the stub).
+    jump(X86::eax);
+
+    // (3b) Lazy-link trampoline: same shape as (3a), but finishes by calling
+    // the lazy-link stub so the call site gets patched to call directly.
+    Label virtualCallLinkBegin = align();
+
+    // Load the callee CodeBlock* into eax
+    loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_body)), X86::eax);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(FunctionBodyNode, m_code)), X86::eax);
+    Jump hasCodeBlock2 = jnzPtr(X86::eax);
+    pop(X86::ebx);
+    restoreArgumentReference();
+    Jump callJSFunction2 = call();
+    emitGetJITStubArg(1, X86::ecx);
+    emitGetJITStubArg(3, X86::edx);
+    push(X86::ebx);
+    hasCodeBlock2.link(this);
+
+    // Check argCount matches callee arity.
+    Jump arityCheckOkay2 = je32(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_numParameters)), X86::edx);
+    pop(X86::ebx);
+    emitPutJITStubArg(X86::ebx, 2);
+    emitPutJITStubArg(X86::eax, 4);
+    restoreArgumentReference();
+    Jump callArityCheck2 = call();
+    move(X86::edx, callFrameRegister);
+    emitGetJITStubArg(1, X86::ecx);
+    emitGetJITStubArg(3, X86::edx);
+    push(X86::ebx);
+    arityCheckOkay2.link(this);
+
+    compileOpCallInitializeCallFrame();
+
+    pop(X86::ebx);
+    emitPutJITStubArg(X86::ebx, 2);
+    restoreArgumentReference();
+    Jump callLazyLinkCall = call();
+    push(X86::ebx);
+
+    jump(X86::eax);
+
+    // (3c) Fully virtual call: no linking — load the callee's JIT code
+    // address from its CodeBlock and jump to it.
+    Label virtualCallBegin = align();
+
+    // Load the callee CodeBlock* into eax
+    loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_body)), X86::eax);
+    loadPtr(Address(X86::eax, FIELD_OFFSET(FunctionBodyNode, m_code)), X86::eax);
+    Jump hasCodeBlock3 = jnzPtr(X86::eax);
+    pop(X86::ebx);
+    restoreArgumentReference();
+    Jump callJSFunction3 = call();
+    emitGetJITStubArg(1, X86::ecx);
+    emitGetJITStubArg(3, X86::edx);
+    push(X86::ebx);
+    hasCodeBlock3.link(this);
+
+    // Check argCount matches callee arity.
+    Jump arityCheckOkay3 = je32(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_numParameters)), X86::edx);
+    pop(X86::ebx);
+    emitPutJITStubArg(X86::ebx, 2);
+    emitPutJITStubArg(X86::eax, 4);
+    restoreArgumentReference();
+    Jump callArityCheck3 = call();
+    move(X86::edx, callFrameRegister);
+    emitGetJITStubArg(1, X86::ecx);
+    emitGetJITStubArg(3, X86::edx);
+    push(X86::ebx);
+    arityCheckOkay3.link(this);
+
+    compileOpCallInitializeCallFrame();
+
+    // load ctiCode from the new codeBlock.
+    loadPtr(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_jitCode)), X86::eax);
+
+    jump(X86::eax);
+
+    // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
+    m_interpreter->m_executablePool = m_globalData->poolForSize(m_assembler.size());
+    void* code = m_assembler.executableCopy(m_interpreter->m_executablePool.get());
+    PatchBuffer patchBuffer(code);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+    // Any failure in the length fast paths falls back to the generic
+    // get_by_id fail stubs.
+    patchBuffer.link(array_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
+    patchBuffer.link(array_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
+    patchBuffer.link(array_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
+    patchBuffer.link(string_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
+    patchBuffer.link(string_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
+    patchBuffer.link(string_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail));
+
+    m_interpreter->m_ctiArrayLengthTrampoline = patchBuffer.addressOf(arrayLengthBegin);
+    m_interpreter->m_ctiStringLengthTrampoline = patchBuffer.addressOf(stringLengthBegin);
+#endif
+    patchBuffer.link(callArityCheck1, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
+    patchBuffer.link(callArityCheck2, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
+    patchBuffer.link(callArityCheck3, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck));
+    patchBuffer.link(callJSFunction1, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction));
+    patchBuffer.link(callJSFunction2, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction));
+    patchBuffer.link(callJSFunction3, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction));
+    patchBuffer.link(callDontLazyLinkCall, reinterpret_cast<void*>(Interpreter::cti_vm_dontLazyLinkCall));
+    patchBuffer.link(callLazyLinkCall, reinterpret_cast<void*>(Interpreter::cti_vm_lazyLinkCall));
+
+    m_interpreter->m_ctiVirtualCallPreLink = patchBuffer.addressOf(virtualCallPreLinkBegin);
+    m_interpreter->m_ctiVirtualCallLink = patchBuffer.addressOf(virtualCallLinkBegin);
+    m_interpreter->m_ctiVirtualCall = patchBuffer.addressOf(virtualCallBegin);
+}
+
+// Emits code to read register 'index' out of a JSVariableObject's register
+// array into 'dst': variableObject->d->registers[index].
+void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
+{
+    loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject, d)), dst);
+    loadPtr(Address(dst, FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers)), dst);
+    loadPtr(Address(dst, index * sizeof(Register)), dst);
+}
+
+// Emits code to store 'src' into register 'index' of a JSVariableObject's
+// register array: variableObject->d->registers[index] = src.
+// Note: clobbers 'variableObject', reusing it to hold the registers pointer.
+void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index)
+{
+    loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject, d)), variableObject);
+    loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers)), variableObject);
+    storePtr(src, Address(variableObject, index * sizeof(Register)));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JIT.h b/src/3rdparty/webkit/JavaScriptCore/jit/JIT.h
new file mode 100644
index 0000000..931eb3b
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JIT.h
@@ -0,0 +1,530 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JIT_h
+#define JIT_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#define WTF_USE_CTI_REPATCH_PIC 1
+
+#include "Interpreter.h"
+#include "Opcode.h"
+#include "RegisterFile.h"
+#include "MacroAssembler.h"
+#include "Profiler.h"
+#include <wtf/AlwaysInline.h>
+#include <wtf/Vector.h>
+
+#define STUB_ARGS_offset 0x0C
+#define STUB_ARGS_code (STUB_ARGS_offset)
+#define STUB_ARGS_registerFile (STUB_ARGS_offset + 1)
+#define STUB_ARGS_callFrame (STUB_ARGS_offset + 2)
+#define STUB_ARGS_exception (STUB_ARGS_offset + 3)
+#define STUB_ARGS_profilerReference (STUB_ARGS_offset + 4)
+#define STUB_ARGS_globalData (STUB_ARGS_offset + 5)
+
+#define ARG_callFrame static_cast<CallFrame*>(ARGS[STUB_ARGS_callFrame])
+#define ARG_registerFile static_cast<RegisterFile*>(ARGS[STUB_ARGS_registerFile])
+#define ARG_exception static_cast<JSValuePtr*>(ARGS[STUB_ARGS_exception])
+#define ARG_profilerReference static_cast<Profiler**>(ARGS[STUB_ARGS_profilerReference])
+#define ARG_globalData static_cast<JSGlobalData*>(ARGS[STUB_ARGS_globalData])
+
+#define ARG_setCallFrame(newCallFrame) (ARGS[STUB_ARGS_callFrame] = (newCallFrame))
+
+#define ARG_src1 JSValuePtr::decode(static_cast<JSValueEncodedAsPointer*>(ARGS[1]))
+#define ARG_src2 JSValuePtr::decode(static_cast<JSValueEncodedAsPointer*>(ARGS[2]))
+#define ARG_src3 JSValuePtr::decode(static_cast<JSValueEncodedAsPointer*>(ARGS[3]))
+#define ARG_src4 JSValuePtr::decode(static_cast<JSValueEncodedAsPointer*>(ARGS[4]))
+#define ARG_src5 JSValuePtr::decode(static_cast<JSValueEncodedAsPointer*>(ARGS[5]))
+#define ARG_id1 static_cast<Identifier*>(ARGS[1])
+#define ARG_id2 static_cast<Identifier*>(ARGS[2])
+#define ARG_id3 static_cast<Identifier*>(ARGS[3])
+#define ARG_id4 static_cast<Identifier*>(ARGS[4])
+#define ARG_int1 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[1]))
+#define ARG_int2 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[2]))
+#define ARG_int3 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[3]))
+#define ARG_int4 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[4]))
+#define ARG_int5 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[5]))
+#define ARG_int6 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[6]))
+#define ARG_func1 static_cast<FuncDeclNode*>(ARGS[1])
+#define ARG_funcexp1 static_cast<FuncExprNode*>(ARGS[1])
+#define ARG_regexp1 static_cast<RegExp*>(ARGS[1])
+#define ARG_pni1 static_cast<JSPropertyNameIterator*>(ARGS[1])
+#define ARG_returnAddress2 static_cast<void*>(ARGS[2])
+#define ARG_codeBlock4 static_cast<CodeBlock*>(ARGS[4])
+
+#define STUB_RETURN_ADDRESS_SLOT (ARGS[-1])
+
+namespace JSC {
+
+ class CodeBlock;
+ class JSPropertyNameIterator;
+ class Interpreter;
+ class Register;
+ class RegisterFile;
+ class ScopeChainNode;
+ class SimpleJumpTable;
+ class StringJumpTable;
+ class StructureChain;
+
+ struct CallLinkInfo;
+ struct Instruction;
+ struct OperandTypes;
+ struct PolymorphicAccessStructureList;
+ struct StructureStubInfo;
+
+ typedef JSValueEncodedAsPointer* (JIT_STUB *CTIHelper_j)(STUB_ARGS);
+ typedef JSObject* (JIT_STUB *CTIHelper_o)(STUB_ARGS);
+ typedef JSPropertyNameIterator* (JIT_STUB *CTIHelper_p)(STUB_ARGS);
+ typedef void (JIT_STUB *CTIHelper_v)(STUB_ARGS);
+ typedef void* (JIT_STUB *CTIHelper_s)(STUB_ARGS);
+ typedef int (JIT_STUB *CTIHelper_b)(STUB_ARGS);
+ typedef VoidPtrPair (JIT_STUB *CTIHelper_2)(STUB_ARGS);
+
+    // Records a call site emitted during code generation.  After the code is
+    // copied into executable memory, JIT::privateCompile() links 'from' to
+    // 'to' (when non-zero) and uses 'bytecodeIndex' to build the CodeBlock's
+    // native-PC-to-bytecode map.
+    struct CallRecord {
+        MacroAssembler::Jump from;
+        unsigned bytecodeIndex;
+        void* to;
+
+        CallRecord()
+        {
+        }
+
+        CallRecord(MacroAssembler::Jump from, unsigned bytecodeIndex, void* to = 0)
+            : from(from)
+            , bytecodeIndex(bytecodeIndex)
+            , to(to)
+        {
+        }
+    };
+
+    // Pairs an emitted jump with the bytecode index it should branch to,
+    // so the destination can be resolved once that bytecode's native
+    // address is known.
+    struct JumpTable {
+        MacroAssembler::Jump from;
+        unsigned toBytecodeIndex;
+
+        JumpTable(MacroAssembler::Jump f, unsigned t)
+            : from(f)
+            , toBytecodeIndex(t)
+        {
+        }
+    };
+
+    // A slow-case branch emitted during the main pass: 'from' is the jump
+    // taken when the fast path fails, 'to' is the bytecode index whose slow
+    // path should handle it.  'hint' carries optional extra per-site data
+    // (defaults to 0).
+    struct SlowCaseEntry {
+        MacroAssembler::Jump from;
+        unsigned to;
+        unsigned hint;
+
+        SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0)
+            : from(f)
+            , to(t)
+            , hint(h)
+        {
+        }
+    };
+
+    // Describes one switch statement encountered during compilation.
+    // JIT::privateCompile() uses these records to translate the jump
+    // table's bytecode offsets into native code addresses.  Immediate and
+    // Character switches share a SimpleJumpTable; String switches use a
+    // StringJumpTable (hence the union).
+    struct SwitchRecord {
+        enum Type {
+            Immediate,
+            Character,
+            String
+        };
+
+        Type type;
+
+        union {
+            SimpleJumpTable* simpleJumpTable;
+            StringJumpTable* stringJumpTable;
+        } jumpTable;
+
+        // Bytecode index of the switch opcode itself.
+        unsigned bytecodeIndex;
+        // Offset (relative to the switch opcode) of the default target.
+        unsigned defaultOffset;
+
+        SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset, Type type)
+            : type(type)
+            , bytecodeIndex(bytecodeIndex)
+            , defaultOffset(defaultOffset)
+        {
+            this->jumpTable.simpleJumpTable = jumpTable;
+        }
+
+        SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset)
+            : type(String)
+            , bytecodeIndex(bytecodeIndex)
+            , defaultOffset(defaultOffset)
+        {
+            this->jumpTable.stringJumpTable = jumpTable;
+        }
+    };
+
+    // Labels recorded per property-access site during compilation;
+    // JIT::privateCompile() resolves them to final addresses and copies
+    // them into the CodeBlock's StructureStubInfo entries.
+    struct PropertyStubCompilationInfo {
+        MacroAssembler::Jump callReturnLocation;
+        MacroAssembler::Label hotPathBegin;
+    };
+
+    // Labels recorded per call site during compilation; JIT::privateCompile()
+    // resolves them to final addresses and copies them into the CodeBlock's
+    // CallLinkInfo entries (used to patch calls when linking).
+    struct StructureStubCompilationInfo {
+        MacroAssembler::DataLabelPtr hotPathBegin;
+        MacroAssembler::Jump hotPathOther;
+        MacroAssembler::Jump callReturnLocation;
+        MacroAssembler::Label coldPathOther;
+    };
+
+ extern "C" {
+ JSValueEncodedAsPointer* ctiTrampoline(
+#if PLATFORM(X86_64)
+ // FIXME: (bug #22910) this will force all arguments onto the stack (regparm(0) does not appear to have any effect).
+ // We can allow register passing here, and move the writes of these values into the trampoline.
+ void*, void*, void*, void*, void*, void*,
+#endif
+ void* code, RegisterFile*, CallFrame*, JSValuePtr* exception, Profiler**, JSGlobalData*);
+ void ctiVMThrowTrampoline();
+ };
+
+ void ctiSetReturnAddress(void** where, void* what);
+ void ctiPatchCallByReturnAddress(void* where, void* what);
+
+ class JIT : private MacroAssembler {
+ using MacroAssembler::Jump;
+ using MacroAssembler::JumpList;
+ using MacroAssembler::Label;
+
+#if PLATFORM(X86_64)
+ static const RegisterID timeoutCheckRegister = X86::r12;
+ static const RegisterID callFrameRegister = X86::r13;
+#else
+ static const RegisterID timeoutCheckRegister = X86::esi;
+ static const RegisterID callFrameRegister = X86::edi;
+#endif
+
+ static const int patchGetByIdDefaultStructure = -1;
+ // Magic number - initial offset cannot be representable as a signed 8bit value, or the X86Assembler
+ // will compress the displacement, and we may not be able to fit a patched offset.
+ static const int patchGetByIdDefaultOffset = 256;
+
+#if USE(JIT_STUB_ARGUMENT_REGISTER)
+#if PLATFORM(X86_64)
+ static const int ctiArgumentInitSize = 3;
+#else
+ static const int ctiArgumentInitSize = 2;
+#endif
+#elif USE(JIT_STUB_ARGUMENT_STACK)
+ static const int ctiArgumentInitSize = 4;
+#else // JIT_STUB_ARGUMENT_VA_LIST
+ static const int ctiArgumentInitSize = 0;
+#endif
+
+#if PLATFORM(X86_64)
+ // These architecture specific value are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdPropertyMapOffset = 31;
+ // These architecture specific value are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdPropertyMapOffset = 31;
+ static const int patchOffsetGetByIdPutResult = 31;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 40 + ctiArgumentInitSize;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 30 + ctiArgumentInitSize;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 9;
+#else
+ // These architecture specific value are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdPropertyMapOffset = 22;
+ // These architecture specific value are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 7;
+ static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdPropertyMapOffset = 22;
+ static const int patchOffsetGetByIdPutResult = 22;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 31 + ctiArgumentInitSize;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 21 + ctiArgumentInitSize;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 6;
+#endif
+
+ public:
+        // Compiles codeBlock to native code using a fresh JIT instance.
+        static void compile(JSGlobalData* globalData, CodeBlock* codeBlock)
+        {
+            JIT jit(globalData, codeBlock);
+            jit.privateCompile();
+        }
+
+        // Generates a get_by_id stub reading the property directly off the
+        // base object (own-property access) for the given structure/offset.
+        static void compileGetByIdSelf(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+        {
+            JIT jit(globalData, codeBlock);
+            jit.privateCompileGetByIdSelf(stubInfo, structure, cachedOffset, returnAddress);
+        }
+
+ static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame);
+ }
+
+#if USE(CTI_REPATCH_PIC)
+ static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, cachedOffset);
+ }
+ static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, cachedOffset, callFrame);
+ }
+ static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, cachedOffset, callFrame);
+ }
+#endif
+
+ static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame);
+ }
+
+ static void compilePutByIdReplace(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompilePutByIdReplace(stubInfo, structure, cachedOffset, returnAddress);
+ }
+
+ static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
+ }
+
+ static void compileCTIMachineTrampolines(JSGlobalData* globalData)
+ {
+ JIT jit(globalData);
+ jit.privateCompileCTIMachineTrampolines();
+ }
+
+ static void patchGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress);
+ static void patchPutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress);
+
+ static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, void* returnAddress)
+ {
+ JIT jit(globalData, codeBlock);
+ return jit.privateCompilePatchGetArrayLength(returnAddress);
+ }
+
+ static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount);
+ static void unlinkCall(CallLinkInfo*);
+
+ inline static JSValuePtr execute(void* code, RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValuePtr* exception)
+ {
+ return JSValuePtr::decode(ctiTrampoline(
+#if PLATFORM(X86_64)
+ 0, 0, 0, 0, 0, 0,
+#endif
+ code, registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
+ }
+
+ private:
+ JIT(JSGlobalData*, CodeBlock* = 0);
+
+ void privateCompileMainPass();
+ void privateCompileLinkPass();
+ void privateCompileSlowCases();
+ void privateCompile();
+ void privateCompileGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress);
+ void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame);
+#if USE(CTI_REPATCH_PIC)
+ void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
+ void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
+ void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame);
+#endif
+ void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame);
+ void privateCompilePutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress);
+ void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, void* returnAddress);
+
+ void privateCompileCTIMachineTrampolines();
+ void privateCompilePatchGetArrayLength(void* returnAddress);
+
+ void addSlowCase(Jump);
+ void addJump(Jump, int);
+ void emitJumpSlowToHot(Jump, int);
+
+ void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex);
+ void compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, unsigned propertyAccessInstructionIndex);
+ void compilePutByIdSlowCase(int baseVReg, Identifier* ident, int valueVReg, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex);
+ void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
+ void compileOpCallInitializeCallFrame();
+ void compileOpCallSetupArgs(Instruction*);
+ void compileOpCallEvalSetupArgs(Instruction*);
+ void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
+ void compileOpConstructSetupArgs(Instruction*);
+ enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
+ void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
+ void putDoubleResultToJSNumberCellOrJSImmediate(X86Assembler::XMMRegisterID xmmSource, RegisterID jsNumberCell, unsigned dst, X86Assembler::JmpSrc* wroteJSNumberCell, X86Assembler::XMMRegisterID tempXmm, RegisterID tempReg1, RegisterID tempReg2);
+
+ void compileFastArith_op_add(Instruction*);
+ void compileFastArith_op_mul(Instruction*);
+ void compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2);
+ void compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2);
+ void compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2);
+ void compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2);
+ void compileFastArith_op_pre_inc(unsigned srcDst);
+ void compileFastArith_op_pre_dec(unsigned srcDst);
+ void compileFastArith_op_post_inc(unsigned result, unsigned srcDst);
+ void compileFastArith_op_post_dec(unsigned result, unsigned srcDst);
+ void compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void compileFastArithSlow_op_mod(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
+ void compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
+ void compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
+ void compileFastArithSlow_op_rshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
+ void compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
+ void compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
+ void compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
+ void compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
+ void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+ void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+
+ void emitGetVirtualRegister(int src, RegisterID dst);
+ void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
+ void emitPutVirtualRegister(unsigned dst, RegisterID from = X86::eax);
+
+ void emitPutJITStubArg(RegisterID src, unsigned argumentNumber);
+ void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch);
+ void emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber);
+ void emitPutJITStubArgConstant(void* value, unsigned argumentNumber);
+ void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
+
+ void emitInitRegister(unsigned dst);
+
+ void emitPutCTIParam(void* value, unsigned name);
+ void emitPutCTIParam(RegisterID from, unsigned name);
+ void emitGetCTIParam(unsigned name, RegisterID to);
+
+ void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
+ void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
+ void emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, RegisterID to);
+
+ JSValuePtr getConstantOperand(unsigned src);
+ int32_t getConstantOperandImmediateInt(unsigned src);
+ bool isOperandConstantImmediateInt(unsigned src);
+
+ Jump emitJumpIfJSCell(RegisterID);
+ void emitJumpSlowCaseIfJSCell(RegisterID);
+ Jump emitJumpIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
+
+ Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+ {
+ return iter++->from;
+ }
+ void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+ {
+ iter->from.link(this);
+ ++iter;
+ }
+ void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg);
+
+ JIT::Jump emitJumpIfImmNum(RegisterID);
+ void emitJumpSlowCaseIfNotImmNum(RegisterID);
+ void emitJumpSlowCaseIfNotImmNums(RegisterID, RegisterID, RegisterID);
+
+ Jump checkStructure(RegisterID reg, Structure* structure);
+
+#if !USE(ALTERNATE_JSIMMEDIATE)
+ void emitFastArithDeTagImmediate(RegisterID);
+ Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
+#endif
+ void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
+ void emitFastArithImmToInt(RegisterID);
+ void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
+
+ void emitTagAsBoolImmediate(RegisterID reg);
+
+ void restoreArgumentReference();
+ void restoreArgumentReferenceForTrampoline();
+
+ Jump emitNakedCall(RegisterID);
+ Jump emitNakedCall(void* function);
+ Jump emitCTICall_internal(void*);
+ Jump emitCTICall(CTIHelper_j helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Jump emitCTICall(CTIHelper_o helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Jump emitCTICall(CTIHelper_p helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Jump emitCTICall(CTIHelper_v helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Jump emitCTICall(CTIHelper_s helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Jump emitCTICall(CTIHelper_b helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Jump emitCTICall(CTIHelper_2 helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+
+ void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
+ void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);
+
+ void emitSlowScriptCheck();
+#ifndef NDEBUG
+ void printBytecodeOperandTypes(unsigned src1, unsigned src2);
+#endif
+
+ void killLastResultRegister();
+
+ Interpreter* m_interpreter;
+ JSGlobalData* m_globalData;
+ CodeBlock* m_codeBlock;
+
+ Vector<CallRecord> m_calls;
+ Vector<Label> m_labels;
+ Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
+ Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
+ Vector<JumpTable> m_jmpTable;
+
+ struct JSRInfo {
+ DataLabelPtr storeLocation;
+ Label target;
+
+ JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
+ : storeLocation(storeLocation)
+ , target(targetLocation)
+ {
+ }
+ };
+
+ unsigned m_bytecodeIndex;
+ Vector<JSRInfo> m_jsrSites;
+ Vector<SlowCaseEntry> m_slowCases;
+ Vector<SwitchRecord> m_switches;
+
+ int m_lastResultBytecodeRegister;
+ unsigned m_jumpTargetsPosition;
+ };
+}
+
+#endif // ENABLE(JIT)
+
+#endif // JIT_h
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITArithmetic.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JITArithmetic.cpp
new file mode 100644
index 0000000..f95bab8
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITArithmetic.cpp
@@ -0,0 +1,769 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+#define __ m_assembler.
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2)
+{
+ emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
+ // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmNums'? - we *probably* ought to be consistent.
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::ecx);
+ emitFastArithImmToInt(X86::eax);
+ emitFastArithImmToInt(X86::ecx);
+#if !PLATFORM(X86)
+ // Mask with 0x1f as per ecma-262 11.7.2 step 7.
+ // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
+ and32(Imm32(0x1f), X86::ecx);
+#endif
+ lshift32(X86::ecx, X86::eax);
+#if !USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(joAdd32(X86::eax, X86::eax));
+ signExtend32ToPtr(X86::eax, X86::eax);
+#endif
+ emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitPutVirtualRegister(result);
+}
+void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ UNUSED_PARAM(op1);
+ UNUSED_PARAM(op2);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+#else
+ // If we are limited to 32-bit immediates there is a third slow case, which required the operands to have been reloaded.
+ Jump notImm1 = getSlowCase(iter);
+ Jump notImm2 = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
+ notImm1.link(this);
+ notImm2.link(this);
+#endif
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::ecx, 2);
+ emitCTICall(Interpreter::cti_op_lshift);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2)
+{
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+ // Mask with 0x1f as per ecma-262 11.7.2 step 7.
+#if USE(ALTERNATE_JSIMMEDIATE)
+ rshift32(Imm32(JSImmediate::getTruncatedUInt32(getConstantOperand(op2)) & 0x1f), X86::eax);
+#else
+ rshiftPtr(Imm32(JSImmediate::getTruncatedUInt32(getConstantOperand(op2)) & 0x1f), X86::eax);
+#endif
+ } else {
+ emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::ecx);
+ emitFastArithImmToInt(X86::ecx);
+#if !PLATFORM(X86)
+ // Mask with 0x1f as per ecma-262 11.7.2 step 7.
+ // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
+ and32(Imm32(0x1f), X86::ecx);
+#endif
+#if USE(ALTERNATE_JSIMMEDIATE)
+ rshift32(X86::ecx, X86::eax);
+#else
+ rshiftPtr(X86::ecx, X86::eax);
+#endif
+ }
+#if USE(ALTERNATE_JSIMMEDIATE)
+ emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+#else
+ orPtr(Imm32(JSImmediate::TagTypeInteger), X86::eax);
+#endif
+ emitPutVirtualRegister(result);
+}
+void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ if (isOperandConstantImmediateInt(op2))
+ emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
+ else {
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::ecx, 2);
+ }
+
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_rshift);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2)
+{
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t imm = JSImmediate::intValue(getConstantOperand(op1));
+ andPtr(Imm32(imm), X86::eax);
+ if (imm >= 0)
+ emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+#else
+ andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), X86::eax);
+#endif
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t imm = JSImmediate::intValue(getConstantOperand(op2));
+ andPtr(Imm32(imm), X86::eax);
+ if (imm >= 0)
+ emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+#else
+ andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), X86::eax);
+#endif
+ } else {
+ emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
+ andPtr(X86::edx, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+ }
+ emitPutVirtualRegister(result);
+}
+void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ if (isOperandConstantImmediateInt(op1)) {
+ emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
+ emitPutJITStubArg(X86::eax, 2);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
+ } else {
+ emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
+ emitPutJITStubArg(X86::edx, 2);
+ }
+ emitCTICall(Interpreter::cti_op_bitand);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
+{
+ emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::ecx);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(jePtr(X86::ecx, ImmPtr(JSValuePtr::encode(JSImmediate::zeroImmediate()))));
+ mod32(X86::ecx, X86::eax, X86::edx);
+#else
+ emitFastArithDeTagImmediate(X86::eax);
+ addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
+ mod32(X86::ecx, X86::eax, X86::edx);
+ signExtend32ToPtr(X86::edx, X86::edx);
+#endif
+ emitFastArithReTagImmediate(X86::edx, X86::eax);
+ emitPutVirtualRegister(result);
+}
+void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vector<SlowCaseEntry>::iterator& iter)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+#else
+ Jump notImm1 = getSlowCase(iter);
+ Jump notImm2 = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitFastArithReTagImmediate(X86::ecx, X86::ecx);
+ notImm1.link(this);
+ notImm2.link(this);
+#endif
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::ecx, 2);
+ emitCTICall(Interpreter::cti_op_mod);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::compileFastArith_op_add(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ // FIXME: investigate performing a 31-bit add here (can we preserve upper bit & detect overflow from low word to high?)
+ // (or, detect carry? - if const is positive, will only carry when overflowing from negative to positive?)
+ addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
+ emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+#else
+ addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
+ signExtend32ToPtr(X86::eax, X86::eax);
+#endif
+ emitPutVirtualRegister(result);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ emitFastArithImmToInt(X86::eax);
+ addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
+ emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+#else
+ addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
+ signExtend32ToPtr(X86::eax, X86::eax);
+#endif
+ emitPutVirtualRegister(result);
+ } else {
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ if (types.first().mightBeNumber() && types.second().mightBeNumber())
+ compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
+ else {
+ emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
+ emitCTICall(Interpreter::cti_op_add);
+ emitPutVirtualRegister(result);
+ }
+ }
+}
+void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op1)) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
+#else
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax);
+ notImm.link(this);
+ emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
+ emitPutJITStubArg(X86::eax, 2);
+#endif
+ emitCTICall(Interpreter::cti_op_add);
+ emitPutVirtualRegister(result);
+ } else if (isOperandConstantImmediateInt(op2)) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
+#else
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax);
+ notImm.link(this);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
+#endif
+ emitCTICall(Interpreter::cti_op_add);
+ emitPutVirtualRegister(result);
+ } else {
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
+ compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
+ }
+}
+
+void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ // For now, only plant a fast int case if the constant operand is greater than zero.
+ int32_t value;
+ if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
+ emitGetVirtualRegister(op2, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
+#else
+ emitFastArithDeTagImmediate(X86::eax);
+ addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
+ signExtend32ToPtr(X86::eax, X86::eax);
+#endif
+ emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitPutVirtualRegister(result);
+ } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
+ emitGetVirtualRegister(op1, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
+#else
+ emitFastArithDeTagImmediate(X86::eax);
+ addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
+ signExtend32ToPtr(X86::eax, X86::eax);
+#endif
+ emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitPutVirtualRegister(result);
+ } else
+ compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
+}
+void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
+ || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
+ emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
+ emitCTICall(Interpreter::cti_op_mul);
+ emitPutVirtualRegister(result);
+ } else
+ compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
+}
+
+void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
+{
+ emitGetVirtualRegister(srcDst, X86::eax);
+ move(X86::eax, X86::edx);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(joAdd32(Imm32(1), X86::edx));
+ emitFastArithIntToImmNoCheck(X86::edx, X86::edx);
+#else
+ addSlowCase(joAdd32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::edx));
+ signExtend32ToPtr(X86::edx, X86::edx);
+#endif
+ emitPutVirtualRegister(srcDst, X86::edx);
+ emitPutVirtualRegister(result);
+}
+void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_post_inc);
+ emitPutVirtualRegister(srcDst, X86::edx);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
+{
+ emitGetVirtualRegister(srcDst, X86::eax);
+ move(X86::eax, X86::edx);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(joSub32(Imm32(1), X86::edx));
+ emitFastArithIntToImmNoCheck(X86::edx, X86::edx);
+#else
+ addSlowCase(joSub32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::edx));
+ signExtend32ToPtr(X86::edx, X86::edx);
+#endif
+ emitPutVirtualRegister(srcDst, X86::edx);
+ emitPutVirtualRegister(result);
+}
+void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_post_dec);
+ emitPutVirtualRegister(srcDst, X86::edx);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
+{
+ emitGetVirtualRegister(srcDst, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ // FIXME: Could add ptr & specify int64; no need to re-sign-extend?
+ addSlowCase(joAdd32(Imm32(1), X86::eax));
+ emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+#else
+ addSlowCase(joAdd32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::eax));
+ signExtend32ToPtr(X86::eax, X86::eax);
+#endif
+ emitPutVirtualRegister(srcDst);
+}
+void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+{
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(srcDst, X86::eax);
+ notImm.link(this);
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_pre_inc);
+ emitPutVirtualRegister(srcDst);
+}
+
+void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
+{
+ emitGetVirtualRegister(srcDst, X86::eax);
+ emitJumpSlowCaseIfNotImmNum(X86::eax);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(joSub32(Imm32(1), X86::eax));
+ emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+#else
+ addSlowCase(joSub32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::eax));
+ signExtend32ToPtr(X86::eax, X86::eax);
+#endif
+ emitPutVirtualRegister(srcDst);
+}
+void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+{
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(srcDst, X86::eax);
+ notImm.link(this);
+ emitPutJITStubArg(X86::eax, 1);
+ emitCTICall(Interpreter::cti_op_pre_dec);
+ emitPutVirtualRegister(srcDst);
+}
+
+
+#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes)
+{
+ emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx);
+ emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx);
+ if (opcodeID == op_add)
+ emitCTICall(Interpreter::cti_op_add);
+ else if (opcodeID == op_sub)
+ emitCTICall(Interpreter::cti_op_sub);
+ else {
+ ASSERT(opcodeID == op_mul);
+ emitCTICall(Interpreter::cti_op_mul);
+ }
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned, unsigned, unsigned, OperandTypes)
+{
+ ASSERT_NOT_REACHED();
+}
+
+#else
+
+typedef X86Assembler::JmpSrc JmpSrc;
+typedef X86Assembler::JmpDst JmpDst;
+typedef X86Assembler::XMMRegisterID XMMRegisterID;
+
+#if PLATFORM(MAC)
+
+// On Mac the SSE2 check is compile-time constant: every x86 Mac has SSE2.
+static inline bool isSSE2Present()
+{
+    return true; // All X86 Macs are guaranteed to support at least SSE2
+}
+
+#else
+
+// Runtime SSE2 detection via CPUID, cached in a function-local static so the
+// CPUID instruction executes only once. NOTE(review): only the MSVC inline-asm
+// path actually queries CPUID; on other compilers 'flags' stays 0, so this
+// reports SSE2 as absent and the double fast paths are never emitted (see the
+// FIXME below).
+static bool isSSE2Present()
+{
+    static const int SSE2FeatureBit = 1 << 26; // EDX bit 26 of CPUID function 1
+    struct SSE2Check {
+        SSE2Check()
+        {
+            int flags;
+#if COMPILER(MSVC)
+            _asm {
+                mov eax, 1 // cpuid function 1 gives us the standard feature set
+                cpuid;
+                mov flags, edx;
+            }
+#else
+            flags = 0;
+            // FIXME: Add GCC code to do above asm
+#endif
+            present = (flags & SSE2FeatureBit) != 0;
+        }
+        bool present;
+    };
+    static SSE2Check check; // constructor runs once, performing the probe
+    return check.present;
+}
+
+#endif
+
+/*
+ This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
+
+ In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
+ is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
+
+ However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
+ control will fall through from the code planted.
+*/
+// See the comment block above: stores the double in xmmSource either as a
+// JSImmediate (when exactly representable, falling through) or into the
+// reusable JSNumberCell (planting a jump recorded in *wroteJSNumberCell).
+void JIT::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
+{
+    // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
+    __ cvttsd2si_rr(xmmSource, tempReg1);
+    // The add/sar pair doubles then halves with sign propagation, clipping the
+    // integer to the 31-bit range a JSImmediate payload can hold.
+    __ addl_rr(tempReg1, tempReg1);
+    __ sarl_i8r(1, tempReg1);
+    __ cvtsi2sd_rr(tempReg1, tempXmm);
+    // Compare & branch if immediate.
+    __ ucomis_rr(tempXmm, xmmSource);
+    JmpSrc resultIsImm = __ je();
+    JmpDst resultLookedLikeImmButActuallyIsnt = __ label();
+
+    // Store the result to the JSNumberCell and jump.
+    __ movsd_rm(xmmSource, FIELD_OFFSET(JSNumberCell, m_value), jsNumberCell);
+    if (jsNumberCell != X86::eax)
+        __ movl_rr(jsNumberCell, X86::eax);
+    emitPutVirtualRegister(dst);
+    *wroteJSNumberCell = __ jmp();
+
+    __ link(resultIsImm, __ label());
+    // value == (double)(JSImmediate)value... or at least, it looks that way...
+    // ucomi will report that (0 == -0), and will report true if either input is NaN (result is unordered).
+    __ link(__ jp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
+    // Distinguish -0 from +0: check the sign bit in the top word of the double.
+    __ pextrw_irr(3, xmmSource, tempReg2);
+    __ cmpl_ir(0x8000, tempReg2);
+    __ link(__ je(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
+    // Yes it really really really is representable as a JSImmediate.
+    emitFastArithIntToImmNoCheck(tempReg1, X86::eax);
+    emitPutVirtualRegister(dst);
+}
+
+// SSE2-optimized fast path for op_add / op_sub / op_mul. When either operand's
+// result type is a reusable JSNumberCell (and SSE2 is available) a double path
+// is emitted that reuses that cell for the result; otherwise only the tagged
+// immediate-integer path is generated. Slow cases are registered for
+// non-numeric operands, overflow, and (for op_mul) a possible -0 result.
+void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
+{
+    Structure* numberStructure = m_globalData->numberStructure.get();
+    JmpSrc wasJSNumberCell1;
+    JmpSrc wasJSNumberCell1b;
+    JmpSrc wasJSNumberCell2;
+    JmpSrc wasJSNumberCell2b;
+
+    emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);
+
+    if (types.second().isReusable() && isSSE2Present()) {
+        ASSERT(types.second().mightBeNumber());
+
+        // Check op2 is a number
+        __ testl_i32r(JSImmediate::TagTypeInteger, X86::edx);
+        JmpSrc op2imm = __ jne();
+        if (!types.second().definitelyIsNumber()) {
+            emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
+            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
+            addSlowCase(__ jne());
+        }
+
+        // (1) In this case src2 is a reusable number cell.
+        //     Slow case if src1 is not a number type.
+        __ testl_i32r(JSImmediate::TagTypeInteger, X86::eax);
+        JmpSrc op1imm = __ jne();
+        if (!types.first().definitelyIsNumber()) {
+            emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
+            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
+            addSlowCase(__ jne());
+        }
+
+        // (1a) if we get here, src1 is also a number cell
+        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
+        JmpSrc loadedDouble = __ jmp();
+        // (1b) if we get here, src1 is an immediate
+        __ link(op1imm, __ label());
+        emitFastArithImmToInt(X86::eax);
+        __ cvtsi2sd_rr(X86::eax, X86::xmm0);
+        // (1c) perform the double-precision operation against src2's cell value.
+        __ link(loadedDouble, __ label());
+        if (opcodeID == op_add)
+            __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
+        else if (opcodeID == op_sub)
+            __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
+        else {
+            ASSERT(opcodeID == op_mul);
+            __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
+        }
+
+        // Write the result, reusing src2's number cell when not immediate-representable.
+        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
+        wasJSNumberCell2b = __ jmp();
+
+        // (2) This handles cases where src2 is an immediate number.
+        //     Two slow cases - either src1 isn't an immediate, or the subtract overflows.
+        __ link(op2imm, __ label());
+        emitJumpSlowCaseIfNotImmNum(X86::eax);
+    } else if (types.first().isReusable() && isSSE2Present()) {
+        ASSERT(types.first().mightBeNumber());
+
+        // Check op1 is a number
+        __ testl_i32r(JSImmediate::TagTypeInteger, X86::eax);
+        JmpSrc op1imm = __ jne();
+        if (!types.first().definitelyIsNumber()) {
+            emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
+            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
+            addSlowCase(__ jne());
+        }
+
+        // (1) In this case src1 is a reusable number cell.
+        //     Slow case if src2 is not a number type.
+        __ testl_i32r(JSImmediate::TagTypeInteger, X86::edx);
+        JmpSrc op2imm = __ jne();
+        if (!types.second().definitelyIsNumber()) {
+            emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
+            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
+            addSlowCase(__ jne());
+        }
+
+        // (1a) if we get here, src2 is also a number cell
+        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
+        JmpSrc loadedDouble = __ jmp();
+        // (1b) if we get here, src2 is an immediate
+        __ link(op2imm, __ label());
+        emitFastArithImmToInt(X86::edx);
+        __ cvtsi2sd_rr(X86::edx, X86::xmm1);
+        // (1c) load src1's double and perform the operation.
+        __ link(loadedDouble, __ label());
+        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
+        if (opcodeID == op_add)
+            __ addsd_rr(X86::xmm1, X86::xmm0);
+        else if (opcodeID == op_sub)
+            __ subsd_rr(X86::xmm1, X86::xmm0);
+        else {
+            ASSERT(opcodeID == op_mul);
+            __ mulsd_rr(X86::xmm1, X86::xmm0);
+        }
+        // NOTE(review): this store/put pair appears redundant - the call to
+        // putDoubleResultToJSNumberCellOrJSImmediate below writes the result
+        // again on both of its paths; confirm against upstream before changing.
+        __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
+        emitPutVirtualRegister(dst);
+
+        // Write the result, reusing src1's number cell when not immediate-representable.
+        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
+        wasJSNumberCell1b = __ jmp();
+
+        // (2) This handles cases where src1 is an immediate number.
+        //     Two slow cases - either src2 isn't an immediate, or the operation overflows.
+        __ link(op1imm, __ label());
+        emitJumpSlowCaseIfNotImmNum(X86::edx);
+    } else
+        emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx);
+
+    // Immediate-integer path: both operands are tagged integers in eax/edx.
+    if (opcodeID == op_add) {
+        emitFastArithDeTagImmediate(X86::eax);
+        __ addl_rr(X86::edx, X86::eax);
+        addSlowCase(__ jo());
+    } else if (opcodeID == op_sub) {
+        __ subl_rr(X86::edx, X86::eax);
+        addSlowCase(__ jo());
+        signExtend32ToPtr(X86::eax, X86::eax);
+        emitFastArithReTagImmediate(X86::eax, X86::eax);
+    } else {
+        ASSERT(opcodeID == op_mul);
+        // convert eax & edx from JSImmediates to ints, and check if either are zero
+        emitFastArithImmToInt(X86::edx);
+        JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
+        __ testl_rr(X86::edx, X86::edx);
+        JmpSrc op2NonZero = __ jne();
+        __ link(op1Zero, __ label());
+        // if either input is zero, add the two together, and check if the result is < 0.
+        // If it is, we have a problem (N < 0), (N * 0) == -0, not representable as a JSImmediate.
+        __ movl_rr(X86::eax, X86::ecx);
+        __ addl_rr(X86::edx, X86::ecx);
+        addSlowCase(__ js());
+        // Skip the above check if neither input is zero
+        __ link(op2NonZero, __ label());
+        __ imull_rr(X86::edx, X86::eax);
+        addSlowCase(__ jo());
+        signExtend32ToPtr(X86::eax, X86::eax);
+        emitFastArithReTagImmediate(X86::eax, X86::eax);
+    }
+    emitPutVirtualRegister(dst);
+
+    // Join point for the double paths, which already wrote their result.
+    if (types.second().isReusable() && isSSE2Present()) {
+        __ link(wasJSNumberCell2, __ label());
+        __ link(wasJSNumberCell2b, __ label());
+    }
+    else if (types.first().isReusable() && isSSE2Present()) {
+        __ link(wasJSNumberCell1, __ label());
+        __ link(wasJSNumberCell1b, __ label());
+    }
+}
+
+// Slow path for the SSE2-optimized binary arithmetic ops: link every slow
+// case registered by compileBinaryArithOp, then call the generic CTI stub.
+// No code is emitted between the link calls, so all entries funnel to the
+// same address; the link order need not match registration order exactly.
+void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
+{
+    linkSlowCase(iter);
+    // Mirror the conditional type-check slow cases registered on the fast path.
+    if (types.second().isReusable() && isSSE2Present()) {
+        if (!types.first().definitelyIsNumber()) {
+            linkSlowCaseIfNotJSCell(iter, src1);
+            linkSlowCase(iter);
+        }
+        if (!types.second().definitelyIsNumber()) {
+            linkSlowCaseIfNotJSCell(iter, src2);
+            linkSlowCase(iter);
+        }
+    } else if (types.first().isReusable() && isSSE2Present()) {
+        if (!types.first().definitelyIsNumber()) {
+            linkSlowCaseIfNotJSCell(iter, src1);
+            linkSlowCase(iter);
+        }
+        if (!types.second().definitelyIsNumber()) {
+            linkSlowCaseIfNotJSCell(iter, src2);
+            linkSlowCase(iter);
+        }
+    }
+    linkSlowCase(iter); // overflow from the integer fast path
+
+    // additional entry point to handle -0 cases.
+    if (opcodeID == op_mul)
+        linkSlowCase(iter);
+
+    emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx);
+    emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx);
+    if (opcodeID == op_add)
+        emitCTICall(Interpreter::cti_op_add);
+    else if (opcodeID == op_sub)
+        emitCTICall(Interpreter::cti_op_sub);
+    else {
+        ASSERT(opcodeID == op_mul);
+        emitCTICall(Interpreter::cti_op_mul);
+    }
+    emitPutVirtualRegister(dst);
+}
+
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITCall.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JITCall.cpp
new file mode 100644
index 0000000..0e85d75
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITCall.cpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+// Invalidates a linked call site by repatching its inline JSFunction compare
+// to a value that can never match.
+void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
+{
+    // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
+    // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
+    // match). Reset the check so it no longer matches.
+    DataLabelPtr::patch(callLinkInfo->hotPathBegin, JSValuePtr::encode(JSImmediate::impossibleValue()));
+}
+
+// Links a call site directly to a compiled callee. The hot-path link is only
+// made when the caller's argument count matches the callee's arity; either
+// way, the compare-to-jump is patched so linking is attempted at most once.
+void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount)
+{
+    // Currently we only link calls with the exact number of arguments.
+    if (callerArgCount == calleeCodeBlock->m_numParameters) {
+        ASSERT(!callLinkInfo->isLinked());
+
+        // Register this site with the callee so it can be unlinked if the callee dies.
+        calleeCodeBlock->addCaller(callLinkInfo);
+
+        DataLabelPtr::patch(callLinkInfo->hotPathBegin, callee);
+        Jump::patch(callLinkInfo->hotPathOther, ctiCode);
+    }
+
+    // patch the instruction that jumps out to the cold path, so that we only try to link once.
+    void* patchCheck = reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(callLinkInfo->hotPathBegin) + patchOffsetOpCallCompareToJump);
+    Jump::patch(patchCheck, callLinkInfo->coldPathOther);
+}
+
+// Fills in the callee-related header slots of the (already rolled) call frame.
+// Expects ecx to hold the callee JSFunction and edx the argument count.
+void JIT::compileOpCallInitializeCallFrame()
+{
+    store32(X86::edx, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+
+    loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), X86::edx); // newScopeChain
+
+    storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
+    storePtr(X86::ecx, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
+    storePtr(X86::edx, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
+}
+
+// Stages the CTI stub arguments for op_call: the callee function (already in
+// ecx), the register offset of the new frame, and the argument count.
+void JIT::compileOpCallSetupArgs(Instruction* instruction)
+{
+    int argCount = instruction[3].u.operand;
+    int registerOffset = instruction[4].u.operand;
+
+    // ecx holds func
+    emitPutJITStubArg(X86::ecx, 1);
+    emitPutJITStubArgConstant(registerOffset, 2);
+    emitPutJITStubArgConstant(argCount, 3);
+}
+
+// Stages the CTI stub arguments for op_call_eval; identical layout to
+// compileOpCallSetupArgs, kept separate per opcode.
+void JIT::compileOpCallEvalSetupArgs(Instruction* instruction)
+{
+    int argCount = instruction[3].u.operand;
+    int registerOffset = instruction[4].u.operand;
+
+    // ecx holds func
+    emitPutJITStubArg(X86::ecx, 1);
+    emitPutJITStubArgConstant(registerOffset, 2);
+    emitPutJITStubArgConstant(argCount, 3);
+}
+
+// Stages the CTI stub arguments for op_construct: in addition to the callee,
+// frame offset and argument count, passes the prototype value and the register
+// that will receive the newly constructed 'this' object.
+void JIT::compileOpConstructSetupArgs(Instruction* instruction)
+{
+    int argCount = instruction[3].u.operand;
+    int registerOffset = instruction[4].u.operand;
+    int proto = instruction[5].u.operand;
+    int thisRegister = instruction[6].u.operand;
+
+    // ecx holds func
+    emitPutJITStubArg(X86::ecx, 1);
+    emitPutJITStubArgConstant(registerOffset, 2);
+    emitPutJITStubArgConstant(argCount, 3);
+    emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
+    emitPutJITStubArgConstant(thisRegister, 5);
+}
+
+#if !ENABLE(JIT_OPTIMIZE_CALL)
+
+// Non-linking build (JIT_OPTIMIZE_CALL off): compiles op_call / op_call_eval /
+// op_construct without call-site patching. JSFunction callees go through the
+// generic virtual-call trampoline; anything else takes the slow case.
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
+{
+    int dst = instruction[1].u.operand;
+    int callee = instruction[2].u.operand;
+    int argCount = instruction[3].u.operand;
+    int registerOffset = instruction[4].u.operand;
+
+    // Handle eval
+    Jump wasEval;
+    if (opcodeID == op_call_eval) {
+        emitGetVirtualRegister(callee, X86::ecx);
+        compileOpCallEvalSetupArgs(instruction);
+
+        // The eval stub returns impossibleValue() when the call was not an eval;
+        // any other result is the completed eval's value.
+        emitCTICall(Interpreter::cti_op_call_eval);
+        wasEval = jnePtr(X86::eax, ImmPtr(JSImmediate::impossibleValue()));
+    }
+
+    emitGetVirtualRegister(callee, X86::ecx);
+    // The arguments have been set up on the hot path for op_call_eval
+    if (opcodeID == op_call)
+        compileOpCallSetupArgs(instruction);
+    else if (opcodeID == op_construct)
+        compileOpConstructSetupArgs(instruction);
+
+    // Check for JSFunctions.
+    emitJumpSlowCaseIfNotJSCell(X86::ecx);
+    addSlowCase(jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr)));
+
+    // First, in the case of a construct, allocate the new object.
+    if (opcodeID == op_construct) {
+        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
+        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+        emitGetVirtualRegister(callee, X86::ecx); // stub call may have clobbered ecx
+    }
+
+    // Speculatively roll the callframe, assuming argCount will match the arity.
+    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+    move(Imm32(argCount), X86::edx);
+
+    emitNakedCall(m_interpreter->m_ctiVirtualCall);
+
+    if (opcodeID == op_call_eval)
+        wasEval.link(this);
+
+    // Put the return value in dst. In the interpreter, op_ret does this.
+    emitPutVirtualRegister(dst);
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
+#endif
+}
+
+// Non-linking build slow path: links the not-a-cell and not-a-JSFunction slow
+// cases and dispatches the callee through the generic (host-function capable)
+// CTI stub.
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
+{
+    int dst = instruction[1].u.operand;
+
+    linkSlowCase(iter); // callee was not a JSCell
+    linkSlowCase(iter); // callee cell was not a JSFunction
+
+    // This handles host functions
+    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
+    // Put the return value in dst. In the interpreter, op_ret does this.
+    emitPutVirtualRegister(dst);
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
+#endif
+}
+
+#else
+
+// Placeholder target for the not-yet-linked hot-path call; the address is
+// patched by linkCall before it can ever execute. Aborts if reached.
+static void unreachable()
+{
+    ASSERT_NOT_REACHED();
+    exit(1);
+}
+
+// Linking build: compiles the hot path of op_call / op_call_eval /
+// op_construct with an inline patchable compare against a cached JSFunction.
+// Until linkCall() patches it, the compare always fails and execution takes
+// the slow case; once linked, the frame is set up inline and the callee is
+// called directly.
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+{
+    int dst = instruction[1].u.operand;
+    int callee = instruction[2].u.operand;
+    int argCount = instruction[3].u.operand;
+    int registerOffset = instruction[4].u.operand;
+
+    // Handle eval
+    Jump wasEval;
+    if (opcodeID == op_call_eval) {
+        emitGetVirtualRegister(callee, X86::ecx);
+        compileOpCallEvalSetupArgs(instruction);
+
+        // The eval stub returns impossibleValue() when the call was not an eval.
+        emitCTICall(Interpreter::cti_op_call_eval);
+        wasEval = jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(JSImmediate::impossibleValue())));
+    }
+
+    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
+    // This deliberately leaves the callee in ecx, used when setting up the stack frame below
+    emitGetVirtualRegister(callee, X86::ecx);
+    DataLabelPtr addressOfLinkedFunctionCheck;
+    Jump jumpToSlow = jnePtrWithPatch(X86::ecx, addressOfLinkedFunctionCheck, ImmPtr(JSValuePtr::encode(JSImmediate::impossibleValue())));
+    addSlowCase(jumpToSlow);
+    // linkCall/unlinkCall rely on this fixed layout to locate the patch points.
+    ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
+    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+
+    // The following is the fast case, only used when a callee can be linked.
+
+    // In the case of OpConstruct, call out to a cti_ function to create the new object.
+    if (opcodeID == op_construct) {
+        int proto = instruction[5].u.operand;
+        int thisRegister = instruction[6].u.operand;
+
+        emitPutJITStubArg(X86::ecx, 1);
+        emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
+        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
+        emitPutVirtualRegister(thisRegister);
+        emitGetVirtualRegister(callee, X86::ecx); // stub call may have clobbered ecx
+    }
+
+    // Fast version of stack frame initialization, directly relative to edi.
+    // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
+    storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
+    storePtr(X86::ecx, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
+    loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), X86::edx); // newScopeChain
+    store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
+    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
+    storePtr(X86::edx, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
+    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
+
+    // Call to the callee; the target is a dummy until linkCall patches it.
+    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(unreachable));
+
+    if (opcodeID == op_call_eval)
+        wasEval.link(this);
+
+    // Put the return value in dst. In the interpreter, op_ret does this.
+    emitPutVirtualRegister(dst);
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
+#endif
+}
+
+// Linking build slow path. Two entry sequences share this code: the first run
+// (via the unpatched compare) goes through the pre-link trampoline, which may
+// link the call site; subsequent cold-path entries (coldPathOther) re-check
+// the callee and dispatch through the generic virtual call or the host
+// function stub.
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
+{
+    int dst = instruction[1].u.operand;
+    int callee = instruction[2].u.operand;
+    int argCount = instruction[3].u.operand;
+    int registerOffset = instruction[4].u.operand;
+
+    linkSlowCase(iter); // the inline JSFunction compare failed
+
+    // The arguments have been set up on the hot path for op_call_eval
+    if (opcodeID == op_call)
+        compileOpCallSetupArgs(instruction);
+    else if (opcodeID == op_construct)
+        compileOpConstructSetupArgs(instruction);
+
+    // Fast check for JS function.
+    Jump callLinkFailNotObject = emitJumpIfNotJSCell(X86::ecx);
+    Jump callLinkFailNotJSFunction = jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));
+
+    // First, in the case of a construct, allocate the new object.
+    if (opcodeID == op_construct) {
+        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
+        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+        emitGetVirtualRegister(callee, X86::ecx); // stub call may have clobbered ecx
+    }
+
+    move(Imm32(argCount), X86::edx);
+
+    // Speculatively roll the callframe, assuming argCount will match the arity.
+    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+
+    // First-run path: the pre-link trampoline attempts to link the call site.
+    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation =
+        emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink);
+
+    Jump storeResultForFirstRun = jump();
+
+// FIXME: this label can be removed, since it is a fixed offset from 'callReturnLocation'.
+    // This is the address for the cold path *after* the first run (which tries to link the call).
+    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = MacroAssembler::Label(this);
+
+    // The arguments have been set up on the hot path for op_call_eval
+    if (opcodeID == op_call)
+        compileOpCallSetupArgs(instruction);
+    else if (opcodeID == op_construct)
+        compileOpConstructSetupArgs(instruction);
+
+    // Check for JSFunctions.
+    Jump isNotObject = emitJumpIfNotJSCell(X86::ecx);
+    Jump isJSFunction = jePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));
+
+    // This handles host functions
+    isNotObject.link(this);
+    callLinkFailNotObject.link(this);
+    callLinkFailNotJSFunction.link(this);
+    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
+    Jump wasNotJSFunction = jump();
+
+    // Next, handle JSFunctions...
+    isJSFunction.link(this);
+
+    // First, in the case of a construct, allocate the new object.
+    if (opcodeID == op_construct) {
+        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
+        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+        emitGetVirtualRegister(callee, X86::ecx);
+    }
+
+    // Speculatively roll the callframe, assuming argCount will match the arity.
+    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+    move(Imm32(argCount), X86::edx);
+
+    emitNakedCall(m_interpreter->m_ctiVirtualCall);
+
+    // Put the return value in dst. In the interpreter, op_ret does this.
+    wasNotJSFunction.link(this);
+    storeResultForFirstRun.link(this);
+    emitPutVirtualRegister(dst);
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
+#endif
+}
+
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITInlineMethods.h b/src/3rdparty/webkit/JavaScriptCore/jit/JITInlineMethods.h
new file mode 100644
index 0000000..3804ba9
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITInlineMethods.h
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITInlineMethods_h
+#define JITInlineMethods_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#if PLATFORM(WIN)
+#undef FIELD_OFFSET // Fix conflict with winnt.h.
+#endif
+
+// FIELD_OFFSET: Like the C++ offsetof macro, but you can use it with classes.
+// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
+// NULL can cause compiler problems, especially in cases of multiple inheritance.
+#define FIELD_OFFSET(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
+
+namespace JSC {
+
+// Forgets the cached "last result is in eax" association, forcing the next
+// emitGetVirtualRegister to reload from the register file.
+ALWAYS_INLINE void JIT::killLastResultRegister()
+{
+    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
+}
+
+// get arg puts an arg from the SF register array into a h/w register
+// get arg puts an arg from the SF register array into a h/w register
+// Three cases: constants are materialized as immediates; the last-written
+// temporary can be forwarded from eax (but not across a jump target, where
+// eax's contents are not guaranteed); otherwise load from the call frame.
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
+{
+    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
+    if (m_codeBlock->isConstantRegisterIndex(src)) {
+        JSValuePtr value = m_codeBlock->getConstant(src);
+        move(ImmPtr(JSValuePtr::encode(value)), dst);
+        killLastResultRegister();
+        return;
+    }
+
+    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
+        // Advance through the jump-target list to see whether the current
+        // bytecode index is a jump target; if so the eax cache is invalid.
+        bool atJumpTarget = false;
+        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
+            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
+                atJumpTarget = true;
+            ++m_jumpTargetsPosition;
+        }
+
+        if (!atJumpTarget) {
+            // The argument we want is already stored in eax
+            if (dst != X86::eax)
+                move(X86::eax, dst);
+            killLastResultRegister();
+            return;
+        }
+    }
+
+    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
+    killLastResultRegister();
+}
+
+// Loads two virtual registers, ordering the loads so that a src2 cached in
+// eax is consumed before the src1 load kills the last-result cache.
+ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
+{
+    if (src2 == m_lastResultBytecodeRegister) {
+        emitGetVirtualRegister(src2, dst2);
+        emitGetVirtualRegister(src1, dst1);
+    } else {
+        emitGetVirtualRegister(src1, dst1);
+        emitGetVirtualRegister(src2, dst2);
+    }
+}
+
+// puts an arg onto the stack, as an arg to a context threaded function.
+// puts an arg onto the stack, as an arg to a context threaded function.
+ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
+{
+    poke(src, argumentNumber);
+}
+
+// Stores a 32-bit constant into a stub argument slot on the stack.
+ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
+{
+    poke(Imm32(value), argumentNumber);
+}
+
+// Stores a pointer-sized constant into a stub argument slot on the stack.
+ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
+{
+    poke(ImmPtr(value), argumentNumber);
+}
+
+// Reads a stub argument slot back from the stack into a register.
+ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
+{
+    peek(dst, argumentNumber);
+}
+
+// Returns the constant value for a constant virtual-register index; the
+// caller must have verified the index refers to a constant.
+ALWAYS_INLINE JSValuePtr JIT::getConstantOperand(unsigned src)
+{
+    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
+    return m_codeBlock->getConstant(src);
+}
+
+// Returns the 32-bit integer payload of a constant immediate-number operand.
+ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
+{
+    return static_cast<int32_t>(JSImmediate::intValue(getConstantOperand(src)));
+}
+
+// True when the operand is a constant register holding an immediate number.
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+{
+    return m_codeBlock->isConstantRegisterIndex(src) && JSImmediate::isNumber(getConstantOperand(src));
+}
+
+// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
+// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
+// Constants are poked directly as encoded immediates; otherwise the value is
+// loaded through 'scratch'. Kills the last-result cache in both cases.
+ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
+{
+    if (m_codeBlock->isConstantRegisterIndex(src)) {
+        JSValuePtr value = m_codeBlock->getConstant(src);
+        emitPutJITStubArgConstant(JSValuePtr::encode(value), argumentNumber);
+    } else {
+        loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
+        emitPutJITStubArg(scratch, argumentNumber);
+    }
+
+    killLastResultRegister();
+}
+
+// Stores a constant pointer into a CTI parameter slot on the stack.
+ALWAYS_INLINE void JIT::emitPutCTIParam(void* value, unsigned name)
+{
+    poke(ImmPtr(value), name);
+}
+
+// Stores a register into a CTI parameter slot on the stack.
+ALWAYS_INLINE void JIT::emitPutCTIParam(RegisterID from, unsigned name)
+{
+    poke(from, name);
+}
+
+// Reads a CTI parameter slot into a register; the eax cache is invalidated
+// since the destination may be eax.
+ALWAYS_INLINE void JIT::emitGetCTIParam(unsigned name, RegisterID to)
+{
+    peek(to, name);
+    killLastResultRegister();
+}
+
+// Stores a register into one of the fixed call-frame header slots.
+ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+{
+    storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
+}
+
+// Stores a constant pointer into one of the fixed call-frame header slots.
+ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
+{
+    storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
+}
+
+// Loads a call-frame header slot into a register, invalidating the eax cache.
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, RegisterID to)
+{
+    loadPtr(Address(callFrameRegister, entry * sizeof(Register)), to);
+    killLastResultRegister();
+}
+
+// Stores a register into virtual register 'dst'. When the source is eax the
+// (dst -> eax) association is cached so a following read of dst can skip the
+// load; any other source invalidates the cache.
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
+{
+    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
+    m_lastResultBytecodeRegister = (from == X86::eax) ? dst : std::numeric_limits<int>::max();
+    // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
+}
+
+// Initializes a virtual register to the undefined value.
+ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+{
+    storePtr(ImmPtr(JSValuePtr::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
+    // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
+}
+
+// Emits an indirect call through a register, without the CTI argument setup
+// that emitCTICall performs; the call is recorded for later linking.
+ALWAYS_INLINE JIT::Jump JIT::emitNakedCall(X86::RegisterID r)
+{
+    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+    Jump nakedCall = call(r);
+    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex));
+    return nakedCall;
+}
+
+// Emits a direct call to 'function', recorded (with its target) so the final
+// link step can fix up the call address.
+ALWAYS_INLINE JIT::Jump JIT::emitNakedCall(void* function)
+{
+    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+    Jump nakedCall = call();
+    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function));
+    return nakedCall;
+}
+
+// How the CTI stub receives its arguments differs per configuration: in a
+// register, at a known stack slot, or via va_list. Each variant also flushes
+// the current callFrameRegister to the CTI parameter area.
+#if USE(JIT_STUB_ARGUMENT_REGISTER)
+ALWAYS_INLINE void JIT::restoreArgumentReference()
+{
+    // Pass the stack pointer (the argument block) in the platform's first
+    // argument register.
+#if PLATFORM(X86_64)
+    move(X86::esp, X86::edi);
+#else
+    move(X86::esp, X86::ecx);
+#endif
+    emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
+}
+ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
+{
+    // In the trampoline on x86-64, the first argument register is not overwritten.
+#if !PLATFORM(X86_64)
+    move(X86::esp, X86::ecx);
+    addPtr(Imm32(sizeof(void*)), X86::ecx); // skip the return address pushed by the trampoline's caller
+#endif
+}
+#elif USE(JIT_STUB_ARGUMENT_STACK)
+ALWAYS_INLINE void JIT::restoreArgumentReference()
+{
+    // Store the argument-block pointer at the top of the stack itself.
+    storePtr(X86::esp, X86::esp);
+    emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
+}
+ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
+#else // JIT_STUB_ARGUMENT_VA_LIST
+ALWAYS_INLINE void JIT::restoreArgumentReference()
+{
+    emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
+}
+ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
+#endif
+
+ALWAYS_INLINE JIT::Jump JIT::emitCTICall_internal(void* helper)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+#if ENABLE(OPCODE_SAMPLING)
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions().begin() + m_bytecodeIndex, true)), m_interpreter->sampler()->sampleSlot());
+#endif
+ restoreArgumentReference();
+ Jump ctiCall = call();
+ m_calls.append(CallRecord(ctiCall, m_bytecodeIndex, helper));
+#if ENABLE(OPCODE_SAMPLING)
+ store32(Imm32(m_interpreter->sampler()->encodeSample(m_codeBlock->instructions().begin() + m_bytecodeIndex, false)), m_interpreter->sampler()->sampleSlot());
+#endif
+ killLastResultRegister();
+
+ return ctiCall;
+}
+
+ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
+{
+ return jnePtr(Address(reg, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(structure));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ return jzPtr(reg, ImmPtr(reinterpret_cast<void*>(JSImmediate::TagMask)));
+#else
+ return jz32(reg, Imm32(JSImmediate::TagMask));
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
+{
+ addSlowCase(emitJumpIfJSCell(reg));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ return jnzPtr(reg, ImmPtr(reinterpret_cast<void*>(JSImmediate::TagMask)));
+#else
+ return jnz32(reg, Imm32(JSImmediate::TagMask));
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
+{
+ addSlowCase(emitJumpIfNotJSCell(reg));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
+{
+ if (!m_codeBlock->isKnownNotImmediate(vReg))
+ emitJumpSlowCaseIfNotJSCell(reg);
+}
+
+ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
+{
+ if (!m_codeBlock->isKnownNotImmediate(vReg))
+ linkSlowCase(iter);
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmNum(RegisterID reg)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ return jaePtr(reg, ImmPtr(reinterpret_cast<void*>(JSImmediate::TagTypeInteger)));
+#else
+ return jnz32(reg, Imm32(JSImmediate::TagTypeInteger));
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmNum(RegisterID reg)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(jbPtr(reg, ImmPtr(reinterpret_cast<void*>(JSImmediate::TagTypeInteger))));
+#else
+ addSlowCase(jz32(reg, Imm32(JSImmediate::TagTypeInteger)));
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmNums(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+{
+ move(reg1, scratch);
+ andPtr(reg2, scratch);
+ emitJumpSlowCaseIfNotImmNum(scratch);
+}
+
+#if !USE(ALTERNATE_JSIMMEDIATE)
+ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
+{
+ subPtr(Imm32(JSImmediate::TagTypeInteger), reg);
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
+{
+ return jzSubPtr(Imm32(JSImmediate::TagTypeInteger), reg);
+}
+#endif
+
+ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ emitFastArithIntToImmNoCheck(src, dest);
+#else
+ if (src != dest)
+ move(src, dest);
+ addPtr(Imm32(JSImmediate::TagTypeInteger), dest);
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ UNUSED_PARAM(reg);
+#else
+ rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift), reg);
+#endif
+}
+
+// operand is int32_t, must have been zero-extended if register is 64-bit.
+ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
+{
+#if USE(ALTERNATE_JSIMMEDIATE)
+ if (src != dest)
+ move(src, dest);
+ orPtr(ImmPtr(reinterpret_cast<void*>(JSImmediate::TagTypeInteger)), dest);
+#else
+ signExtend32ToPtr(src, dest);
+ addPtr(dest, dest);
+ emitFastArithReTagImmediate(dest, dest);
+#endif
+}
+
+ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
+{
+ lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
+ or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
+}
+
+ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
+}
+
+ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
+}
+
+}
+
+#endif // ENABLE(JIT)
+
+#endif
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITPropertyAccess.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JITPropertyAccess.cpp
new file mode 100644
index 0000000..6740bec
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -0,0 +1,704 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)
+{
+ // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+ // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+ // to array-length / prototype access trampolines, and finally we also store the property-map access offset as a label
+ // to jump back to if one of these trampolines finds a match.
+
+ emitGetVirtualRegister(baseVReg, X86::eax);
+
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArgConstant(ident, 2);
+ emitCTICall(Interpreter::cti_op_get_by_id_generic);
+ emitPutVirtualRegister(resultVReg);
+}
+
+
+void JIT::compileGetByIdSlowCase(int, int, Identifier*, Vector<SlowCaseEntry>::iterator&, unsigned)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, unsigned)
+{
+ // In order to be able to patch both the Structure, and the object offset, we store one pointer,
+ // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
+ // such that the Structure & offset are always at the same distance from this.
+
+ emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);
+
+ emitPutJITStubArgConstant(ident, 2);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 3);
+ emitCTICall(Interpreter::cti_op_put_by_id_generic);
+}
+
+void JIT::compilePutByIdSlowCase(int, Identifier*, int, Vector<SlowCaseEntry>::iterator&, unsigned)
+{
+ ASSERT_NOT_REACHED();
+}
+
+#else
+
+void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
+{
+ // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+ // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+ // to array-length / prototype access trampolines, and finally we also store the property-map access offset as a label
+ // to jump back to if one of these trampolines finds a match.
+
+ emitGetVirtualRegister(baseVReg, X86::eax);
+
+ emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+
+ DataLabelPtr structureToCompare;
+ Jump structureCheck = jnePtrWithPatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ addSlowCase(structureCheck);
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
+ ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
+
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+ DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(X86::eax, patchGetByIdDefaultOffset), X86::eax);
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
+
+ Label putResult(this);
+ ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
+ emitPutVirtualRegister(resultVReg);
+}
+
+
+void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
+{
+ // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
+ // so that we only need track one pointer into the slow case code - we track a pointer to the location
+ // of the call (which we can use to look up the patch information), but should an array-length or
+ // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+ // the distance from the call to the head of the slow case.
+
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCase(iter);
+
+#ifndef NDEBUG
+ Label coldPathBegin(this);
+#endif
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArgConstant(ident, 2);
+ Jump call = emitCTICall(Interpreter::cti_op_get_by_id);
+ emitPutVirtualRegister(resultVReg);
+
+ ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+}
+
+void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsigned propertyAccessInstructionIndex)
+{
+ // In order to be able to patch both the Structure, and the object offset, we store one pointer,
+ // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
+ // such that the Structure & offset are always at the same distance from this.
+
+ emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);
+
+ // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
+ emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+
+ // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
+ DataLabelPtr structureToCompare;
+ addSlowCase(jnePtrWithPatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+
+ // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+ DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(X86::edx, Address(X86::eax, patchGetByIdDefaultOffset));
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
+}
+
+void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
+{
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCase(iter);
+
+ emitPutJITStubArgConstant(ident, 2);
+ emitPutJITStubArg(X86::eax, 1);
+ emitPutJITStubArg(X86::edx, 3);
+ Jump call = emitCTICall(Interpreter::cti_op_put_by_id);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+}
+
+static JSObject* resizePropertyStorage(JSObject* baseObject, int32_t oldSize, int32_t newSize)
+{
+ baseObject->allocatePropertyStorage(oldSize, newSize);
+ return baseObject;
+}
+
+static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Structure* newStructure)
+{
+ return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+}
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
+{
+ JumpList failureCases;
+ // Check eax is an object of the right Structure.
+ failureCases.append(emitJumpIfNotJSCell(X86::eax));
+ failureCases.append(jnePtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
+ JumpList successCases;
+
+ // ecx = baseObject
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ // proto(ecx) = baseObject->structure()->prototype()
+ failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+
+ loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
+
+ // ecx = baseObject->m_structure
+ for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
+ // null check the prototype
+ successCases.append(jePtr(X86::ecx, ImmPtr(JSValuePtr::encode(jsNull()))));
+
+ // Check the structure id
+ failureCases.append(jnePtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));
+
+ loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+ failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+ loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
+ }
+
+ successCases.link(this);
+
+ Jump callTarget;
+
+ // emit a call only if storage realloc is needed
+ if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
+ pop(X86::ebx);
+#if PLATFORM(X86_64)
+ move(Imm32(newStructure->propertyStorageCapacity()), X86::edx);
+ move(Imm32(oldStructure->propertyStorageCapacity()), X86::esi);
+ move(X86::eax, X86::edi);
+ callTarget = call();
+#else
+ push(Imm32(newStructure->propertyStorageCapacity()));
+ push(Imm32(oldStructure->propertyStorageCapacity()));
+ push(X86::eax);
+ callTarget = call();
+ addPtr(Imm32(3 * sizeof(void*)), X86::esp);
+#endif
+ emitGetJITStubArg(3, X86::edx);
+ push(X86::ebx);
+ }
+
+ // Assumes m_refCount can be decremented easily, refcount decrement is safe as
+ // codeblock should ensure oldStructure->m_refCount > 0
+ sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
+ add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
+ storePtr(ImmPtr(newStructure), Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)));
+
+ // write the value
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+ storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValuePtr)));
+
+ ret();
+
+ Jump failureJump;
+ bool plantedFailureJump = false;
+ if (!failureCases.empty()) {
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ failureJump = jump();
+ plantedFailureJump = true;
+ }
+
+ void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+ PatchBuffer patchBuffer(code);
+
+ if (plantedFailureJump)
+ patchBuffer.link(failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+
+ if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
+ patchBuffer.link(callTarget, reinterpret_cast<void*>(resizePropertyStorage));
+
+ stubInfo->stubRoutine = code;
+
+ Jump::patch(returnAddress, code);
+}
+
+void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+{
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ // Should probably go to Interpreter::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+
+ // Patch the offset into the property map to load from, then patch the Structure to look for.
+ void* structureAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdStructure);
+ void* displacementAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPropertyMapOffset);
+ DataLabelPtr::patch(structureAddress, structure);
+ DataLabel32::patch(displacementAddress, cachedOffset * sizeof(JSValuePtr));
+}
+
+void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+{
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ // Should probably go to Interpreter::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic));
+
+ // Patch the offset into the property map to load from, then patch the Structure to look for.
+ void* structureAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetPutByIdStructure;
+ void* displacementAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetPutByIdPropertyMapOffset;
+ DataLabelPtr::patch(structureAddress, structure);
+ DataLabel32::patch(displacementAddress, cachedOffset * sizeof(JSValuePtr));
+}
+
+void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
+{
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
+
+ // Check eax is an array
+ Jump failureCases1 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx);
+ load32(Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_length)), X86::ecx);
+
+ Jump failureCases2 = ja32(X86::ecx, Imm32(JSImmediate::maxImmediateInt));
+
+ emitFastArithIntToImmNoCheck(X86::ecx, X86::eax);
+ Jump success = jump();
+
+ void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+ PatchBuffer patchBuffer(code);
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+ // On success return back to the hot patch code, at a point it will perform the store to dest for us.
+ void* hotPathPutResult = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
+ patchBuffer.link(success, hotPathPutResult);
+
+ // Track the stub we have created so that it will be deleted later.
+ stubInfo->stubRoutine = code;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
+ Jump::patch(jumpLocation, code);
+}
+
+void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+{
+ // Check eax is an object of the right Structure.
+ Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
+ Jump failureCases2 = checkStructure(X86::eax, structure);
+
+ // Checks out okay! - getDirectOffset
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+ loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ ret();
+
+ void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+ PatchBuffer patchBuffer(code);
+
+ patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+ patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+
+ stubInfo->stubRoutine = code;
+
+ Jump::patch(returnAddress, code);
+}
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
+{
+#if USE(CTI_REPATCH_PIC)
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+ // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+ PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
+ loadPtr(static_cast<void*>(protoPropertyStorage), X86::edx);
+
+ // Check eax is an object of the right Structure.
+ Jump failureCases1 = checkStructure(X86::eax, structure);
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(prototypeStructure), X86::ebx);
+ Jump failureCases2 = jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress));
+#else
+ Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ // Checks out okay! - getDirectOffset
+ loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+
+ Jump success = jump();
+
+ void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+ PatchBuffer patchBuffer(code);
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+ // On success return back to the hot patch code, at a point it will perform the store to dest for us.
+ intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
+ patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+
+ // Track the stub we have created so that it will be deleted later.
+ stubInfo->stubRoutine = code;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
+ Jump::patch(jumpLocation, code);
+#else
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+ // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+ PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
+ loadPtr(protoPropertyStorage, X86::edx);
+
+ // Check eax is an object of the right Structure.
+ Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
+ Jump failureCases2 = checkStructure(X86::eax, structure);
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+ Jump failureCases3 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+
+ // Checks out okay! - getDirectOffset
+ loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+
+ ret();
+
+ void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+ PatchBuffer patchBuffer(code);
+
+ patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+ patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+ patchBuffer.link(failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+
+ stubInfo->stubRoutine = code;
+
+ Jump::patch(returnAddress, code);
+#endif
+}
+
+#if USE(CTI_REPATCH_PIC)
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+{
+ Jump failureCase = checkStructure(X86::eax, structure);
+ loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+ loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ Jump success = jump();
+
+ void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+ ASSERT(code);
+ PatchBuffer patchBuffer(code);
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ if (!lastProtoBegin)
+ lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+
+ patchBuffer.link(failureCase, lastProtoBegin);
+
+ // On success return back to the hot patch code, at a point it will perform the store to dest for us.
+ intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
+ patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+
+ structure->ref();
+ polymorphicStructures->list[currentIndex].set(code, structure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
+ Jump::patch(jumpLocation, code);
+}
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
+{
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+ // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+ PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
+ loadPtr(protoPropertyStorage, X86::edx);
+
+ // Check eax is an object of the right Structure.
+ Jump failureCases1 = checkStructure(X86::eax, structure);
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(prototypeStructure), X86::ebx);
+ Jump failureCases2 = jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress));
+#else
+ Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ // Checks out okay! - getDirectOffset
+ loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+
+ Jump success = jump();
+
+ void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+ PatchBuffer patchBuffer(code);
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ patchBuffer.link(failureCases1, lastProtoBegin);
+ patchBuffer.link(failureCases2, lastProtoBegin);
+
+ // On success return back to the hot patch code, at a point it will perform the store to dest for us.
+ intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
+ patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+
+ structure->ref();
+ prototypeStructure->ref();
+ prototypeStructures->list[currentIndex].set(code, structure, prototypeStructure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
+ Jump::patch(jumpLocation, code);
+}
+
+// Compile one additional case of a polymorphic GET_BY_ID stub for a property
+// found at the end of a prototype chain of length 'count'.  The emitted code:
+//   1. checks the base object's (eax) Structure,
+//   2. checks the Structure of every object along the cached prototype chain,
+//   3. loads the cached slot from the final prototype's storage into eax.
+// Any failed check falls through to the stub generated for the previous case
+// in 'prototypeStructures'; on success control rejoins the hot path at the
+// point that stores the result.
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
+{
+    ASSERT(count);
+
+    JumpList bucketsOfFail;
+
+    // Check eax is an object of the right Structure.
+    Jump baseObjectCheck = checkStructure(X86::eax, structure);
+    bucketsOfFail.append(baseObjectCheck);
+
+    // Walk the cached chain: each iteration resolves the next prototype at
+    // compile time and emits a guard that its Structure has not changed.
+    Structure* currStructure = structure;
+    RefPtr<Structure>* chainEntries = chain->head();
+    JSObject* protoObject = 0;
+    for (unsigned i = 0; i < count; ++i) {
+        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+        currStructure = chainEntries[i].get();
+
+        // Check the prototype object's Structure had not changed.
+        Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+        // x86-64 has no 64-bit immediate-to-memory compare, so materialize
+        // the expected Structure pointer in ebx and compare against memory.
+        move(ImmPtr(currStructure), X86::ebx);
+        bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
+#else
+        bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+    }
+    ASSERT(protoObject);
+
+    // All guards passed: load the property from the final prototype's
+    // property storage into the result register.
+    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
+    loadPtr(protoPropertyStorage, X86::edx);
+    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    Jump success = jump();
+
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    PatchBuffer patchBuffer(code);
+
+    // Use the patch information to link the failure cases back to the original slow case routine.
+    // Here the "slow case" is the stub compiled for the previous entry in the
+    // polymorphic list, so the cases chain together.
+    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+
+    patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
+    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
+    patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+
+    // Track the stub we have created so that it will be deleted later.
+    // ref() the Structure and chain so they outlive the generated stub; the
+    // PolymorphicAccessStructureList entry takes ownership of the references.
+    structure->ref();
+    chain->ref();
+    prototypeStructures->list[currentIndex].set(code, structure, chain);
+
+    // Finally patch the jump to slow case back in the hot path to jump here instead.
+    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
+    Jump::patch(jumpLocation, code);
+}
+#endif
+
+// Compile a stub for op_get_by_id where the property lives on an object
+// reached by walking 'count' links of the base object's prototype chain.
+// The stub guards the base object's Structure and every prototype Structure
+// along the chain, then loads the cached slot into eax.
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
+{
+#if USE(CTI_REPATCH_PIC)
+    // We don't want to patch more than once - route future misses to
+    // cti_op_get_by_id_proto_list, which builds a polymorphic stub list.
+    Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
+
+    ASSERT(count);
+
+    JumpList bucketsOfFail;
+
+    // Check eax is an object of the right Structure.
+    bucketsOfFail.append(checkStructure(X86::eax, structure));
+
+    // Guard the Structure of every object along the cached prototype chain.
+    Structure* currStructure = structure;
+    RefPtr<Structure>* chainEntries = chain->head();
+    JSObject* protoObject = 0;
+    for (unsigned i = 0; i < count; ++i) {
+        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+        currStructure = chainEntries[i].get();
+
+        // Check the prototype object's Structure had not changed.
+        Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+        // x86-64 has no 64-bit immediate-to-memory compare; load the expected
+        // Structure pointer into ebx first.
+        move(ImmPtr(currStructure), X86::ebx);
+        bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
+#else
+        bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+    }
+    ASSERT(protoObject);
+
+    // All guards passed: load the property from the final prototype's storage.
+    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
+    loadPtr(protoPropertyStorage, X86::edx);
+    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    Jump success = jump();
+
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    PatchBuffer patchBuffer(code);
+
+    // Use the patch information to link the failure cases back to the original slow case routine.
+    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+
+    patchBuffer.link(bucketsOfFail, slowCaseBegin);
+
+    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
+    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
+    patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+
+    // Track the stub we have created so that it will be deleted later.
+    stubInfo->stubRoutine = code;
+
+    // Finally patch the jump to slow case back in the hot path to jump here instead.
+    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
+    Jump::patch(jumpLocation, code);
+#else
+    ASSERT(count);
+
+    JumpList bucketsOfFail;
+
+    // Check eax is an object of the right Structure.  Without the repatch PIC
+    // the stub is entered via a plain call, so eax may not even be a cell.
+    bucketsOfFail.append(emitJumpIfNotJSCell(X86::eax));
+    bucketsOfFail.append(checkStructure(X86::eax, structure));
+
+    Structure* currStructure = structure;
+    RefPtr<Structure>* chainEntries = chain->head();
+    JSObject* protoObject = 0;
+    for (unsigned i = 0; i < count; ++i) {
+        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+        currStructure = chainEntries[i].get();
+
+        // Check the prototype object's Structure had not changed.
+        Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+        move(ImmPtr(currStructure), X86::ebx);
+        bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
+#else
+        bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+    }
+    ASSERT(protoObject);
+
+    PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
+    loadPtr(protoPropertyStorage, X86::edx);
+    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    ret();
+
+    void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+    // FIX: this declaration was missing, so the link() call below referenced
+    // an undeclared 'patchBuffer' whenever USE(CTI_REPATCH_PIC) was disabled.
+    // Mirrors the #if branch above and the sibling stub compilers.
+    PatchBuffer patchBuffer(code);
+
+    patchBuffer.link(bucketsOfFail, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+
+    stubInfo->stubRoutine = code;
+
+    Jump::patch(returnAddress, code);
+#endif
+}
+
+// Compile a fast-path stub for op_put_by_id when the property already exists
+// on the object: guard the base object, then write the value (edx) straight
+// into its property storage at the cached offset.
+void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+{
+    // Guard that eax holds a cell with exactly the cached Structure.
+    JumpList guardFailures;
+    guardFailures.append(emitJumpIfNotJSCell(X86::eax));
+    guardFailures.append(checkStructure(X86::eax, structure));
+
+    // Guards passed - perform the putDirectOffset and return.
+    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
+    storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValuePtr)));
+    ret();
+
+    void* stubCode = m_assembler.executableCopy(m_codeBlock->executablePool());
+    PatchBuffer patchBuffer(stubCode);
+
+    // Either failed guard bails out to the generic put_by_id slow path.
+    patchBuffer.link(guardFailures, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+
+    // Record the stub for later teardown, then redirect the caller into it.
+    stubInfo->stubRoutine = stubCode;
+
+    Jump::patch(returnAddress, stubCode);
+}
+
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)