author    Kent Hansen <khansen@trolltech.com>    2009-09-24 14:35:49 (GMT)
committer Kent Hansen <khansen@trolltech.com>    2009-09-24 15:36:17 (GMT)
commit    aabd12223bda6260756ab19430082477d5669c0a (patch)
tree      6ca7a12e627915992cfd0038f6ddd8ee244711f4 /src/3rdparty/javascriptcore/JavaScriptCore/jit
parent    270c374c178ec5a532d37168b018cd7ebc844558 (diff)
Update src/3rdparty/javascriptcore and adapt src/script to the changes.
Reviewed-by: Simon Hausmann
Diffstat (limited to 'src/3rdparty/javascriptcore/JavaScriptCore/jit')
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h        |   44
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp  |    5
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp    |    5
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp                       |  499
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h                         |  795
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp             | 1307
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp                   |  497
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCode.h                     |    6
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h            |  752
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp                | 1934
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp         |  994
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h                 |  136
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp                  |  880
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h                    |  217
14 files changed, 6433 insertions, 1638 deletions
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h
index 4ed47e3..12e2a32 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h
@@ -38,6 +38,10 @@
#include <sys/mman.h>
#endif
+#if PLATFORM(SYMBIAN)
+#include <e32std.h>
+#endif
+
#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
@@ -176,30 +180,40 @@ public:
static void cacheFlush(void*, size_t)
{
}
-#elif PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE)
+#elif PLATFORM(ARM_THUMB2) && PLATFORM(IPHONE)
static void cacheFlush(void* code, size_t size)
{
sys_dcache_flush(code, size);
sys_icache_invalidate(code, size);
}
-#elif PLATFORM(ARM)
+#elif PLATFORM(SYMBIAN)
+ static void cacheFlush(void* code, size_t size)
+ {
+ User::IMB_Range(code, static_cast<char*>(code) + size);
+ }
+#elif PLATFORM(ARM) && COMPILER(GCC) && (GCC_VERSION >= 30406) && !defined(DISABLE_BUILTIN_CLEAR_CACHE)
static void cacheFlush(void* code, size_t size)
{
- #if COMPILER(GCC) && (GCC_VERSION >= 30406)
__clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(code) + size);
- #else
- const int syscall = 0xf0002;
- __asm __volatile (
- "mov r0, %0\n"
- "mov r1, %1\n"
- "mov r7, %2\n"
- "mov r2, #0x0\n"
- "swi 0x00000000\n"
- :
- : "r" (code), "r" (reinterpret_cast<char*>(code) + size), "r" (syscall)
- : "r0", "r1", "r7");
- #endif // COMPILER(GCC) && (GCC_VERSION >= 30406)
}
+#elif PLATFORM(ARM_TRADITIONAL) && PLATFORM(LINUX)
+ static void cacheFlush(void* code, size_t size)
+ {
+ asm volatile (
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
+ : "r0", "r1");
+ }
+#else
+ #error "The cacheFlush support is missing on this platform."
#endif
private:
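
Side note on the cacheFlush changes above: after new machine code is written into an executable pool, the instruction cache must be brought in sync with the data cache before jumping into that code. On ARM/Linux with GCC >= 3.4.6 the patch calls the libgcc helper __clear_cache. The sketch below is not part of the patch; copyAndFlush is a made-up illustrative name, and it assumes an ARM/Linux GCC toolchain that provides __clear_cache.

    // Minimal sketch, assuming an ARM/Linux GCC toolchain with __clear_cache in libgcc.
    #include <cstddef>
    #include <cstring>

    extern "C" void __clear_cache(char* begin, char* end); // libgcc cache-sync helper

    void copyAndFlush(void* dst, const void* src, size_t size) // illustrative name, not JSC API
    {
        std::memcpy(dst, src, size);            // write the generated machine code
        char* begin = static_cast<char*>(dst);
        __clear_cache(begin, begin + size);     // make it visible to instruction fetch
    }
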
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
index 4bd5a2c..13a8626 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
@@ -44,7 +44,10 @@ void ExecutableAllocator::intializePageSize()
ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
{
- ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(mmap(NULL, n, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0)), n };
+ void* allocation = mmap(NULL, n, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
+ if (allocation == MAP_FAILED)
+ CRASH();
+ ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(allocation), n };
return alloc;
}
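
Side note on the POSIX allocator change above: mmap reports failure by returning MAP_FAILED ((void*)-1), not NULL, so without the new check a failed allocation would be handed out as if it were valid memory. A minimal standalone sketch of the same pattern follows; it uses plain PROT_* flags instead of JSC's INITIAL_PROTECTION_FLAGS, abort() in place of CRASH(), and an illustrative function name.

    #include <sys/mman.h>
    #include <cstdlib>
    #include <cstddef>

    void* allocateExecutablePages(size_t size)  // illustrative name, not JSC API
    {
        void* p = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)  // mmap signals failure with MAP_FAILED, never NULL
            abort();          // stands in for CRASH() in the real allocator
        return p;
    }
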
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
index a9ba7d0..e6ac855 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
@@ -42,7 +42,10 @@ void ExecutableAllocator::intializePageSize()
ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
{
- ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(VirtualAlloc(0, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE)), n};
+ void* allocation = VirtualAlloc(0, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
+ if (!allocation)
+ CRASH();
+ ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocation), n};
return alloc;
}
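
Side note: the Windows allocator gains the equivalent guard. Unlike mmap, VirtualAlloc reports failure with a NULL return. A minimal standalone sketch, with abort() again standing in for CRASH() and an illustrative function name:

    #include <windows.h>
    #include <cstdlib>

    void* allocateExecutablePages(size_t size)  // illustrative name, not JSC API
    {
        void* p = VirtualAlloc(0, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
        if (!p)       // VirtualAlloc returns NULL on failure
            abort();  // stands in for CRASH()
        return p;
    }
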
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
index a0e462b..bf3a418 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
@@ -37,6 +37,7 @@ JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
+#include "JITStubs.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
@@ -79,51 +80,67 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
, m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
, m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
, m_bytecodeIndex((unsigned)-1)
+#if USE(JSVALUE32_64)
+ , m_jumpTargetIndex(0)
+ , m_mappedBytecodeIndex((unsigned)-1)
+ , m_mappedVirtualRegisterIndex((unsigned)-1)
+ , m_mappedTag((RegisterID)-1)
+ , m_mappedPayload((RegisterID)-1)
+#else
, m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
, m_jumpTargetsPosition(0)
+#endif
{
}
-void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+#if USE(JSVALUE32_64)
+void JIT::emitTimeoutCheck()
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitGetVirtualRegisters(src1, regT0, src2, regT1);
-
- // Jump to a slow case if either operand is a number, or if both are JSCell*s.
- move(regT0, regT2);
- orPtr(regT1, regT2);
- addSlowCase(emitJumpIfJSCell(regT2));
- addSlowCase(emitJumpIfImmediateNumber(regT2));
-
- if (type == OpStrictEq)
- set32(Equal, regT1, regT0, regT0);
- else
- set32(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
-
- emitPutVirtualRegister(dst);
+ Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
+ JITStubCall stubCall(this, cti_timeout_check);
+ stubCall.addArgument(regT1, regT0); // save last result registers.
+ stubCall.call(timeoutCheckRegister);
+ stubCall.getArgument(0, regT1, regT0); // reload last result registers.
+ skipTimeout.link(this);
}
-
+#else
void JIT::emitTimeoutCheck()
{
Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
- JITStubCall(this, JITStubs::cti_timeout_check).call(timeoutCheckRegister);
+ JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
skipTimeout.link(this);
killLastResultRegister();
}
-
+#endif
#define NEXT_OPCODE(name) \
m_bytecodeIndex += OPCODE_LENGTH(name); \
break;
+#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
case name: { \
- JITStubCall stubCall(this, JITStubs::cti_##name); \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand); \
+ stubCall.addArgument(currentInstruction[3].u.operand); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_UNARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#else // USE(JSVALUE32_64)
+
+#define DEFINE_BINARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
stubCall.call(currentInstruction[1].u.operand); \
@@ -132,11 +149,12 @@ void JIT::emitTimeoutCheck()
#define DEFINE_UNARY_OP(name) \
case name: { \
- JITStubCall stubCall(this, JITStubs::cti_##name); \
+ JITStubCall stubCall(this, cti_##name); \
stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
stubCall.call(currentInstruction[1].u.operand); \
NEXT_OPCODE(name); \
}
+#endif // USE(JSVALUE32_64)
#define DEFINE_OP(name) \
case name: { \
@@ -168,14 +186,18 @@ void JIT::privateCompileMainPass()
sampleInstruction(currentInstruction);
#endif
+#if !USE(JSVALUE32_64)
if (m_labels[m_bytecodeIndex].isUsed())
killLastResultRegister();
-
+#endif
+
m_labels[m_bytecodeIndex] = label();
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_BINARY_OP(op_del_by_val)
+#if !USE(JSVALUE32_64)
DEFINE_BINARY_OP(op_div)
+#endif
DEFINE_BINARY_OP(op_in)
DEFINE_BINARY_OP(op_less)
DEFINE_BINARY_OP(op_lesseq)
@@ -187,7 +209,9 @@ void JIT::privateCompileMainPass()
DEFINE_UNARY_OP(op_is_object)
DEFINE_UNARY_OP(op_is_string)
DEFINE_UNARY_OP(op_is_undefined)
+#if !USE(JSVALUE32_64)
DEFINE_UNARY_OP(op_negate)
+#endif
DEFINE_UNARY_OP(op_typeof)
DEFINE_OP(op_add)
@@ -206,6 +230,9 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_create_arguments)
DEFINE_OP(op_debug)
DEFINE_OP(op_del_by_id)
+#if USE(JSVALUE32_64)
+ DEFINE_OP(op_div)
+#endif
DEFINE_OP(op_end)
DEFINE_OP(op_enter)
DEFINE_OP(op_enter_with_activation)
@@ -236,6 +263,9 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_mod)
DEFINE_OP(op_mov)
DEFINE_OP(op_mul)
+#if USE(JSVALUE32_64)
+ DEFINE_OP(op_negate)
+#endif
DEFINE_OP(op_neq)
DEFINE_OP(op_neq_null)
DEFINE_OP(op_new_array)
@@ -265,7 +295,6 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_put_setter)
DEFINE_OP(op_resolve)
DEFINE_OP(op_resolve_base)
- DEFINE_OP(op_resolve_func)
DEFINE_OP(op_resolve_global)
DEFINE_OP(op_resolve_skip)
DEFINE_OP(op_resolve_with_base)
@@ -322,11 +351,15 @@ void JIT::privateCompileSlowCases()
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
m_propertyAccessInstructionIndex = 0;
+#if USE(JSVALUE32_64)
+ m_globalResolveInfoIndex = 0;
+#endif
m_callLinkInfoIndex = 0;
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
- // FIXME: enable peephole optimizations for slow cases when applicable
+#if !USE(JSVALUE32_64)
killLastResultRegister();
+#endif
m_bytecodeIndex = iter->to;
#ifndef NDEBUG
@@ -346,6 +379,9 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_construct)
DEFINE_SLOWCASE_OP(op_construct_verify)
DEFINE_SLOWCASE_OP(op_convert_this)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_div)
+#endif
DEFINE_SLOWCASE_OP(op_eq)
DEFINE_SLOWCASE_OP(op_get_by_id)
DEFINE_SLOWCASE_OP(op_get_by_val)
@@ -358,9 +394,12 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
DEFINE_SLOWCASE_OP(op_loop_if_true)
DEFINE_SLOWCASE_OP(op_lshift)
+ DEFINE_SLOWCASE_OP(op_method_check)
DEFINE_SLOWCASE_OP(op_mod)
DEFINE_SLOWCASE_OP(op_mul)
- DEFINE_SLOWCASE_OP(op_method_check)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_negate)
+#endif
DEFINE_SLOWCASE_OP(op_neq)
DEFINE_SLOWCASE_OP(op_not)
DEFINE_SLOWCASE_OP(op_nstricteq)
@@ -370,6 +409,9 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_pre_inc)
DEFINE_SLOWCASE_OP(op_put_by_id)
DEFINE_SLOWCASE_OP(op_put_by_val)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_resolve_global)
+#endif
DEFINE_SLOWCASE_OP(op_rshift)
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
@@ -396,7 +438,7 @@ void JIT::privateCompileSlowCases()
#endif
}
-void JIT::privateCompile()
+JITCode JIT::privateCompile()
{
sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
@@ -427,7 +469,7 @@ void JIT::privateCompile()
if (m_codeBlock->codeType() == FunctionCode) {
slowRegisterFileCheck.link(this);
m_bytecodeIndex = 0;
- JITStubCall(this, JITStubs::cti_register_file_check).call();
+ JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
@@ -510,389 +552,10 @@ void JIT::privateCompile()
info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
}
- m_codeBlock->setJITCode(patchBuffer.finalizeCode());
-}
-
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
-{
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- // (1) The first function provides fast property access for array length
- Label arrayLengthBegin = align();
-
- // Check eax is an array
- Jump array_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump array_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
-
- // Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
- load32(Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT0);
-
- Jump array_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
-
- // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-
- // (2) The second function provides fast property access for string length
- Label stringLengthBegin = align();
-
- // Check eax is a string
- Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
-
- // Checks out okay! - get the length from the Ustring.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT0);
- load32(Address(regT0, OBJECT_OFFSETOF(UString::Rep, len)), regT0);
-
- Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
-
- // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-#endif
-
- // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
- COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
-
- Label virtualCallPreLinkBegin = align();
-
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock1 = branchTestPtr(NonZero, regT0);
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction1 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- hasCodeBlock1.link(this);
-
- Jump isNativeFunc1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck1 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay1.link(this);
- isNativeFunc1.link(this);
-
- compileOpCallInitializeCallFrame();
-
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- restoreArgumentReference();
- Call callDontLazyLinkCall = call();
- emitGetJITStubArg(1, regT2);
- restoreReturnAddressBeforeReturn(regT3);
-
- jump(regT0);
-
- Label virtualCallLinkBegin = align();
-
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction2 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- hasCodeBlock2.link(this);
-
- Jump isNativeFunc2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck2 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay2.link(this);
- isNativeFunc2.link(this);
-
- compileOpCallInitializeCallFrame();
-
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- restoreReturnAddressBeforeReturn(regT3);
-
- jump(regT0);
-
- Label virtualCallBegin = align();
-
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction3 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body nody, so we can reload the code pointer.
- hasCodeBlock3.link(this);
-
- Jump isNativeFunc3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck3 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body nody, so we can reload the code pointer.
- arityCheckOkay3.link(this);
- isNativeFunc3.link(this);
-
- // load ctiCode from the new codeBlock.
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
-
- compileOpCallInitializeCallFrame();
- jump(regT0);
-
-
- Label nativeCallThunk = align();
- preserveReturnAddressAfterCall(regT0);
- emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
-
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-
-#if PLATFORM(X86_64)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86::ecx);
-
- // Allocate stack space for our arglist
- subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
- COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_by_16byte_aligned);
-
- // Set up arguments
- subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount
-
- // Push argcount
- storePtr(X86::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in edx
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
- mul32(Imm32(sizeof(Register)), X86::ecx, X86::ecx);
- subPtr(X86::ecx, X86::edx);
-
- // push pointer to arguments
- storePtr(X86::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
-
- // ArgList is passed by reference so is stackPointerRegister
- move(stackPointerRegister, X86::ecx);
-
- // edx currently points to the first argument, edx-sizeof(Register) points to 'this'
- loadPtr(Address(X86::edx, -(int32_t)sizeof(Register)), X86::edx);
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::esi);
-
- move(callFrameRegister, X86::edi);
-
- call(Address(X86::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
-#elif PLATFORM(X86)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
-
- /* We have two structs that we use to describe the stackframe we set up for our
- * call to native code. NativeCallFrameStructure describes the how we set up the stack
- * in advance of the call. NativeFunctionCalleeSignature describes the callframe
- * as the native code expects it. We do this as we are using the fastcall calling
- * convention which results in the callee popping its arguments off the stack, but
- * not the rest of the callframe so we need a nice way to ensure we increment the
- * stack pointer by the right amount after the call.
- */
-#if COMPILER(MSVC) || PLATFORM(LINUX)
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in EDX
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- JSValue result;
- };
- struct NativeFunctionCalleeSignature {
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- };
-#else
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in ECX
- // JSObject* callee; // passed in EDX
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- };
- struct NativeFunctionCalleeSignature {
- JSValue thisValue;
- ArgList* argPointer;
- };
-#endif
- const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
- // Allocate system stack frame
- subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
-
- // Set up arguments
- subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
-
- // push argcount
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in regT1
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
- mul32(Imm32(sizeof(Register)), regT0, regT0);
- subPtr(regT0, regT1);
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
-
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
-
- // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
- loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
-
-#if COMPILER(MSVC) || PLATFORM(LINUX)
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
-
- // Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
- storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
-
- // Plant callframe
- move(callFrameRegister, X86::edx);
-
- call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- // JSValue is a non-POD type
- loadPtr(Address(X86::eax), X86::eax);
-#else
- // Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx);
-
- // Plant callframe
- move(callFrameRegister, X86::ecx);
- call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
-#endif
-
- // We've put a few temporaries on the stack in addition to the actual arguments
- // so pull them off now
- addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
-
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
-#else
- breakpoint();
-#endif
-
- // Check for an exception
- loadPtr(&(globalData->exception), regT2);
- Jump exceptionHandler = branchTestPtr(NonZero, regT2);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
-
- // Handle an exception
- exceptionHandler.link(this);
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- Call array_failureCases1Call = makeTailRecursiveCall(array_failureCases1);
- Call array_failureCases2Call = makeTailRecursiveCall(array_failureCases2);
- Call array_failureCases3Call = makeTailRecursiveCall(array_failureCases3);
- Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
- Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
- Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
-
- // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- patchBuffer.link(array_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
- patchBuffer.link(array_failureCases2Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
- patchBuffer.link(array_failureCases3Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
- patchBuffer.link(string_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
-#endif
- patchBuffer.link(callArityCheck1, FunctionPtr(JITStubs::cti_op_call_arityCheck));
- patchBuffer.link(callArityCheck2, FunctionPtr(JITStubs::cti_op_call_arityCheck));
- patchBuffer.link(callArityCheck3, FunctionPtr(JITStubs::cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction1, FunctionPtr(JITStubs::cti_op_call_JSFunction));
- patchBuffer.link(callJSFunction2, FunctionPtr(JITStubs::cti_op_call_JSFunction));
- patchBuffer.link(callJSFunction3, FunctionPtr(JITStubs::cti_op_call_JSFunction));
- patchBuffer.link(callDontLazyLinkCall, FunctionPtr(JITStubs::cti_vm_dontLazyLinkCall));
- patchBuffer.link(callLazyLinkCall, FunctionPtr(JITStubs::cti_vm_lazyLinkCall));
-
- CodeRef finalCode = patchBuffer.finalizeCode();
- *executablePool = finalCode.m_executablePool;
-
- *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin);
- *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
- *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
- *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- *ctiArrayLengthTrampoline = trampolineAt(finalCode, arrayLengthBegin);
- *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
-#else
- UNUSED_PARAM(ctiArrayLengthTrampoline);
- UNUSED_PARAM(ctiStringLengthTrampoline);
-#endif
+ return patchBuffer.finalizeCode();
}
+#if !USE(JSVALUE32_64)
void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
{
loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst);
@@ -906,24 +569,29 @@ void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObjec
loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), variableObject);
storePtr(src, Address(variableObject, index * sizeof(Register)));
}
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
{
// When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
// (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
// match). Reset the check so it no longer matches.
- RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock);
+ RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock.get());
+#if USE(JSVALUE32_64)
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0);
+#else
repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue()));
+#endif
}
void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
- ASSERT(calleeCodeBlock);
RepatchBuffer repatchBuffer(callerCodeBlock);
// Currently we only link calls with the exact number of arguments.
// If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
- if (callerArgCount == calleeCodeBlock->m_numParameters || calleeCodeBlock->codeType() == NativeCode) {
+ if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
ASSERT(!callLinkInfo->isLinked());
if (calleeCodeBlock)
@@ -936,6 +604,7 @@ void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* ca
// patch the call so we do not continue to try to link.
repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
}
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
} // namespace JSC
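
Side note on the JIT.cpp changes above: privateCompile() (and the static JIT::compile() wrapper changed in JIT.h below) now returns the generated JITCode instead of storing it on the CodeBlock via setJITCode() as a side effect. The sketch below is only a hypothetical call site showing how a caller might adapt; the helper name is made up and the surrounding JSC headers are assumed to be included.

    // Hypothetical caller, assuming the new JIT::compile() return value;
    // jitCompileAndAttach is an illustrative name, not JSC API.
    static void jitCompileAndAttach(JSC::JSGlobalData* globalData, JSC::CodeBlock* codeBlock)
    {
        JSC::JITCode code = JSC::JIT::compile(globalData, codeBlock); // generate machine code
        codeBlock->setJITCode(code);  // the caller now decides where the code is stored
    }
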
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h
index ceffe59..5c58e9d 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h
@@ -28,11 +28,6 @@
#include <wtf/Platform.h>
-// OBJECT_OFFSETOF: Like the C++ offsetof macro, but you can use it with classes.
-// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
-// NULL can cause compiler problems, especially in cases of multiple inheritance.
-#define OBJECT_OFFSETOF(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
-
#if ENABLE(JIT)
// We've run into some problems where changing the size of the class JIT leads to
@@ -64,14 +59,14 @@ namespace JSC {
class Register;
class RegisterFile;
class ScopeChainNode;
- class SimpleJumpTable;
- class StringJumpTable;
class StructureChain;
struct CallLinkInfo;
struct Instruction;
struct OperandTypes;
struct PolymorphicAccessStructureList;
+ struct SimpleJumpTable;
+ struct StringJumpTable;
struct StructureStubInfo;
struct CallRecord {
@@ -177,7 +172,6 @@ namespace JSC {
class JIT : private MacroAssembler {
friend class JITStubCall;
- friend class CallEvalJITStub;
using MacroAssembler::Jump;
using MacroAssembler::JumpList;
@@ -197,57 +191,82 @@ namespace JSC {
// MacroAssembler will need to plant register swaps if it is not -
// however the code will still function correctly.
#if PLATFORM(X86_64)
- static const RegisterID returnValueRegister = X86::eax;
- static const RegisterID cachedResultRegister = X86::eax;
- static const RegisterID firstArgumentRegister = X86::edi;
-
- static const RegisterID timeoutCheckRegister = X86::r12;
- static const RegisterID callFrameRegister = X86::r13;
- static const RegisterID tagTypeNumberRegister = X86::r14;
- static const RegisterID tagMaskRegister = X86::r15;
-
- static const RegisterID regT0 = X86::eax;
- static const RegisterID regT1 = X86::edx;
- static const RegisterID regT2 = X86::ecx;
- static const RegisterID regT3 = X86::ebx;
-
- static const FPRegisterID fpRegT0 = X86::xmm0;
- static const FPRegisterID fpRegT1 = X86::xmm1;
- static const FPRegisterID fpRegT2 = X86::xmm2;
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
+ static const RegisterID firstArgumentRegister = X86Registers::edi;
+
+ static const RegisterID timeoutCheckRegister = X86Registers::r12;
+ static const RegisterID callFrameRegister = X86Registers::r13;
+ static const RegisterID tagTypeNumberRegister = X86Registers::r14;
+ static const RegisterID tagMaskRegister = X86Registers::r15;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
#elif PLATFORM(X86)
- static const RegisterID returnValueRegister = X86::eax;
- static const RegisterID cachedResultRegister = X86::eax;
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
// On x86 we always use fastcall conventions = but on
// OS X if might make more sense to just use regparm.
- static const RegisterID firstArgumentRegister = X86::ecx;
-
- static const RegisterID timeoutCheckRegister = X86::esi;
- static const RegisterID callFrameRegister = X86::edi;
-
- static const RegisterID regT0 = X86::eax;
- static const RegisterID regT1 = X86::edx;
- static const RegisterID regT2 = X86::ecx;
- static const RegisterID regT3 = X86::ebx;
-
- static const FPRegisterID fpRegT0 = X86::xmm0;
- static const FPRegisterID fpRegT1 = X86::xmm1;
- static const FPRegisterID fpRegT2 = X86::xmm2;
-#elif PLATFORM_ARM_ARCH(7)
- static const RegisterID returnValueRegister = ARM::r0;
- static const RegisterID cachedResultRegister = ARM::r0;
- static const RegisterID firstArgumentRegister = ARM::r0;
-
- static const RegisterID regT0 = ARM::r0;
- static const RegisterID regT1 = ARM::r1;
- static const RegisterID regT2 = ARM::r2;
- static const RegisterID regT3 = ARM::r4;
-
- static const RegisterID callFrameRegister = ARM::r5;
- static const RegisterID timeoutCheckRegister = ARM::r6;
-
- static const FPRegisterID fpRegT0 = ARM::d0;
- static const FPRegisterID fpRegT1 = ARM::d1;
- static const FPRegisterID fpRegT2 = ARM::d2;
+ static const RegisterID firstArgumentRegister = X86Registers::ecx;
+
+ static const RegisterID timeoutCheckRegister = X86Registers::esi;
+ static const RegisterID callFrameRegister = X86Registers::edi;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+#elif PLATFORM(ARM_THUMB2)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ static const RegisterID regT3 = ARMRegisters::r4;
+
+ static const RegisterID callFrameRegister = ARMRegisters::r5;
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
+#elif PLATFORM(ARM_TRADITIONAL)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
+ static const RegisterID callFrameRegister = ARMRegisters::r4;
+ static const RegisterID ctiReturnRegister = ARMRegisters::r6;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ // Callee preserved
+ static const RegisterID regT3 = ARMRegisters::r7;
+
+ static const RegisterID regS0 = ARMRegisters::S0;
+ // Callee preserved
+ static const RegisterID regS1 = ARMRegisters::S1;
+
+ static const RegisterID regStackPtr = ARMRegisters::sp;
+ static const RegisterID regLink = ARMRegisters::lr;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
#else
#error "JIT not supported on this platform."
#endif
@@ -257,86 +276,10 @@ namespace JSC {
// will compress the displacement, and we may not be able to fit a patched offset.
static const int patchGetByIdDefaultOffset = 256;
-#if PLATFORM(X86_64)
- // These architecture specific value are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 10;
- static const int patchOffsetPutByIdExternalLoad = 20;
- static const int patchLengthPutByIdExternalLoad = 4;
- static const int patchOffsetPutByIdPropertyMapOffset = 31;
- // These architecture specific value are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 10;
- static const int patchOffsetGetByIdBranchToSlowCase = 20;
- static const int patchOffsetGetByIdExternalLoad = 20;
- static const int patchLengthGetByIdExternalLoad = 4;
- static const int patchOffsetGetByIdPropertyMapOffset = 31;
- static const int patchOffsetGetByIdPutResult = 31;
-#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 66;
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 44;
-#endif
- static const int patchOffsetOpCallCompareToJump = 9;
-
- static const int patchOffsetMethodCheckProtoObj = 20;
- static const int patchOffsetMethodCheckProtoStruct = 30;
- static const int patchOffsetMethodCheckPutFunction = 50;
-#elif PLATFORM(X86)
- // These architecture specific value are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 7;
- static const int patchOffsetPutByIdExternalLoad = 13;
- static const int patchLengthPutByIdExternalLoad = 3;
- static const int patchOffsetPutByIdPropertyMapOffset = 22;
- // These architecture specific value are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 7;
- static const int patchOffsetGetByIdBranchToSlowCase = 13;
- static const int patchOffsetGetByIdExternalLoad = 13;
- static const int patchLengthGetByIdExternalLoad = 3;
- static const int patchOffsetGetByIdPropertyMapOffset = 22;
- static const int patchOffsetGetByIdPutResult = 22;
-#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 31;
-#elif ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 33;
-#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 21;
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 23;
-#endif
- static const int patchOffsetOpCallCompareToJump = 6;
-
- static const int patchOffsetMethodCheckProtoObj = 11;
- static const int patchOffsetMethodCheckProtoStruct = 18;
- static const int patchOffsetMethodCheckPutFunction = 29;
-#elif PLATFORM_ARM_ARCH(7)
- // These architecture specific value are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 10;
- static const int patchOffsetPutByIdExternalLoad = 20;
- static const int patchLengthPutByIdExternalLoad = 12;
- static const int patchOffsetPutByIdPropertyMapOffset = 40;
- // These architecture specific value are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 10;
- static const int patchOffsetGetByIdBranchToSlowCase = 20;
- static const int patchOffsetGetByIdExternalLoad = 20;
- static const int patchLengthGetByIdExternalLoad = 12;
- static const int patchOffsetGetByIdPropertyMapOffset = 40;
- static const int patchOffsetGetByIdPutResult = 44;
-#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 0; // FIMXE
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 28;
-#endif
- static const int patchOffsetOpCallCompareToJump = 10;
-
- static const int patchOffsetMethodCheckProtoObj = 18;
- static const int patchOffsetMethodCheckProtoStruct = 28;
- static const int patchOffsetMethodCheckPutFunction = 46;
-#endif
-
public:
- static void compile(JSGlobalData* globalData, CodeBlock* codeBlock)
+ static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock)
{
- JIT jit(globalData, codeBlock);
- jit.privateCompile();
+ return JIT(globalData, codeBlock).privateCompile();
}
static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress)
@@ -373,15 +316,15 @@ namespace JSC {
jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
}
- static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
+ static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
{
JIT jit(globalData);
- jit.privateCompileCTIMachineTrampolines(executablePool, globalData, ctiArrayLengthTrampoline, ctiStringLengthTrampoline, ctiVirtualCallPreLink, ctiVirtualCallLink, ctiVirtualCall, ctiNativeCallThunk);
+ jit.privateCompileCTIMachineTrampolines(executablePool, globalData, ctiStringLengthTrampoline, ctiVirtualCallLink, ctiVirtualCall, ctiNativeCallThunk);
}
static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
- static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*);
+ static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*, ReturnAddressPtr);
static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
{
@@ -409,7 +352,7 @@ namespace JSC {
void privateCompileMainPass();
void privateCompileLinkPass();
void privateCompileSlowCases();
- void privateCompile();
+ JITCode privateCompile();
void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
@@ -417,17 +360,14 @@ namespace JSC {
void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress);
- void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk);
+ void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk);
void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
void addSlowCase(Jump);
+ void addSlowCase(JumpList);
void addJump(Jump, int);
void emitJumpSlowToHot(Jump, int);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
- void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex, bool isMethodCheck = false);
-#endif
void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
void compileOpCallVarargs(Instruction* instruction);
void compileOpCallInitializeCallFrame();
@@ -436,164 +376,428 @@ namespace JSC {
void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
void compileOpConstructSetupArgs(Instruction*);
+
enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
+#if USE(JSVALUE32_64)
+ Address tagFor(unsigned index, RegisterID base = callFrameRegister);
+ Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
+ Address addressFor(unsigned index, RegisterID base = callFrameRegister);
+
+ bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
+ bool isOperandConstantImmediateDouble(unsigned src);
+
+ void emitLoadTag(unsigned index, RegisterID tag);
+ void emitLoadPayload(unsigned index, RegisterID payload);
+
+ void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload);
+ void emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
+ void emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2);
+ void emitLoadDouble(unsigned index, FPRegisterID value);
+ void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);
+
+ void emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
+ void emitStore(unsigned index, const JSValue constant, RegisterID base = callFrameRegister);
+ void emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32 = false);
+ void emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32 = false);
+ void emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell = false);
+ void emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool = false);
+ void emitStoreDouble(unsigned index, FPRegisterID value);
+
+ bool isLabeled(unsigned bytecodeIndex);
+ void map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
+ void unmap(RegisterID);
+ void unmap();
+ bool isMapped(unsigned virtualRegisterIndex);
+ bool getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload);
+ bool getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag);
+
+ void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex);
+ void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag);
+ void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, unsigned virtualRegisterIndex);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ void compileGetByIdHotPath();
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
+#endif
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
+ void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset);
+
+ // Arithmetic opcode helpers
+ void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
+ void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
+ void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
+
+#if PLATFORM(X86)
+ // These architecture specific value are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdExternalLoad = 13;
+ static const int patchLengthPutByIdExternalLoad = 3;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 22;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
+ // These architecture specific value are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 7;
+ static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdExternalLoad = 13;
+ static const int patchLengthGetByIdExternalLoad = 3;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 22;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
+ static const int patchOffsetGetByIdPutResult = 28;
+#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 35;
+#elif ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 37;
+#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 25;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 27;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 6;
+
+ static const int patchOffsetMethodCheckProtoObj = 11;
+ static const int patchOffsetMethodCheckProtoStruct = 18;
+ static const int patchOffsetMethodCheckPutFunction = 29;
+#else
+#error "JSVALUE32_64 not supported on this platform."
+#endif
+
+#else // USE(JSVALUE32_64)
+ void emitGetVirtualRegister(int src, RegisterID dst);
+ void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
+ void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
+
+ int32_t getConstantOperandImmediateInt(unsigned src);
+
+ void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
+ void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);
+
+ void killLastResultRegister();
+
+ Jump emitJumpIfJSCell(RegisterID);
+ Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfJSCell(RegisterID);
+ Jump emitJumpIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID);
+ void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
+#if USE(JSVALUE64)
+ JIT::Jump emitJumpIfImmediateNumber(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
+#else
+ JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfImmediateInteger(reg);
+ }
+
+ JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
+ {
+ return emitJumpIfNotImmediateInteger(reg);
+ }
+#endif
+ JIT::Jump emitJumpIfImmediateInteger(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
+ JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
+ void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+
+#if !USE(JSVALUE64)
+ void emitFastArithDeTagImmediate(RegisterID);
+ Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
+#endif
+ void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
+ void emitFastArithImmToInt(RegisterID);
+ void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
+
+ void emitTagAsBoolImmediate(RegisterID reg);
+ void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+ void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
+#endif
void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset);
void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
- // Arithmetic Ops
+#if PLATFORM(X86_64)
+ // These architecture specific value are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdExternalLoad = 20;
+ static const int patchLengthPutByIdExternalLoad = 4;
+ static const int patchOffsetPutByIdPropertyMapOffset = 31;
+ // These architecture specific value are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdExternalLoad = 20;
+ static const int patchLengthGetByIdExternalLoad = 4;
+ static const int patchOffsetGetByIdPropertyMapOffset = 31;
+ static const int patchOffsetGetByIdPutResult = 31;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 63;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 41;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 9;
- void emit_op_add(Instruction*);
- void emit_op_sub(Instruction*);
- void emit_op_mul(Instruction*);
- void emit_op_mod(Instruction*);
- void emit_op_bitand(Instruction*);
- void emit_op_lshift(Instruction*);
- void emit_op_rshift(Instruction*);
- void emit_op_jnless(Instruction*);
- void emit_op_jnlesseq(Instruction*);
- void emit_op_pre_inc(Instruction*);
- void emit_op_pre_dec(Instruction*);
- void emit_op_post_inc(Instruction*);
- void emit_op_post_dec(Instruction*);
- void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ static const int patchOffsetMethodCheckProtoObj = 20;
+ static const int patchOffsetMethodCheckProtoStruct = 30;
+ static const int patchOffsetMethodCheckPutFunction = 50;
+#elif PLATFORM(X86)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdExternalLoad = 13;
+ static const int patchLengthPutByIdExternalLoad = 3;
+ static const int patchOffsetPutByIdPropertyMapOffset = 22;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 7;
+ static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdExternalLoad = 13;
+ static const int patchLengthGetByIdExternalLoad = 3;
+ static const int patchOffsetGetByIdPropertyMapOffset = 22;
+ static const int patchOffsetGetByIdPutResult = 22;
+#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 31;
+#elif ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 33;
+#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 21;
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 23;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 6;
- void emit_op_get_by_val(Instruction*);
- void emit_op_put_by_val(Instruction*);
- void emit_op_put_by_index(Instruction*);
- void emit_op_put_getter(Instruction*);
- void emit_op_put_setter(Instruction*);
- void emit_op_del_by_id(Instruction*);
+ static const int patchOffsetMethodCheckProtoObj = 11;
+ static const int patchOffsetMethodCheckProtoStruct = 18;
+ static const int patchOffsetMethodCheckPutFunction = 29;
+#elif PLATFORM(ARM_THUMB2)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdExternalLoad = 20;
+ static const int patchLengthPutByIdExternalLoad = 12;
+ static const int patchOffsetPutByIdPropertyMapOffset = 40;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdExternalLoad = 20;
+ static const int patchLengthGetByIdExternalLoad = 12;
+ static const int patchOffsetGetByIdPropertyMapOffset = 40;
+ static const int patchOffsetGetByIdPutResult = 44;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 28;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 10;
- void emit_op_mov(Instruction*);
- void emit_op_end(Instruction*);
- void emit_op_jmp(Instruction*);
- void emit_op_loop(Instruction*);
- void emit_op_loop_if_less(Instruction*);
- void emit_op_loop_if_lesseq(Instruction*);
- void emit_op_new_object(Instruction*);
- void emit_op_put_by_id(Instruction*);
- void emit_op_get_by_id(Instruction*);
- void emit_op_instanceof(Instruction*);
- void emit_op_new_func(Instruction*);
+ static const int patchOffsetMethodCheckProtoObj = 18;
+ static const int patchOffsetMethodCheckProtoStruct = 28;
+ static const int patchOffsetMethodCheckPutFunction = 46;
+#elif PLATFORM(ARM_TRADITIONAL)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 4;
+ static const int patchOffsetPutByIdExternalLoad = 16;
+ static const int patchLengthPutByIdExternalLoad = 4;
+ static const int patchOffsetPutByIdPropertyMapOffset = 20;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 4;
+ static const int patchOffsetGetByIdBranchToSlowCase = 16;
+ static const int patchOffsetGetByIdExternalLoad = 16;
+ static const int patchLengthGetByIdExternalLoad = 4;
+ static const int patchOffsetGetByIdPropertyMapOffset = 20;
+ static const int patchOffsetGetByIdPutResult = 28;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 36;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 12;
+
+ static const int patchOffsetMethodCheckProtoObj = 12;
+ static const int patchOffsetMethodCheckProtoStruct = 20;
+ static const int patchOffsetMethodCheckPutFunction = 32;
+#endif
+#endif // USE(JSVALUE32_64)
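
The patchOffset* constants above are byte distances from the start of an emitted property-access sequence to the fields that are rewritten once the structure and property-map offset are known. A minimal standalone sketch of that kind of in-place patching, assuming a plain byte buffer rather than JSC's repatch machinery (patchInt32At and examplePropertyMapOffset are illustrative names, not part of the patch):

#include <cstdint>
#include <cstring>
#include <cstdio>

static void patchInt32At(unsigned char* code, int byteOffset, int32_t newValue)
{
    std::memcpy(code + byteOffset, &newValue, sizeof newValue); // overwrite the immediate in place
}

int main()
{
    unsigned char buffer[64] = {};
    const int examplePropertyMapOffset = 22; // the X86 patchOffsetGetByIdPropertyMapOffset above
    patchInt32At(buffer, examplePropertyMapOffset, 0x1234);

    int32_t readBack;
    std::memcpy(&readBack, buffer + examplePropertyMapOffset, sizeof readBack);
    std::printf("%#x\n", readBack); // 0x1234
    return 0;
}
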
+
+#if PLATFORM(ARM_TRADITIONAL)
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 28;
+ static const int sequenceGetByIdHotPathConstantSpace = 3;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 2;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 28;
+ static const int sequencePutByIdConstantSpace = 3;
+#endif
+
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
+#define END_UNINTERRUPTED_SEQUENCE(name) endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
+
+ void beginUninterruptedSequence(int, int);
+ void endUninterruptedSequence(int, int);
+
+#else
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
+#define END_UNINTERRUPTED_SEQUENCE(name)
+#endif
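
BEGIN_UNINTERRUPTED_SEQUENCE/END_UNINTERRUPTED_SEQUENCE only matter on assemblers with a constant pool, where a pool flush in the middle of a patchable sequence would invalidate the fixed patch offsets above. A rough standalone sketch of the begin/end bookkeeping, under the simplifying assumption that the end call merely asserts the reserved space (SequenceGuard and the sequenceExample* constants are invented for illustration):

#include <cassert>

struct SequenceGuard {
    int reserved = 0;
    void beginUninterruptedSequence(int instructionSpace, int /*constantSpace*/) { reserved = instructionSpace; }
    void endUninterruptedSequence(int instructionSpace, int /*constantSpace*/) { assert(instructionSpace == reserved); }
};

#define BEGIN_SEQ(guard, name) (guard).beginUninterruptedSequence(name##InstructionSpace, name##ConstantSpace)
#define END_SEQ(guard, name)   (guard).endUninterruptedSequence(name##InstructionSpace, name##ConstantSpace)

static const int sequenceExampleInstructionSpace = 12; // illustrative values, not from the patch
static const int sequenceExampleConstantSpace = 2;

int main()
{
    SequenceGuard guard;
    BEGIN_SEQ(guard, sequenceExample);
    // ... a fixed-size, patchable instruction sequence would be emitted here ...
    END_SEQ(guard, sequenceExample);
    return 0;
}
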
+
+ void emit_op_add(Instruction*);
+ void emit_op_bitand(Instruction*);
+ void emit_op_bitnot(Instruction*);
+ void emit_op_bitor(Instruction*);
+ void emit_op_bitxor(Instruction*);
void emit_op_call(Instruction*);
void emit_op_call_eval(Instruction*);
- void emit_op_method_check(Instruction*);
- void emit_op_load_varargs(Instruction*);
void emit_op_call_varargs(Instruction*);
+ void emit_op_catch(Instruction*);
void emit_op_construct(Instruction*);
+ void emit_op_construct_verify(Instruction*);
+ void emit_op_convert_this(Instruction*);
+ void emit_op_create_arguments(Instruction*);
+ void emit_op_debug(Instruction*);
+ void emit_op_del_by_id(Instruction*);
+ void emit_op_div(Instruction*);
+ void emit_op_end(Instruction*);
+ void emit_op_enter(Instruction*);
+ void emit_op_enter_with_activation(Instruction*);
+ void emit_op_eq(Instruction*);
+ void emit_op_eq_null(Instruction*);
+ void emit_op_get_by_id(Instruction*);
+ void emit_op_get_by_val(Instruction*);
void emit_op_get_global_var(Instruction*);
- void emit_op_put_global_var(Instruction*);
void emit_op_get_scoped_var(Instruction*);
- void emit_op_put_scoped_var(Instruction*);
- void emit_op_tear_off_activation(Instruction*);
- void emit_op_tear_off_arguments(Instruction*);
- void emit_op_ret(Instruction*);
- void emit_op_new_array(Instruction*);
- void emit_op_resolve(Instruction*);
- void emit_op_construct_verify(Instruction*);
- void emit_op_to_primitive(Instruction*);
- void emit_op_strcat(Instruction*);
- void emit_op_resolve_func(Instruction*);
- void emit_op_loop_if_true(Instruction*);
- void emit_op_resolve_base(Instruction*);
- void emit_op_resolve_skip(Instruction*);
- void emit_op_resolve_global(Instruction*);
- void emit_op_not(Instruction*);
- void emit_op_jfalse(Instruction*);
+ void emit_op_init_arguments(Instruction*);
+ void emit_op_instanceof(Instruction*);
void emit_op_jeq_null(Instruction*);
+ void emit_op_jfalse(Instruction*);
+ void emit_op_jmp(Instruction*);
+ void emit_op_jmp_scopes(Instruction*);
void emit_op_jneq_null(Instruction*);
void emit_op_jneq_ptr(Instruction*);
- void emit_op_unexpected_load(Instruction*);
+ void emit_op_jnless(Instruction*);
+ void emit_op_jnlesseq(Instruction*);
void emit_op_jsr(Instruction*);
- void emit_op_sret(Instruction*);
- void emit_op_eq(Instruction*);
- void emit_op_bitnot(Instruction*);
- void emit_op_resolve_with_base(Instruction*);
- void emit_op_new_func_exp(Instruction*);
void emit_op_jtrue(Instruction*);
+ void emit_op_load_varargs(Instruction*);
+ void emit_op_loop(Instruction*);
+ void emit_op_loop_if_less(Instruction*);
+ void emit_op_loop_if_lesseq(Instruction*);
+ void emit_op_loop_if_true(Instruction*);
+ void emit_op_lshift(Instruction*);
+ void emit_op_method_check(Instruction*);
+ void emit_op_mod(Instruction*);
+ void emit_op_mov(Instruction*);
+ void emit_op_mul(Instruction*);
+ void emit_op_negate(Instruction*);
void emit_op_neq(Instruction*);
- void emit_op_bitxor(Instruction*);
+ void emit_op_neq_null(Instruction*);
+ void emit_op_new_array(Instruction*);
+ void emit_op_new_error(Instruction*);
+ void emit_op_new_func(Instruction*);
+ void emit_op_new_func_exp(Instruction*);
+ void emit_op_new_object(Instruction*);
void emit_op_new_regexp(Instruction*);
- void emit_op_bitor(Instruction*);
- void emit_op_throw(Instruction*);
void emit_op_next_pname(Instruction*);
- void emit_op_push_scope(Instruction*);
- void emit_op_pop_scope(Instruction*);
- void emit_op_stricteq(Instruction*);
+ void emit_op_not(Instruction*);
void emit_op_nstricteq(Instruction*);
- void emit_op_to_jsnumber(Instruction*);
+ void emit_op_pop_scope(Instruction*);
+ void emit_op_post_dec(Instruction*);
+ void emit_op_post_inc(Instruction*);
+ void emit_op_pre_dec(Instruction*);
+ void emit_op_pre_inc(Instruction*);
+ void emit_op_profile_did_call(Instruction*);
+ void emit_op_profile_will_call(Instruction*);
void emit_op_push_new_scope(Instruction*);
- void emit_op_catch(Instruction*);
- void emit_op_jmp_scopes(Instruction*);
- void emit_op_switch_imm(Instruction*);
+ void emit_op_push_scope(Instruction*);
+ void emit_op_put_by_id(Instruction*);
+ void emit_op_put_by_index(Instruction*);
+ void emit_op_put_by_val(Instruction*);
+ void emit_op_put_getter(Instruction*);
+ void emit_op_put_global_var(Instruction*);
+ void emit_op_put_scoped_var(Instruction*);
+ void emit_op_put_setter(Instruction*);
+ void emit_op_resolve(Instruction*);
+ void emit_op_resolve_base(Instruction*);
+ void emit_op_resolve_global(Instruction*);
+ void emit_op_resolve_skip(Instruction*);
+ void emit_op_resolve_with_base(Instruction*);
+ void emit_op_ret(Instruction*);
+ void emit_op_rshift(Instruction*);
+ void emit_op_sret(Instruction*);
+ void emit_op_strcat(Instruction*);
+ void emit_op_stricteq(Instruction*);
+ void emit_op_sub(Instruction*);
void emit_op_switch_char(Instruction*);
+ void emit_op_switch_imm(Instruction*);
void emit_op_switch_string(Instruction*);
- void emit_op_new_error(Instruction*);
- void emit_op_debug(Instruction*);
- void emit_op_eq_null(Instruction*);
- void emit_op_neq_null(Instruction*);
- void emit_op_enter(Instruction*);
- void emit_op_enter_with_activation(Instruction*);
- void emit_op_init_arguments(Instruction*);
- void emit_op_create_arguments(Instruction*);
- void emit_op_convert_this(Instruction*);
- void emit_op_profile_will_call(Instruction*);
- void emit_op_profile_did_call(Instruction*);
+ void emit_op_tear_off_activation(Instruction*);
+ void emit_op_tear_off_arguments(Instruction*);
+ void emit_op_throw(Instruction*);
+ void emit_op_to_jsnumber(Instruction*);
+ void emit_op_to_primitive(Instruction*);
+ void emit_op_unexpected_load(Instruction*);
- void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_construct_verify(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
-#if ENABLE(JIT_OPTIMIZE_ARITHMETIC)
- void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
- void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
-#endif
-
- void emitGetVirtualRegister(int src, RegisterID dst);
- void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
- void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
-
+ /* These functions are deprecated: Please use JITStubCall instead. */
void emitPutJITStubArg(RegisterID src, unsigned argumentNumber);
+#if USE(JSVALUE32_64)
+ void emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber);
+ void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2);
+#else
void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch);
+#endif
void emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber);
void emitPutJITStubArgConstant(void* value, unsigned argumentNumber);
void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
@@ -606,30 +810,8 @@ namespace JSC {
void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
JSValue getConstantOperand(unsigned src);
- int32_t getConstantOperandImmediateInt(unsigned src);
bool isOperandConstantImmediateInt(unsigned src);
- Jump emitJumpIfJSCell(RegisterID);
- Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
- void emitJumpSlowCaseIfJSCell(RegisterID);
- Jump emitJumpIfNotJSCell(RegisterID);
- void emitJumpSlowCaseIfNotJSCell(RegisterID);
- void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
-#if USE(ALTERNATE_JSIMMEDIATE)
- JIT::Jump emitJumpIfImmediateNumber(RegisterID);
- JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
-#else
- JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
- {
- return emitJumpIfImmediateInteger(reg);
- }
-
- JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
- {
- return emitJumpIfNotImmediateInteger(reg);
- }
-#endif
-
Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
return iter++->from;
@@ -641,43 +823,22 @@ namespace JSC {
}
void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg);
- JIT::Jump emitJumpIfImmediateInteger(RegisterID);
- JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
- JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
- void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
- void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
-
Jump checkStructure(RegisterID reg, Structure* structure);
-#if !USE(ALTERNATE_JSIMMEDIATE)
- void emitFastArithDeTagImmediate(RegisterID);
- Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
-#endif
- void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
- void emitFastArithImmToInt(RegisterID);
- void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
-
- void emitTagAsBoolImmediate(RegisterID reg);
-
void restoreArgumentReference();
void restoreArgumentReferenceForTrampoline();
Call emitNakedCall(CodePtr function = CodePtr());
+
void preserveReturnAddressAfterCall(RegisterID);
void restoreReturnAddressBeforeReturn(RegisterID);
void restoreReturnAddressBeforeReturn(Address);
- void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
- void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);
-
void emitTimeoutCheck();
#ifndef NDEBUG
void printBytecodeOperandTypes(unsigned src1, unsigned src2);
#endif
- void killLastResultRegister();
-
-
#if ENABLE(SAMPLING_FLAGS)
void setSamplingFlag(int32_t);
void clearSamplingFlag(int32_t);
@@ -713,15 +874,29 @@ namespace JSC {
Vector<SlowCaseEntry> m_slowCases;
Vector<SwitchRecord> m_switches;
- int m_lastResultBytecodeRegister;
- unsigned m_jumpTargetsPosition;
-
unsigned m_propertyAccessInstructionIndex;
unsigned m_globalResolveInfoIndex;
unsigned m_callLinkInfoIndex;
- } JIT_CLASS_ALIGNMENT;
-}
+#if USE(JSVALUE32_64)
+ unsigned m_jumpTargetIndex;
+ unsigned m_mappedBytecodeIndex;
+ unsigned m_mappedVirtualRegisterIndex;
+ RegisterID m_mappedTag;
+ RegisterID m_mappedPayload;
+#else
+ int m_lastResultBytecodeRegister;
+ unsigned m_jumpTargetsPosition;
+#endif
+
+#ifndef NDEBUG
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+ Label m_uninterruptedInstructionSequenceBegin;
+ int m_uninterruptedConstantSequenceBegin;
+#endif
+#endif
+ } JIT_CLASS_ALIGNMENT;
+} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp
index 15808e2..3be13cb 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp
@@ -41,11 +41,1095 @@
#include <stdio.h>
#endif
-
using namespace std;
namespace JSC {
+#if USE(JSVALUE32_64)
+
+void JIT::emit_op_negate(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branch32(Equal, regT0, Imm32(0)));
+
+ neg32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+
+ Jump end = jump();
+
+ srcNotInt.link(this);
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ xor32(Imm32(1 << 31), regT1);
+ store32(regT1, tagFor(dst));
+ if (dst != src)
+ store32(regT0, payloadFor(dst));
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // 0 check
+ linkSlowCase(iter); // double check
+
+ JITStubCall stubCall(this, cti_op_negate);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
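
In the JSVALUE32_64 encoding a double occupies both 32-bit words of the register pair, so the non-int32 path above negates it by xor-ing the sign bit into the high (tag) word. A standalone sketch of that trick, assuming a little-endian layout (negateViaHighWord is an illustrative helper, not JSC code):

#include <cstdint>
#include <cstring>
#include <cstdio>

static double negateViaHighWord(double d)
{
    uint32_t payload, tag;
    std::memcpy(&payload, &d, sizeof payload);                            // low word
    std::memcpy(&tag, reinterpret_cast<const char*>(&d) + 4, sizeof tag); // high word holds the sign bit
    tag ^= 0x80000000u;                                                   // same flip as xor32(Imm32(1 << 31), regT1)

    double result;
    std::memcpy(&result, &payload, sizeof payload);
    std::memcpy(reinterpret_cast<char*>(&result) + 4, &tag, sizeof tag);
    return result;
}

int main()
{
    std::printf("%g %g\n", negateViaHighWord(2.5), negateViaHighWord(-0.0)); // -2.5 0
    return 0;
}
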
+
+void JIT::emit_op_jnless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Int32 less.
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, regT2), target + 3);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
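
op_jnless branches when !(op1 < op2); when op1 is the constant the operands end up swapped, which is why that case tests LessThanOrEqual on op2 while the other cases test GreaterThanOrEqual on op1. A small scalar sketch of the condition being implemented (jnlessTaken is an illustrative name):

#include <cstdio>

static bool jnlessTaken(int op1, int op2)
{
    return !(op1 < op2); // i.e. op1 >= op2, or equivalently op2 <= op1 when op1 is the constant
}

int main()
{
    std::printf("%d %d\n", jnlessTaken(1, 2), jnlessTaken(2, 1)); // 0 1
    return 0;
}
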
+
+void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+}
+
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Int32 less.
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThan, regT0, regT2), target + 3);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(op_jnlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+}
+
+// LeftShift (<<)
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ lshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_lshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// RightShift (>>)
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ rshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ rshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_rshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitAnd (&)
+
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ and32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ and32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitOr (|)
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ or32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ or32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitXor (^)
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ xor32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ xor32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitxor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitNot (~)
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+ not32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+}
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitnot);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+// PostInc (i++)
+
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+ if (dst == srcDst) // x = x++ is a noop for ints.
+ return;
+
+ emitStoreInt32(dst, regT0);
+
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PostDec (i--)
+
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+ if (dst == srcDst) // x = x-- is a noop for ints.
+ return;
+
+ emitStoreInt32(dst, regT0);
+
+ addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PreInc (++i)
+
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// PreDec (--i)
+
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
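
All four inc/dec fast paths rely on branchAdd32/branchSub32 bailing out on signed overflow, so that INT32_MAX + 1 and INT32_MIN - 1 are redone in the stub rather than wrapping. A plain C++ sketch of that check, assuming the slow path simply recomputes in double arithmetic (checkedInc is an illustrative name):

#include <cstdint>
#include <cstdio>

static bool checkedInc(int32_t& value)
{
    if (value == INT32_MAX)
        return false;      // would overflow: the JIT's slow case redoes this as a double
    ++value;
    return true;           // stayed on the int32 fast path
}

int main()
{
    int32_t v = INT32_MAX;
    if (!checkedInc(v))
        std::printf("overflow -> double path gives %g\n", static_cast<double>(INT32_MAX) + 1);
    return 0;
}
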
+
+// Addition (+)
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchAdd32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ addDouble(fpRegT1, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+
+ end.link(this);
+}
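
emitAdd32Constant tries the int32 add under an overflow check and, when the operand is not an int32 or the add overflows, redoes the work in double registers. A scalar sketch of that fast/slow split, assuming the operand is already known to be numeric (addConstant is an illustrative name):

#include <cstdint>
#include <cstdio>

static double addConstant(int32_t op, int32_t constant)
{
    int64_t wide = static_cast<int64_t>(op) + constant;
    if (wide >= INT32_MIN && wide <= INT32_MAX)
        return static_cast<double>(static_cast<int32_t>(wide)); // int32 fast-path result
    return static_cast<double>(op) + constant;                  // overflow: double fallback
}

int main()
{
    std::printf("%g %g\n", addConstant(1, 2), addConstant(INT32_MAX, 1));
    return 0;
}
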
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // non-sse case
+ return;
+ }
+
+ ResultType opType = op == op1 ? types.first() : types.second();
+ if (!opType.definitelyIsNumber())
+ linkSlowCase(iter); // double check
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Subtraction (-)
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchSub32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
+ linkSlowCase(iter); // int32 or double check
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_sub);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
+{
+ JumpList end;
+
+ if (!notInt32Op1.empty()) {
+ // Double case 1: Op1 is not int32; Op2 is unknown.
+ notInt32Op1.link(this);
+
+ ASSERT(op1IsInRegisters);
+
+ // Verify Op1 is double.
+ if (!types.first().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ if (!op2IsInRegisters)
+ emitLoad(op2, regT3, regT2);
+
+ Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
+
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT2, fpRegT0);
+ Jump doTheMath = jump();
+
+ // Load Op2 as double into double register.
+ doubleOp2.link(this);
+ emitLoadDouble(op2, fpRegT0);
+
+ // Do the math.
+ doTheMath.link(this);
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op1, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op1, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op1, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_div:
+ emitLoadDouble(op1, fpRegT1);
+ divDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_jnless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT2), dst + 3);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT2), dst + 3);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ if (!notInt32Op2.empty())
+ end.append(jump());
+ }
+
+ if (!notInt32Op2.empty()) {
+ // Double case 2: Op1 is int32; Op2 is not int32.
+ notInt32Op2.link(this);
+
+ ASSERT(op2IsInRegisters);
+
+ if (!op1IsInRegisters)
+ emitLoadPayload(op1, regT0);
+
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ // Verify op2 is double.
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
+
+ // Do the math.
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op2, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op2, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op2, fpRegT2);
+ subDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_div:
+ emitLoadDouble(op2, fpRegT2);
+ divDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_jnless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), dst + 3);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), dst + 3);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ end.link(this);
+}
+
+// Multiplication (*)
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ move(regT0, regT3);
+ addSlowCase(branchMul32(Overflow, regT2, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ Jump overflow = getSlowCase(iter); // overflow check
+ linkSlowCase(iter); // zero result check
+
+ Jump negZero = branchOr32(Signed, regT2, regT3);
+ emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
+
+ negZero.link(this);
+ overflow.link(this);
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ }
+
+ if (supportsFloatingPoint()) {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ Label jitStubCall(this);
+ JITStubCall stubCall(this, cti_op_mul);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
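
The extra zero-result slow case exists because an int32 multiply cannot represent -0: if the product is zero and either operand was negative, ECMAScript requires -0, which is what the branchOr32(Signed, ...) path above stores. A scalar sketch of that rule (jsStyleMul is an illustrative name and folds the overflow fallback into a double as well):

#include <cstdio>

static double jsStyleMul(int a, int b)
{
    long long wide = static_cast<long long>(a) * b;
    if (wide == 0 && (a < 0 || b < 0))
        return -0.0;                   // the result the negZero path above stores
    return static_cast<double>(wide);  // overflow likewise leaves the int32 fast path
}

int main()
{
    std::printf("%g %g\n", jsStyleMul(-2, 0), jsStyleMul(3, 4)); // -0 12
    return 0;
}
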
+
+// Division (/)
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(jump());
+ return;
+ }
+
+ // Int32 divide.
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ JumpList end;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT0, fpRegT0);
+ convertInt32ToDouble(regT2, fpRegT1);
+ divDouble(fpRegT1, fpRegT0);
+
+ JumpList doubleResult;
+ if (!isOperandConstantImmediateInt(op1) || getConstantOperand(op1).asInt32() > 1) {
+ m_assembler.cvttsd2si_rr(fpRegT0, regT0);
+ convertInt32ToDouble(regT0, fpRegT1);
+ m_assembler.ucomisd_rr(fpRegT1, fpRegT0);
+
+ doubleResult.append(m_assembler.jne());
+ doubleResult.append(m_assembler.jp());
+
+ doubleResult.append(branchTest32(Zero, regT0));
+
+ // Int32 result.
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+ end.append(jump());
+ }
+
+ // Double result.
+ doubleResult.link(this);
+ emitStoreDouble(dst, fpRegT0);
+ end.append(jump());
+
+ // Double divide.
+ emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter);
+ else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_div);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Mod (%)
+
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
+#if PLATFORM(X86) || PLATFORM(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
+ emitLoad(op1, X86Registers::edx, X86Registers::eax);
+ move(Imm32(getConstantOperand(op2).asInt32()), X86Registers::ecx);
+ addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
+ if (getConstantOperand(op2).asInt32() == -1)
+ addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ } else {
+ emitLoad2(op1, X86Registers::edx, X86Registers::eax, op2, X86Registers::ebx, X86Registers::ecx);
+ addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, X86Registers::ebx, Imm32(JSValue::Int32Tag)));
+
+ addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ addSlowCase(branch32(Equal, X86Registers::ecx, Imm32(0))); // divide by 0
+ }
+
+ move(X86Registers::eax, X86Registers::ebx); // Save dividend payload, in case of 0.
+ m_assembler.cdq();
+ m_assembler.idivl_r(X86Registers::ecx);
+
+ // If the remainder is zero and the dividend is negative, the result is -0.
+ Jump storeResult1 = branchTest32(NonZero, X86Registers::edx);
+ Jump storeResult2 = branchTest32(Zero, X86Registers::ebx, Imm32(0x80000000)); // not negative
+ emitStore(dst, jsNumber(m_globalData, -0.0));
+ Jump end = jump();
+
+ storeResult1.link(this);
+ storeResult2.link(this);
+ emitStoreInt32(dst, X86Registers::edx, (op1 == dst || op2 == dst));
+ end.link(this);
+}
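
The remainder fast path has the same sign subtlety: idiv yields 0 for a negative dividend such as -3 % 3, but ECMAScript gives the remainder the dividend's sign, so the code above stores -0.0 in that case. A scalar sketch of the rule (jsStyleMod is an illustrative name):

#include <cstdio>

static double jsStyleMod(int dividend, int divisor)
{
    int remainder = dividend % divisor;
    if (remainder == 0 && dividend < 0)
        return -0.0;                          // keep the dividend's sign, as emit_op_mod does
    return static_cast<double>(remainder);
}

int main()
{
    std::printf("%g %g\n", jsStyleMod(-3, 3), jsStyleMod(7, 3)); // -0 1
    return 0;
}
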
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
+ linkSlowCase(iter); // int32 check
+ if (getConstantOperand(op2).asInt32() == -1)
+ linkSlowCase(iter); // 0x80000000 check
+ } else {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // 0 check
+ linkSlowCase(iter); // 0x80000000 check
+ }
+
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+#else // PLATFORM(X86) || PLATFORM(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+}
+
+#endif // PLATFORM(X86) || PLATFORM(X86_64)
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
+
+#else // USE(JSVALUE32_64)
+
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
@@ -64,7 +1148,7 @@ void JIT::emit_op_lshift(Instruction* currentInstruction)
and32(Imm32(0x1f), regT2);
#endif
lshift32(regT2, regT0);
-#if !USE(ALTERNATE_JSIMMEDIATE)
+#if !USE(JSVALUE64)
addSlowCase(branchAdd32(Overflow, regT0, regT0));
signExtend32ToPtr(regT0, regT0);
#endif
@@ -78,7 +1162,7 @@ void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEnt
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
UNUSED_PARAM(op1);
UNUSED_PARAM(op2);
linkSlowCase(iter);
@@ -92,7 +1176,7 @@ void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEnt
notImm1.link(this);
notImm2.link(this);
#endif
- JITStubCall stubCall(this, JITStubs::cti_op_lshift);
+ JITStubCall stubCall(this, cti_op_lshift);
stubCall.addArgument(regT0);
stubCall.addArgument(regT2);
stubCall.call(result);
@@ -109,7 +1193,7 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
// Mask with 0x1f as per ecma-262 11.7.2 step 7.
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#else
rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
@@ -118,14 +1202,14 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
emitGetVirtualRegisters(op1, regT0, op2, regT2);
if (supportsFloatingPointTruncate()) {
Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
- // supportsFloatingPoint() && USE(ALTERNATE_JSIMMEDIATE) => 3 SlowCases
+#if USE(JSVALUE64)
+ // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
addSlowCase(emitJumpIfNotImmediateNumber(regT0));
addPtr(tagTypeNumberRegister, regT0);
movePtrToDouble(regT0, fpRegT0);
addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
#else
- // supportsFloatingPoint() && !USE(ALTERNATE_JSIMMEDIATE) => 5 SlowCases (of which 1 IfNotJSCell)
+ // supportsFloatingPoint() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
emitJumpSlowCaseIfNotJSCell(regT0, op1);
addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
@@ -145,13 +1229,13 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
// On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
and32(Imm32(0x1f), regT2);
#endif
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
rshift32(regT2, regT0);
#else
rshiftPtr(regT2, regT0);
#endif
}
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
emitFastArithIntToImmNoCheck(regT0, regT0);
#else
orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
@@ -165,7 +1249,7 @@ void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEnt
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- JITStubCall stubCall(this, JITStubs::cti_op_rshift);
+ JITStubCall stubCall(this, cti_op_rshift);
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
@@ -173,7 +1257,7 @@ void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.addArgument(op2, regT2);
} else {
if (supportsFloatingPointTruncate()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
@@ -214,7 +1298,7 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
@@ -223,7 +1307,7 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
@@ -253,7 +1337,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
addPtr(tagTypeNumberRegister, regT0);
movePtrToDouble(regT0, fpRegT0);
@@ -266,7 +1350,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif
- int32_t op2imm = getConstantOperand(op2).getInt32Fast();;
+ int32_t op2imm = getConstantOperand(op2).asInt32();
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
@@ -275,7 +1359,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
#else
if (!m_codeBlock->isKnownNotImmediate(op1))
@@ -284,7 +1368,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
#endif
}
- JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ JITStubCall stubCall(this, cti_op_jless);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call();
@@ -294,7 +1378,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
addPtr(tagTypeNumberRegister, regT1);
movePtrToDouble(regT1, fpRegT1);
@@ -307,7 +1391,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
- int32_t op1imm = getConstantOperand(op1).getInt32Fast();;
+ int32_t op1imm = getConstantOperand(op1).asInt32();
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
@@ -316,7 +1400,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
#else
if (!m_codeBlock->isKnownNotImmediate(op2))
@@ -325,7 +1409,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
#endif
}
- JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ JITStubCall stubCall(this, cti_op_jless);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT1);
stubCall.call();
@@ -335,7 +1419,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
Jump fail3 = emitJumpIfImmediateInteger(regT1);
@@ -362,7 +1446,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
fail2.link(this);
fail3.link(this);
@@ -377,7 +1461,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
}
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ JITStubCall stubCall(this, cti_op_jless);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
@@ -399,7 +1483,7 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
@@ -408,7 +1492,7 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
@@ -438,7 +1522,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
addPtr(tagTypeNumberRegister, regT0);
movePtrToDouble(regT0, fpRegT0);
@@ -451,7 +1535,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif
- int32_t op2imm = getConstantOperand(op2).getInt32Fast();;
+ int32_t op2imm = getConstantOperand(op2).asInt32();
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
@@ -460,7 +1544,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
#else
if (!m_codeBlock->isKnownNotImmediate(op1))
@@ -469,7 +1553,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
#endif
}
- JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ JITStubCall stubCall(this, cti_op_jlesseq);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call();
@@ -479,7 +1563,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
addPtr(tagTypeNumberRegister, regT1);
movePtrToDouble(regT1, fpRegT1);
@@ -492,7 +1576,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
- int32_t op1imm = getConstantOperand(op1).getInt32Fast();;
+ int32_t op1imm = getConstantOperand(op1).asInt32();
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
@@ -501,7 +1585,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
#else
if (!m_codeBlock->isKnownNotImmediate(op2))
@@ -510,7 +1594,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
#endif
}
- JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ JITStubCall stubCall(this, cti_op_jlesseq);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT1);
stubCall.call();
@@ -520,7 +1604,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
Jump fail3 = emitJumpIfImmediateInteger(regT1);
@@ -547,7 +1631,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
fail1.link(this);
fail2.link(this);
fail3.link(this);
@@ -562,7 +1646,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
}
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ JITStubCall stubCall(this, cti_op_jlesseq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
@@ -579,7 +1663,7 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t imm = getConstantOperandImmediateInt(op1);
andPtr(Imm32(imm), regT0);
if (imm >= 0)
@@ -590,7 +1674,7 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
} else if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t imm = getConstantOperandImmediateInt(op2);
andPtr(Imm32(imm), regT0);
if (imm >= 0)
@@ -614,17 +1698,17 @@ void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
if (isOperandConstantImmediateInt(op1)) {
- JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ JITStubCall stubCall(this, cti_op_bitand);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT0);
stubCall.call(result);
} else if (isOperandConstantImmediateInt(op2)) {
- JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ JITStubCall stubCall(this, cti_op_bitand);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
} else {
- JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ JITStubCall stubCall(this, cti_op_bitand);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT1);
stubCall.call(result);
@@ -639,7 +1723,7 @@ void JIT::emit_op_post_inc(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
move(regT0, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
emitFastArithIntToImmNoCheck(regT1, regT1);
#else
@@ -657,7 +1741,7 @@ void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_post_inc);
+ JITStubCall stubCall(this, cti_op_post_inc);
stubCall.addArgument(regT0);
stubCall.addArgument(Imm32(srcDst));
stubCall.call(result);
@@ -671,7 +1755,7 @@ void JIT::emit_op_post_dec(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
move(regT0, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
addSlowCase(branchSub32(Zero, Imm32(1), regT1));
emitFastArithIntToImmNoCheck(regT1, regT1);
#else
@@ -689,7 +1773,7 @@ void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_post_dec);
+ JITStubCall stubCall(this, cti_op_post_dec);
stubCall.addArgument(regT0);
stubCall.addArgument(Imm32(srcDst));
stubCall.call(result);
@@ -701,7 +1785,7 @@ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
emitFastArithIntToImmNoCheck(regT0, regT0);
#else
@@ -719,7 +1803,7 @@ void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEn
linkSlowCase(iter);
emitGetVirtualRegister(srcDst, regT0);
notImm.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_pre_inc);
+ JITStubCall stubCall(this, cti_op_pre_inc);
stubCall.addArgument(regT0);
stubCall.call(srcDst);
}
@@ -730,7 +1814,7 @@ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
addSlowCase(branchSub32(Zero, Imm32(1), regT0));
emitFastArithIntToImmNoCheck(regT0, regT0);
#else
@@ -748,7 +1832,7 @@ void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEn
linkSlowCase(iter);
emitGetVirtualRegister(srcDst, regT0);
notImm.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_pre_dec);
+ JITStubCall stubCall(this, cti_op_pre_dec);
stubCall.addArgument(regT0);
stubCall.call(srcDst);
}
@@ -763,21 +1847,21 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
-#if USE(ALTERNATE_JSIMMEDIATE)
- addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
+ emitGetVirtualRegisters(op1, X86Registers::eax, op2, X86Registers::ecx);
+ emitJumpSlowCaseIfNotImmediateInteger(X86Registers::eax);
+ emitJumpSlowCaseIfNotImmediateInteger(X86Registers::ecx);
+#if USE(JSVALUE64)
+ addSlowCase(branchPtr(Equal, X86Registers::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
m_assembler.cdq();
- m_assembler.idivl_r(X86::ecx);
+ m_assembler.idivl_r(X86Registers::ecx);
#else
- emitFastArithDeTagImmediate(X86::eax);
- addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
+ emitFastArithDeTagImmediate(X86Registers::eax);
+ addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86Registers::ecx));
m_assembler.cdq();
- m_assembler.idivl_r(X86::ecx);
- signExtend32ToPtr(X86::edx, X86::edx);
+ m_assembler.idivl_r(X86Registers::ecx);
+ signExtend32ToPtr(X86Registers::edx, X86Registers::edx);
#endif
- emitFastArithReTagImmediate(X86::edx, X86::eax);
+ emitFastArithReTagImmediate(X86Registers::edx, X86Registers::eax);
emitPutVirtualRegister(result);
}
@@ -785,7 +1869,7 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
{
unsigned result = currentInstruction[1].u.operand;
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
@@ -793,14 +1877,14 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
Jump notImm1 = getSlowCase(iter);
Jump notImm2 = getSlowCase(iter);
linkSlowCase(iter);
- emitFastArithReTagImmediate(X86::eax, X86::eax);
- emitFastArithReTagImmediate(X86::ecx, X86::ecx);
+ emitFastArithReTagImmediate(X86Registers::eax, X86Registers::eax);
+ emitFastArithReTagImmediate(X86Registers::ecx, X86Registers::ecx);
notImm1.link(this);
notImm2.link(this);
#endif
- JITStubCall stubCall(this, JITStubs::cti_op_mod);
- stubCall.addArgument(X86::eax);
- stubCall.addArgument(X86::ecx);
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(X86Registers::eax);
+ stubCall.addArgument(X86Registers::ecx);
stubCall.call(result);
}
@@ -812,7 +1896,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- JITStubCall stubCall(this, JITStubs::cti_op_mod);
+ JITStubCall stubCall(this, cti_op_mod);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -827,64 +1911,9 @@ void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
/* ------------------------------ END: OP_MOD ------------------------------ */
-#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
-
-void JIT::emit_op_add(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, JITStubs::cti_op_add);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
-}
-
-void JIT::emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_mul(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, JITStubs::cti_op_mul);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
-}
-
-void JIT::emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_sub(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+#if USE(JSVALUE64)
- JITStubCall stubCall(this, JITStubs::cti_op_sub);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
-}
-
-void JIT::emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-#elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
-
-/* ------------------------------ BEGIN: USE(ALTERNATE_JSIMMEDIATE) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
@@ -917,7 +1946,7 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
emitGetVirtualRegister(op1, regT0);
Label stubFunctionCall(this);
- JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
+ JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(result);
@@ -968,7 +1997,7 @@ void JIT::emit_op_add(Instruction* currentInstruction)
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- JITStubCall stubCall(this, JITStubs::cti_op_add);
+ JITStubCall stubCall(this, cti_op_add);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1000,7 +2029,7 @@ void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>
if (isOperandConstantImmediateInt(op1) || isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_add);
+ JITStubCall stubCall(this, cti_op_add);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1045,7 +2074,7 @@ void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter);
linkSlowCase(iter);
// There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
- JITStubCall stubCall(this, JITStubs::cti_op_mul);
+ JITStubCall stubCall(this, cti_op_mul);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1075,9 +2104,9 @@ void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>
compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
}
-#else // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+#else // USE(JSVALUE64)
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+/* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
@@ -1244,7 +2273,7 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
if (opcodeID == op_mul)
linkSlowCase(iter);
- JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
+ JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
stubCall.addArgument(src1, regT2);
stubCall.addArgument(src2, regT2);
stubCall.call(dst);
@@ -1273,7 +2302,7 @@ void JIT::emit_op_add(Instruction* currentInstruction)
if (types.first().mightBeNumber() && types.second().mightBeNumber())
compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
else {
- JITStubCall stubCall(this, JITStubs::cti_op_add);
+ JITStubCall stubCall(this, cti_op_add);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1292,7 +2321,7 @@ void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter);
sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
notImm.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_add);
+ JITStubCall stubCall(this, cti_op_add);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT0);
stubCall.call(result);
@@ -1301,7 +2330,7 @@ void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter);
sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
notImm.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_add);
+ JITStubCall stubCall(this, cti_op_add);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1351,7 +2380,7 @@ void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter);
linkSlowCase(iter);
// There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
- JITStubCall stubCall(this, JITStubs::cti_op_mul);
+ JITStubCall stubCall(this, cti_op_mul);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
@@ -1369,10 +2398,12 @@ void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>
compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
-#endif // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+#endif // USE(JSVALUE64)
/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
+#endif // USE(JSVALUE32_64)
+
} // namespace JSC
#endif // ENABLE(JIT)
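The op_mod fast paths above lean on the x86 idiv instruction and therefore guard the cases where the hardware and ECMAScript disagree: idiv faults on a zero divisor and on INT_MIN / -1, and a zero remainder of a negative dividend must come back as -0, which an int32 cannot encode (hence the emitStore of the double -0.0). A minimal C++ sketch of those semantics, illustrative only and not JSC code:

    #include <cstdint>
    #include <limits>

    // Roughly what the fast path plus the cti_op_mod slow cases compute for two int32 operands.
    double jsModInt32(int32_t lhs, int32_t rhs)
    {
        if (rhs == 0)                          // idiv would fault; the JS result is NaN
            return std::numeric_limits<double>::quiet_NaN();
        if (lhs == INT32_MIN && rhs == -1)     // quotient overflows idiv; remainder is 0 with a negative dividend
            return -0.0;
        int32_t r = lhs % rhs;
        if (r == 0 && lhs < 0)                 // the result takes the dividend's sign (ECMA-262 11.5.3),
            return -0.0;                       // so it must be stored as the double -0.0
        return r;                              // any other remainder fits an int32
    }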
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp
index abdb5ed..5bcde42 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp
@@ -45,14 +45,407 @@ using namespace std;
namespace JSC {
+#if USE(JSVALUE32_64)
+
+void JIT::compileOpCallInitializeCallFrame()
+{
+ // regT0 holds callee, regT1 holds argCount
+ store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // scopeChain
+
+ emitStore(static_cast<unsigned>(RegisterFile::OptionalCalleeArguments), JSValue());
+ storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
+ storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
+}
+
+void JIT::compileOpCallSetupArgs(Instruction* instruction)
+{
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ emitPutJITStubArg(regT1, regT0, 0);
+ emitPutJITStubArgConstant(registerOffset, 1);
+ emitPutJITStubArgConstant(argCount, 2);
+}
+
+void JIT::compileOpConstructSetupArgs(Instruction* instruction)
+{
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+ int proto = instruction[5].u.operand;
+ int thisRegister = instruction[6].u.operand;
+
+ emitPutJITStubArg(regT1, regT0, 0);
+ emitPutJITStubArgConstant(registerOffset, 1);
+ emitPutJITStubArgConstant(argCount, 2);
+ emitPutJITStubArgFromVirtualRegister(proto, 3, regT2, regT3);
+ emitPutJITStubArgConstant(thisRegister, 4);
+}
+
+void JIT::compileOpCallVarargsSetupArgs(Instruction*)
+{
+ emitPutJITStubArg(regT1, regT0, 0);
+ emitPutJITStubArg(regT3, 1); // registerOffset
+ emitPutJITStubArg(regT2, 2); // argCount
+}
+
+void JIT::compileOpCallVarargs(Instruction* instruction)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCountRegister = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ emitLoad(callee, regT1, regT0);
+ emitLoadPayload(argCountRegister, regT2); // argCount
+ addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset
+
+ compileOpCallVarargsSetupArgs(instruction);
+
+ emitJumpSlowCaseIfNotJSCell(callee, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ mul32(Imm32(sizeof(Register)), regT3, regT3);
+ addPtr(callFrameRegister, regT3);
+ storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
+ move(regT3, callFrameRegister);
+
+ move(regT2, regT1); // argCount
+
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+
+ emitStore(dst, regT1, regT0);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, callee);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_call_NotJSFunction);
+ stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_call_varargs), dst, regT1, regT0);
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+#ifdef QT_BUILD_SCRIPT_LIB
+ JITStubCall stubCall(this, cti_op_debug_return);
+ stubCall.addArgument(Imm32(dst));
+ stubCall.call();
+#endif
+
+ // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, cti_op_ret_scopeChain).call();
+
+ emitLoad(dst, regT1, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+}
+
+void JIT::emit_op_construct_verify(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ emitLoad(dst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
+}
+
+void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitLoad(src, regT1, regT0);
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallVarargsSlowCase(currentInstruction, iter);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_load_varargs(Instruction* currentInstruction)
+{
+ int argCountDst = currentInstruction[1].u.operand;
+ int argsOffset = currentInstruction[2].u.operand;
+
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(Imm32(argsOffset));
+ stubCall.call();
+ // Stores a naked int32 in the register file.
+ store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCallVarargs(currentInstruction);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+#if !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ Jump wasEval1;
+ Jump wasEval2;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval1 = branchTest32(NonZero, regT0);
+ wasEval2 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ }
+
+ emitLoad(callee, regT1, regT2);
+
+ if (opcodeID == op_call)
+ compileOpCallSetupArgs(instruction);
+ else if (opcodeID == op_construct)
+ compileOpConstructSetupArgs(instruction);
+
+ emitJumpSlowCaseIfNotJSCell(callee, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // First, in the case of a construct, allocate the new object.
+ if (opcodeID == op_construct) {
+ JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ emitLoad(callee, regT1, regT2);
+ }
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+
+ if (opcodeID == op_call_eval) {
+ wasEval1.link(this);
+ wasEval2.link(this);
+ }
+
+ emitStore(dst, regT1, regT0);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, callee);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
+ stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ Jump wasEval1;
+ Jump wasEval2;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval1 = branchTest32(NonZero, regT0);
+ wasEval2 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ }
+
+ emitLoad(callee, regT1, regT0);
+
+ DataLabelPtr addressOfLinkedFunctionCheck;
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));
+ addSlowCase(jumpToSlow);
+ ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+
+ // The following is the fast case, only used when a callee can be linked.
+
+ // In the case of OpConstruct, call out to a cti_ function to create the new object.
+ if (opcodeID == op_construct) {
+ int proto = instruction[5].u.operand;
+ int thisRegister = instruction[6].u.operand;
+
+ JITStubCall stubCall(this, cti_op_construct_JSConstruct);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
+ stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
+ stubCall.addArgument(proto);
+ stubCall.call(thisRegister);
+
+ emitLoad(callee, regT1, regT0);
+ }
+
+ // Fast version of stack frame initialization, directly relative to edi.
+ // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
+ emitStore(registerOffset + RegisterFile::OptionalCalleeArguments, JSValue());
+ emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+ store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
+ storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
+ storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
+
+ // Call to the callee
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ if (opcodeID == op_call_eval) {
+ wasEval1.link(this);
+ wasEval2.link(this);
+ }
+
+ // Put the return value in dst. In the interpreter, op_ret does this.
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + opcodeLengths[opcodeID], dst, regT1, regT0);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ // The arguments have been set up on the hot path for op_call_eval
+ if (opcodeID == op_call)
+ compileOpCallSetupArgs(instruction);
+ else if (opcodeID == op_construct)
+ compileOpConstructSetupArgs(instruction);
+
+ // Fast check for JS function.
+ Jump callLinkFailNotObject = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
+
+ // First, in the case of a construct, allocate the new object.
+ if (opcodeID == op_construct) {
+ JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ emitLoad(callee, regT1, regT0);
+ }
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());
+
+ // Put the return value in dst.
+ emitStore(dst, regT1, regT0);
+ sampleCodeBlock(m_codeBlock);
+
+ // If not, we need an extra case in the if below!
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+
+ // Done! - return back to the hot path.
+ if (opcodeID == op_construct)
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
+ else
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
+
+ // This handles host functions
+ callLinkFailNotObject.link(this);
+ callLinkFailNotJSFunction.link(this);
+ JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();
+
+ emitStore(dst, regT1, regT0);
+ sampleCodeBlock(m_codeBlock);
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_CALL)
+
+#else // USE(JSVALUE32_64)
+
void JIT::compileOpCallInitializeCallFrame()
{
store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
- storePtr(regT2, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
+ storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
}
@@ -62,9 +455,9 @@ void JIT::compileOpCallSetupArgs(Instruction* instruction)
int registerOffset = instruction[4].u.operand;
// ecx holds func
- emitPutJITStubArg(regT2, 1);
- emitPutJITStubArgConstant(argCount, 3);
- emitPutJITStubArgConstant(registerOffset, 2);
+ emitPutJITStubArg(regT0, 0);
+ emitPutJITStubArgConstant(argCount, 2);
+ emitPutJITStubArgConstant(registerOffset, 1);
}
void JIT::compileOpCallVarargsSetupArgs(Instruction* instruction)
@@ -72,10 +465,10 @@ void JIT::compileOpCallVarargsSetupArgs(Instruction* instruction)
int registerOffset = instruction[4].u.operand;
// ecx holds func
+ emitPutJITStubArg(regT0, 0);
+ emitPutJITStubArg(regT1, 2);
+ addPtr(Imm32(registerOffset), regT1, regT2);
emitPutJITStubArg(regT2, 1);
- emitPutJITStubArg(regT1, 3);
- addPtr(Imm32(registerOffset), regT1, regT0);
- emitPutJITStubArg(regT0, 2);
}
void JIT::compileOpConstructSetupArgs(Instruction* instruction)
@@ -86,11 +479,11 @@ void JIT::compileOpConstructSetupArgs(Instruction* instruction)
int thisRegister = instruction[6].u.operand;
// ecx holds func
- emitPutJITStubArg(regT2, 1);
- emitPutJITStubArgConstant(registerOffset, 2);
- emitPutJITStubArgConstant(argCount, 3);
- emitPutJITStubArgFromVirtualRegister(proto, 4, regT0);
- emitPutJITStubArgConstant(thisRegister, 5);
+ emitPutJITStubArg(regT0, 0);
+ emitPutJITStubArgConstant(registerOffset, 1);
+ emitPutJITStubArgConstant(argCount, 2);
+ emitPutJITStubArgFromVirtualRegister(proto, 3, regT2);
+ emitPutJITStubArgConstant(thisRegister, 4);
}
void JIT::compileOpCallVarargs(Instruction* instruction)
@@ -100,20 +493,20 @@ void JIT::compileOpCallVarargs(Instruction* instruction)
int argCountRegister = instruction[3].u.operand;
emitGetVirtualRegister(argCountRegister, regT1);
- emitGetVirtualRegister(callee, regT2);
+ emitGetVirtualRegister(callee, regT0);
compileOpCallVarargsSetupArgs(instruction);
// Check for JSFunctions.
- emitJumpSlowCaseIfNotJSCell(regT2);
- addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
-
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+
// Speculatively roll the callframe, assuming argCount will match the arity.
- mul32(Imm32(sizeof(Register)), regT0, regT0);
+ mul32(Imm32(sizeof(Register)), regT2, regT2);
intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
- addPtr(Imm32((int32_t)offset), regT0, regT3);
+ addPtr(Imm32((int32_t)offset), regT2, regT3);
addPtr(callFrameRegister, regT3);
storePtr(callFrameRegister, regT3);
- addPtr(regT0, callFrameRegister);
+ addPtr(regT2, callFrameRegister);
emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
// Put the return value in dst. In the interpreter, op_ret does this.
@@ -128,7 +521,7 @@ void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCase
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_call_NotJSFunction);
+ JITStubCall stubCall(this, cti_op_call_NotJSFunction);
stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
sampleCodeBlock(m_codeBlock);
@@ -148,11 +541,15 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
// Handle eval
Jump wasEval;
if (opcodeID == op_call_eval) {
- CallEvalJITStub(this, instruction).call();
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee, regT0);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
}
- emitGetVirtualRegister(callee, regT2);
+ emitGetVirtualRegister(callee, regT0);
// The arguments have been set up on the hot path for op_call_eval
if (opcodeID == op_call)
compileOpCallSetupArgs(instruction);
@@ -160,13 +557,13 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
compileOpConstructSetupArgs(instruction);
// Check for JSFunctions.
- emitJumpSlowCaseIfNotJSCell(regT2);
- addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
- JITStubCall(this, JITStubs::cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitGetVirtualRegister(callee, regT2);
+ JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ emitGetVirtualRegister(callee, regT0);
}
// Speculatively roll the callframe, assuming argCount will match the arity.
@@ -191,7 +588,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, opcodeID == op_construct ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction);
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
sampleCodeBlock(m_codeBlock);
@@ -211,15 +608,25 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// Handle eval
Jump wasEval;
if (opcodeID == op_call_eval) {
- CallEvalJITStub(this, instruction).call();
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee, regT0);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
}
// This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
// This deliberately leaves the callee in ecx, used when setting up the stack frame below
- emitGetVirtualRegister(callee, regT2);
+ emitGetVirtualRegister(callee, regT0);
DataLabelPtr addressOfLinkedFunctionCheck;
- Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT2, addressOfLinkedFunctionCheck, ImmPtr(JSValue::encode(JSValue())));
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(JSValue::encode(JSValue())));
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
addSlowCase(jumpToSlow);
ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
@@ -231,18 +638,18 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
int proto = instruction[5].u.operand;
int thisRegister = instruction[6].u.operand;
- emitPutJITStubArg(regT2, 1);
- emitPutJITStubArgFromVirtualRegister(proto, 4, regT0);
- JITStubCall stubCall(this, JITStubs::cti_op_construct_JSConstruct);
+ emitPutJITStubArg(regT0, 0);
+ emitPutJITStubArgFromVirtualRegister(proto, 3, regT2);
+ JITStubCall stubCall(this, cti_op_construct_JSConstruct);
stubCall.call(thisRegister);
- emitGetVirtualRegister(callee, regT2);
+ emitGetVirtualRegister(callee, regT0);
}
// Fast version of stack frame initialization, directly relative to edi.
// Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
- storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+ storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
@@ -276,13 +683,13 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
compileOpConstructSetupArgs(instruction);
// Fast check for JS function.
- Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT2);
- Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr));
+ Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT0);
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
- JITStubCall(this, JITStubs::cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitGetVirtualRegister(callee, regT2);
+ JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ emitGetVirtualRegister(callee, regT0);
}
// Speculatively roll the callframe, assuming argCount will match the arity.
@@ -290,7 +697,9 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallPreLink());
+ move(regT0, regT2);
+
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());
// Put the return value in dst.
emitPutVirtualRegister(dst);
@@ -308,7 +717,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
// This handles host functions
callLinkFailNotObject.link(this);
callLinkFailNotJSFunction.link(this);
- JITStubCall(this, opcodeID == op_construct ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction).call();
+ JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();
emitPutVirtualRegister(dst);
sampleCodeBlock(m_codeBlock);
@@ -318,6 +727,8 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
#endif // !ENABLE(JIT_OPTIMIZE_CALL)
+#endif // USE(JSVALUE32_64)
+
} // namespace JSC
#endif // ENABLE(JIT)
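Both the JSVALUE32_64 and JSVALUE64 call paths above "speculatively roll the callframe": they advance callFrameRegister to where the callee's frame will live and stash the caller frame before jumping to ctiVirtualCall or ctiVirtualCallLink, which repair things if the argument count does not match the callee's arity. A rough C++ sketch of that pointer arithmetic; the header indices are placeholders standing in for RegisterFile::CallFrameHeaderEntry, not the real values:

    #include <cstdint>

    union Slot { void* ptr; int32_t i; };                                // stand-in for one Register

    namespace Header { enum { CallerFrame = -6, ArgumentCount = -3 }; }  // illustrative values only

    Slot* rollCallFrame(Slot* callerFrame, int registerOffset, int argCount)
    {
        Slot* calleeFrame = callerFrame + registerOffset;      // addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister)
        calleeFrame[Header::CallerFrame].ptr = callerFrame;    // storePtr(callFrameRegister, Address(..., CallerFrame, ...))
        calleeFrame[Header::ArgumentCount].i = argCount;       // the fast paths either store this directly or pass it in regT1
        return calleeFrame;                                    // the value callFrameRegister holds when the call is made
    }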
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCode.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCode.h
index b502c8a..69cf167 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCode.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCode.h
@@ -76,11 +76,7 @@ namespace JSC {
// Execute the code!
inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValue* exception)
{
- return JSValue::decode(ctiTrampoline(
-#if PLATFORM(X86_64)
- 0, 0, 0, 0, 0, 0,
-#endif
- m_ref.m_code.executableAddress(), registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
+ return JSValue::decode(ctiTrampoline(m_ref.m_code.executableAddress(), registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
}
void* start()
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h
index f03d635..e69e273 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h
@@ -32,75 +32,37 @@
namespace JSC {
-ALWAYS_INLINE void JIT::killLastResultRegister()
-{
- m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
-}
-
-// get arg puts an arg from the SF register array into a h/w register
-ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
-{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
-
- // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValue value = m_codeBlock->getConstant(src);
- move(ImmPtr(JSValue::encode(value)), dst);
- killLastResultRegister();
- return;
- }
-
- if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
- bool atJumpTarget = false;
- while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
- if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
- atJumpTarget = true;
- ++m_jumpTargetsPosition;
- }
-
- if (!atJumpTarget) {
- // The argument we want is already stored in eax
- if (dst != cachedResultRegister)
- move(cachedResultRegister, dst);
- killLastResultRegister();
- return;
- }
- }
-
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
- killLastResultRegister();
-}
-
-ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
-{
- if (src2 == m_lastResultBytecodeRegister) {
- emitGetVirtualRegister(src2, dst2);
- emitGetVirtualRegister(src1, dst1);
- } else {
- emitGetVirtualRegister(src1, dst1);
- emitGetVirtualRegister(src2, dst2);
- }
-}
+/* Deprecated: Please use JITStubCall instead. */
// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
- poke(src, argumentNumber);
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ poke(src, argumentStackOffset);
}
+/* Deprecated: Please use JITStubCall instead. */
+
ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
- poke(Imm32(value), argumentNumber);
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ poke(Imm32(value), argumentStackOffset);
}
+/* Deprecated: Please use JITStubCall instead. */
+
ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
- poke(ImmPtr(value), argumentNumber);
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ poke(ImmPtr(value), argumentStackOffset);
}
+/* Deprecated: Please use JITStubCall instead. */
+
ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
- peek(dst, argumentNumber);
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ peek(dst, argumentStackOffset);
}
ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
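The emitPutJITStubArg/emitGetJITStubArg helpers above now translate a stub argument number into a poke/peek slot as argumentNumber * (sizeof(JSValue) / sizeof(void*)) + 1, so a JSValue that is wider than a machine word occupies consecutive slots. A small worked example of that arithmetic, assuming an 8-byte JSValue, with 8-byte pointers under JSVALUE64 and 4-byte pointers under JSVALUE32_64:

    #include <cstddef>

    // Mirrors (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1 from the helpers above.
    constexpr size_t stubArgSlot(size_t argumentNumber, size_t sizeofJSValue, size_t sizeofPointer)
    {
        return argumentNumber * (sizeofJSValue / sizeofPointer) + 1;
    }

    static_assert(stubArgSlot(0, 8, 8) == 1, "JSVALUE64: argument n lands in slot n + 1");
    static_assert(stubArgSlot(2, 8, 8) == 3, "JSVALUE64: argument n lands in slot n + 1");
    static_assert(stubArgSlot(0, 8, 4) == 1, "JSVALUE32_64: argument n occupies slots 2n + 1 and 2n + 2");
    static_assert(stubArgSlot(2, 8, 4) == 5, "JSVALUE32_64: argument n occupies slots 2n + 1 and 2n + 2");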
@@ -109,30 +71,6 @@ ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
return m_codeBlock->getConstant(src);
}
-ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
-{
- return getConstantOperand(src).getInt32Fast();
-}
-
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
-{
- return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32Fast();
-}
-
-// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
-{
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValue value = m_codeBlock->getConstant(src);
- emitPutJITStubArgConstant(JSValue::encode(value), argumentNumber);
- } else {
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
- emitPutJITStubArg(scratch, argumentNumber);
- }
-
- killLastResultRegister();
-}
-
ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
@@ -146,26 +84,17 @@ ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterF
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
loadPtr(Address(from, entry * sizeof(Register)), to);
+#if !USE(JSVALUE32_64)
killLastResultRegister();
+#endif
}
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
load32(Address(from, entry * sizeof(Register)), to);
+#if !USE(JSVALUE32_64)
killLastResultRegister();
-}
-
-ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
-{
- storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
- m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
- // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
-}
-
-ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
-{
- storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
- // FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
+#endif
}
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
@@ -177,38 +106,71 @@ ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
return nakedCall;
}
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+
+ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
+{
+#if PLATFORM(ARM_TRADITIONAL)
+#ifndef NDEBUG
+ // Ensure the label after the sequence can also fit
+ insnSpace += sizeof(ARMWord);
+ constSpace += sizeof(uint64_t);
+#endif
+
+ ensureSpace(insnSpace, constSpace);
+
+#endif
+
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+#ifndef NDEBUG
+ m_uninterruptedInstructionSequenceBegin = label();
+ m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
+#endif
+#endif
+}
+
+ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
+{
+#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+ ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
+ ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
+#endif
+}
+
+#endif
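On assemblers with a constant pool (the ARM_TRADITIONAL case above), the pool may be flushed into the instruction stream at almost any emission point; reserving worst-case space first forces any flush to happen before a sequence that must stay contiguous for later patching, and the NDEBUG-guarded labels let endUninterruptedSequence assert the estimate was exact. A minimal standalone model of that reservation idea (buffer sizes are made up):

// Illustration only: why space is reserved before an "uninterrupted" sequence.
#include <cstdio>

struct PoolAssemblerModel {
    int freeInstructionBytes = 16; // assumed space left before the pool must be flushed

    void ensureSpace(int insnBytes)
    {
        if (insnBytes > freeInstructionBytes) {
            std::printf("flush constant pool here, before the sequence\n");
            freeInstructionBytes = 4096; // a fresh window (size assumed)
        }
    }
    void emit(int bytes) { freeInstructionBytes -= bytes; }
};

int main()
{
    PoolAssemblerModel assembler;
    const int sequenceBytes = 24;         // assumed worst case for a patchable sequence
    assembler.ensureSpace(sequenceBytes); // what beginUninterruptedSequence does
    assembler.emit(sequenceBytes);        // the patchable code is now guaranteed contiguous
    return 0;
}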
+
+#if PLATFORM(ARM_THUMB2)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
- pop(reg);
+ move(linkRegister, reg);
}
ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
- push(reg);
+ move(reg, linkRegister);
}
ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
- push(address);
+ loadPtr(address, linkRegister);
}
-#elif PLATFORM_ARM_ARCH(7)
+#else // PLATFORM(X86) || PLATFORM(X86_64) || PLATFORM(ARM_TRADITIONAL)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
- move(linkRegister, reg);
+ pop(reg);
}
ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
- move(reg, linkRegister);
+ push(reg);
}
ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
- loadPtr(address, linkRegister);
+ push(address);
}
#endif
@@ -224,13 +186,16 @@ ALWAYS_INLINE void JIT::restoreArgumentReference()
{
move(stackPointerRegister, firstArgumentRegister);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+#if PLATFORM(ARM_TRADITIONAL)
+ move(ctiReturnRegister, ARMRegisters::lr);
+#endif
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if PLATFORM(X86)
// Within a trampoline the return address will be on the stack at this point.
addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
-#elif PLATFORM_ARM_ARCH(7)
+#elif PLATFORM(ARM_THUMB2)
move(stackPointerRegister, firstArgumentRegister);
#endif
// In the trampoline on x86-64, the first argument register is not overwritten.
@@ -242,9 +207,484 @@ ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure
return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
}
+ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
+{
+ if (!m_codeBlock->isKnownNotImmediate(vReg))
+ linkSlowCase(iter);
+}
+
+ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
+}
+
+ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ const JumpList::JumpVector& jumpVector = jumpList.jumps();
+ size_t size = jumpVector.size();
+ for (size_t i = 0; i < size; ++i)
+ m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
+}
+
+ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
+}
+
+ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
+}
+
+#if ENABLE(SAMPLING_FLAGS)
+ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
+}
+
+ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
+}
+#endif
+
+#if ENABLE(SAMPLING_COUNTERS)
+ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
+{
+#if PLATFORM(X86_64) // Or any other 64-bit platform.
+ addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
+#elif PLATFORM(X86) // Or any other little-endian 32-bit platform.
+ intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
+ add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
+ addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
+#else
+#error "SAMPLING_COUNTERS not implemented on this platform."
+#endif
+}
+#endif
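On 32-bit x86 the 64-bit sampling counter is updated as two words: add32 bumps the low word and addWithCarry32 folds the carry into the high word. A standalone model of that pair:

// Illustration only: what the add32/addWithCarry32 pair computes for a
// 64-bit counter stored as two little-endian 32-bit words.
#include <cstdint>
#include <cstdio>

static void emitCountModel(uint32_t& loWord, uint32_t& hiWord, uint32_t count)
{
    uint64_t sum = static_cast<uint64_t>(loWord) + count;
    loWord = static_cast<uint32_t>(sum);        // add32(Imm32(count), low word)
    hiWord += static_cast<uint32_t>(sum >> 32); // addWithCarry32(Imm32(0), high word)
}

int main()
{
    uint32_t lo = 0xffffffffu, hi = 0;
    emitCountModel(lo, hi, 1);
    std::printf("counter = 0x%08x%08x\n", hi, lo); // 0x0000000100000000
    return 0;
}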
+
+#if ENABLE(OPCODE_SAMPLING)
+#if PLATFORM(X86_64)
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
+ storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
+}
+#endif
+#endif
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+#if PLATFORM(X86_64)
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
+ storePtr(ImmPtr(codeBlock), X86Registers::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
+}
+#endif
+#endif
+
+#if USE(JSVALUE32_64)
+
+inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
+{
+ return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+}
+
+inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
+{
+ return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+}
+
+inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
+{
+ return Address(base, (index * sizeof(Register)));
+}
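In the JSVALUE32_64 representation each virtual register is an 8-byte call-frame slot holding a 32-bit payload and a 32-bit tag; tagFor/payloadFor/addressFor above just compute the address of the slot or of one half of it. A rough stand-in for that layout (the field order is an assumption for a little-endian build; the real code uses OBJECT_OFFSETOF, so it is endian-safe):

// Illustration only: a stand-in for the call-frame slots these helpers index into.
#include <cstdio>

struct RegisterSlot {
    unsigned payload; // 32-bit payload half (assumed first on little-endian)
    unsigned tag;     // 32-bit tag half
};

int main()
{
    RegisterSlot callFrame[8] = {};
    unsigned index = 3;
    std::printf("addressFor: %p\n", static_cast<void*>(&callFrame[index]));
    std::printf("payloadFor: %p\n", static_cast<void*>(&callFrame[index].payload));
    std::printf("tagFor:     %p\n", static_cast<void*>(&callFrame[index].tag));
    return 0;
}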
+
+inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
+{
+ RegisterID mappedTag;
+ if (getMappedTag(index, mappedTag)) {
+ move(mappedTag, tag);
+ unmap(tag);
+ return;
+ }
+
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ move(Imm32(getConstantOperand(index).tag()), tag);
+ unmap(tag);
+ return;
+ }
+
+ load32(tagFor(index), tag);
+ unmap(tag);
+}
+
+inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
+{
+ RegisterID mappedPayload;
+ if (getMappedPayload(index, mappedPayload)) {
+ move(mappedPayload, payload);
+ unmap(payload);
+ return;
+ }
+
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ move(Imm32(getConstantOperand(index).payload()), payload);
+ unmap(payload);
+ return;
+ }
+
+ load32(payloadFor(index), payload);
+ unmap(payload);
+}
+
+inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
+{
+ move(Imm32(v.payload()), payload);
+ move(Imm32(v.tag()), tag);
+}
+
+inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
+{
+ ASSERT(tag != payload);
+
+ if (base == callFrameRegister) {
+ ASSERT(payload != base);
+ emitLoadPayload(index, payload);
+ emitLoadTag(index, tag);
+ return;
+ }
+
+ if (payload == base) { // avoid stomping base
+ load32(tagFor(index, base), tag);
+ load32(payloadFor(index, base), payload);
+ return;
+ }
+
+ load32(payloadFor(index, base), payload);
+ load32(tagFor(index, base), tag);
+}
+
+inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
+{
+ if (isMapped(index1)) {
+ emitLoad(index1, tag1, payload1);
+ emitLoad(index2, tag2, payload2);
+ return;
+ }
+ emitLoad(index2, tag2, payload2);
+ emitLoad(index1, tag1, payload1);
+}
+
+inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ Register& inConstantPool = m_codeBlock->constantRegister(index);
+ loadDouble(&inConstantPool, value);
+ } else
+ loadDouble(addressFor(index), value);
+}
+
+inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
+{
+ if (m_codeBlock->isConstantRegisterIndex(index)) {
+ Register& inConstantPool = m_codeBlock->constantRegister(index);
+ char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
+ convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+ } else
+ convertInt32ToDouble(payloadFor(index), value);
+}
+
+inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
+{
+ store32(payload, payloadFor(index, base));
+ store32(tag, tagFor(index, base));
+}
+
+inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsInt32)
+ store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsInt32)
+ store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
+{
+ store32(payload, payloadFor(index, callFrameRegister));
+ if (!indexIsCell)
+ store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
+{
+ if (!indexIsBool)
+ store32(Imm32(0), payloadFor(index, callFrameRegister));
+ store32(tag, tagFor(index, callFrameRegister));
+}
+
+inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
+{
+ storeDouble(value, addressFor(index));
+}
+
+inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
+{
+ store32(Imm32(constant.payload()), payloadFor(index, base));
+ store32(Imm32(constant.tag()), tagFor(index, base));
+}
+
+ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+{
+ emitStore(dst, jsUndefined());
+}
+
+inline bool JIT::isLabeled(unsigned bytecodeIndex)
+{
+ for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
+ unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
+ if (jumpTarget == bytecodeIndex)
+ return true;
+ if (jumpTarget > bytecodeIndex)
+ return false;
+ }
+ return false;
+}
+
+inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
+{
+ if (isLabeled(bytecodeIndex))
+ return;
+
+ m_mappedBytecodeIndex = bytecodeIndex;
+ m_mappedVirtualRegisterIndex = virtualRegisterIndex;
+ m_mappedTag = tag;
+ m_mappedPayload = payload;
+}
+
+inline void JIT::unmap(RegisterID registerID)
+{
+ if (m_mappedTag == registerID)
+ m_mappedTag = (RegisterID)-1;
+ else if (m_mappedPayload == registerID)
+ m_mappedPayload = (RegisterID)-1;
+}
+
+inline void JIT::unmap()
+{
+ m_mappedBytecodeIndex = (unsigned)-1;
+ m_mappedVirtualRegisterIndex = (unsigned)-1;
+ m_mappedTag = (RegisterID)-1;
+ m_mappedPayload = (RegisterID)-1;
+}
+
+inline bool JIT::isMapped(unsigned virtualRegisterIndex)
+{
+ if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ return true;
+}
+
+inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
+{
+ if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ if (m_mappedPayload == (RegisterID)-1)
+ return false;
+ payload = m_mappedPayload;
+ return true;
+}
+
+inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
+{
+ if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ return false;
+ if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
+ return false;
+ if (m_mappedTag == (RegisterID)-1)
+ return false;
+ tag = m_mappedTag;
+ return true;
+}
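map() records which machine registers will hold a virtual register's tag and payload at a given bytecode index (usually the next one), unless that index is a jump target; isMapped/getMappedTag/getMappedPayload let the op at that index reuse those registers instead of reloading from the call frame, and unmap() invalidates a register once it is clobbered. A reduced standalone model of the cache (not the real JIT types):

// Illustration only: a one-entry tag/payload mapping cache.
#include <cstdio>

enum Reg { NoReg = -1, RegT0 = 0, RegT1 = 1 };

struct MapModel {
    unsigned bytecodeIndex = unsigned(-1);
    unsigned virtualRegister = unsigned(-1);
    int tagReg = NoReg;
    int payloadReg = NoReg;

    void map(unsigned bytecode, unsigned vreg, int tag, int payload)
    {
        bytecodeIndex = bytecode; virtualRegister = vreg; tagReg = tag; payloadReg = payload;
    }
    bool getMappedPayload(unsigned bytecode, unsigned vreg, int& payload) const
    {
        if (bytecodeIndex != bytecode || virtualRegister != vreg || payloadReg == NoReg)
            return false;
        payload = payloadReg;
        return true;
    }
};

int main()
{
    MapModel m;
    m.map(/*bytecode*/ 10, /*vreg*/ 4, RegT1, RegT0); // e.g. after op_mov left r4 in regT1:regT0
    int payload;
    if (m.getMappedPayload(10, 4, payload))
        std::printf("reuse register %d instead of reloading r4\n", payload); // hit
    if (!m.getMappedPayload(11, 4, payload))
        std::printf("different bytecode: miss, reload from the call frame\n");
    return 0;
}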
+
+inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
+ addSlowCase(branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag)));
+}
+
+inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
+ addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
+}
+
+inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
+{
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
+ linkSlowCase(iter);
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
+}
+
+ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
+{
+ if (isOperandConstantImmediateInt(op1)) {
+ constant = getConstantOperand(op1).asInt32();
+ op = op2;
+ return true;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ constant = getConstantOperand(op2).asInt32();
+ op = op1;
+ return true;
+ }
+
+ return false;
+}
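getOperandConstantImmediateInt lets a commutative fast path fold whichever operand happens to be a constant int32 into an immediate and keep only the dynamic operand. A standalone model of that selection (the operand shapes are invented for illustration):

// Illustration only: the operand-folding choice for a commutative op such as add.
#include <cstdint>
#include <cstdio>

struct Operand { bool isConstantInt; int32_t value; unsigned index; };

static bool pickImmediate(const Operand& op1, const Operand& op2, unsigned& dynamicOp, int32_t& constant)
{
    if (op1.isConstantInt) { constant = op1.value; dynamicOp = op2.index; return true; }
    if (op2.isConstantInt) { constant = op2.value; dynamicOp = op1.index; return true; }
    return false;
}

int main()
{
    Operand a{false, 0, 1}, b{true, 7, 2};
    unsigned dynamicOp; int32_t constant;
    if (pickImmediate(a, b, dynamicOp, constant))
        std::printf("emit immediate form: r%u op #%d\n", dynamicOp, constant);
    else
        std::printf("emit register-register form\n");
    return 0;
}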
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
+}
+
+/* Deprecated: Please use JITStubCall instead. */
+
+ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber)
+{
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ poke(payload, argumentStackOffset);
+ poke(tag, argumentStackOffset + 1);
+}
+
+/* Deprecated: Please use JITStubCall instead. */
+
+ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
+{
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue constant = m_codeBlock->getConstant(src);
+ poke(Imm32(constant.payload()), argumentStackOffset);
+ poke(Imm32(constant.tag()), argumentStackOffset + 1);
+ } else {
+ emitLoad(src, scratch1, scratch2);
+ poke(scratch2, argumentStackOffset);
+ poke(scratch1, argumentStackOffset + 1);
+ }
+}
+
+#else // USE(JSVALUE32_64)
+
+ALWAYS_INLINE void JIT::killLastResultRegister()
+{
+ m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
+}
+
+// get arg puts an arg from the SF register array into a h/w register
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
+{
+ ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+ // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue value = m_codeBlock->getConstant(src);
+ move(ImmPtr(JSValue::encode(value)), dst);
+ killLastResultRegister();
+ return;
+ }
+
+ if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
+ bool atJumpTarget = false;
+ while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
+ if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
+ atJumpTarget = true;
+ ++m_jumpTargetsPosition;
+ }
+
+ if (!atJumpTarget) {
+ // The argument we want is already stored in eax
+ if (dst != cachedResultRegister)
+ move(cachedResultRegister, dst);
+ killLastResultRegister();
+ return;
+ }
+ }
+
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
+ killLastResultRegister();
+}
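The jump-target scan above is what keeps the one-entry result cache sound: a bytecode that another path can branch to may arrive with anything in the cached machine register, so the shortcut only applies on straight-line code (and, in the real check, only to temporary registers). A compact model:

// Illustration only: the last-result shortcut on the non-JSVALUE32_64 path.
#include <cstdio>

static bool canReuseCachedResult(int src, int lastResultVirtualRegister, bool atJumpTarget)
{
    return src == lastResultVirtualRegister && !atJumpTarget;
}

int main()
{
    std::printf("%d\n", canReuseCachedResult(5, 5, false)); // 1: skip the call-frame load
    std::printf("%d\n", canReuseCachedResult(5, 5, true));  // 0: reload; another path may reach here
    return 0;
}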
+
+ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
+{
+ if (src2 == m_lastResultBytecodeRegister) {
+ emitGetVirtualRegister(src2, dst2);
+ emitGetVirtualRegister(src1, dst1);
+ } else {
+ emitGetVirtualRegister(src1, dst1);
+ emitGetVirtualRegister(src2, dst2);
+ }
+}
+
+ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
+{
+ return getConstantOperand(src).asInt32();
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+{
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
+}
+
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
+{
+ storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
+ m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
+}
+
+ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+{
+ storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
+}
+
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
return branchTestPtr(Zero, reg, tagMaskRegister);
#else
return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
@@ -265,7 +705,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
@@ -283,13 +723,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
emitJumpSlowCaseIfNotJSCell(reg);
}
-ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
-{
- if (!m_codeBlock->isKnownNotImmediate(vReg))
- linkSlowCase(iter);
-}
-
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
{
return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
@@ -302,7 +736,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
@@ -311,7 +745,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
return branchPtr(Below, reg, tagTypeNumberRegister);
#else
return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
@@ -335,7 +769,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1,
addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}
-#if !USE(ALTERNATE_JSIMMEDIATE)
+#if !USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
@@ -349,7 +783,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID re
ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
emitFastArithIntToImmNoCheck(src, dest);
#else
if (src != dest)
@@ -360,7 +794,7 @@ ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID d
ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
UNUSED_PARAM(reg);
#else
rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift), reg);
@@ -370,7 +804,7 @@ ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
if (src != dest)
move(src, dest);
orPtr(tagTypeNumberRegister, dest);
@@ -387,88 +821,26 @@ ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}
-ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
-{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+/* Deprecated: Please use JITStubCall instead. */
- m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
-}
-
-ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
-{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
-
- m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
-{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
-
- jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
-}
-
-#if ENABLE(SAMPLING_FLAGS)
-ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
-{
- ASSERT(flag >= 1);
- ASSERT(flag <= 32);
- or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
-}
-
-ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
+// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
+ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
- ASSERT(flag >= 1);
- ASSERT(flag <= 32);
- and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
-}
-#endif
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ JSValue value = m_codeBlock->getConstant(src);
+ poke(ImmPtr(JSValue::encode(value)), argumentStackOffset);
+ } else {
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
+ poke(scratch, argumentStackOffset);
+ }
-#if ENABLE(SAMPLING_COUNTERS)
-ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
-{
-#if PLATFORM(X86_64) // Or any other 64-bit plattform.
- addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
-#elif PLATFORM(X86) // Or any other little-endian 32-bit plattform.
- intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
- add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
- addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
-#else
-#error "SAMPLING_FLAGS not implemented on this platform."
-#endif
+ killLastResultRegister();
}
-#endif
-#if ENABLE(OPCODE_SAMPLING)
-#if PLATFORM(X86_64)
-ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
-{
- move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86::ecx);
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86::ecx);
-}
-#else
-ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
-{
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
-}
-#endif
-#endif
+#endif // USE(JSVALUE32_64)
-#if ENABLE(CODEBLOCK_SAMPLING)
-#if PLATFORM(X86_64)
-ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
-{
- move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86::ecx);
- storePtr(ImmPtr(codeBlock), X86::ecx);
-}
-#else
-ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
-{
- storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
-}
-#endif
-#endif
-}
+} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp
index da541c5..34debcb 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp
@@ -32,12 +32,1770 @@
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSCell.h"
+#include "JSFunction.h"
+#include "LinkBuffer.h"
namespace JSC {
+#if USE(JSVALUE32_64)
+
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
+{
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ // (1) This function provides fast property access for string length
+ Label stringLengthBegin = align();
+
+ // regT0 holds payload, regT1 holds tag
+
+ Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+
+ // Checks out okay! - get the length from the UString.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(UString::Rep, len)), regT2);
+
+ Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
+ move(regT2, regT0);
+ move(Imm32(JSValue::Int32Tag), regT1);
+
+ ret();
+#endif
+
+ // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ // VirtualCallLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallLinkBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+
+ Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction2 = call();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ hasCodeBlock2.link(this);
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callArityCheck2 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ arityCheckOkay2.link(this);
+
+ isNativeFunc2.link(this);
+
+ compileOpCallInitializeCallFrame();
+
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callLazyLinkCall = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ jump(regT0);
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
+
+ // VirtualCall Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+
+ Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction1 = call();
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock3.link(this);
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callArityCheck1 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ arityCheckOkay3.link(this);
+
+ isNativeFunc3.link(this);
+
+ compileOpCallInitializeCallFrame();
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
+ jump(regT0);
+
+#if PLATFORM(X86)
+ Label nativeCallThunk = align();
+ preserveReturnAddressAfterCall(regT0);
+ emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
+
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+ /* We have two structs that we use to describe the stackframe we set up for our
+ * call to native code. NativeCallFrameStructure describes how we set up the stack
+ * in advance of the call. NativeFunctionCalleeSignature describes the callframe
+ * as the native code expects it. We do this as we are using the fastcall calling
+ * convention which results in the callee popping its arguments off the stack, but
+ * not the rest of the callframe, so we need a nice way to ensure we increment the
+ * stack pointer by the right amount after the call.
+ */
+
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC)
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC)
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in EDX
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ JSValue result;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#if COMPILER(MSVC)
+#pragma pack(pop)
+#endif // COMPILER(MSVC)
+#else
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in ECX
+ // JSObject* callee; // passed in EDX
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#endif
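The two structs exist so the trampoline knows how much of the frame the fastcall callee pops by itself (the NativeFunctionCalleeSignature part) versus how much must be released explicitly after the call. The sketch below works that arithmetic with assumed sizes; it mirrors the "(size + 15) & ~15" alignment and the "NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)" adjustment that appear just below and after the call.

// Illustration only: the frame-size arithmetic around the native call.
// The struct sizes are assumptions for a 32-bit build; the real code uses
// sizeof() of the structs defined above.
#include <cstdio>

int main()
{
    const unsigned sizeofNativeCallFrameStructure = 36;      // assumed
    const unsigned sizeofNativeFunctionCalleeSignature = 12; // assumed

    // Round the frame up to a 16-byte boundary before adjusting the stack.
    const unsigned nativeCallFrameSize = (sizeofNativeCallFrameStructure + 15) & ~15u;

    // The fastcall callee pops its own argument area (the CalleeSignature part),
    // so only the remainder is released explicitly after the call returns.
    const unsigned popAfterCall = nativeCallFrameSize - sizeofNativeFunctionCalleeSignature;

    std::printf("allocate %u bytes, release %u bytes after the call\n",
                nativeCallFrameSize, popAfterCall); // 48 and 36 with these assumed sizes
    return 0;
}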
+
+ const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
+ // Allocate system stack frame
+ subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
+
+ // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
+ loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT3);
+ storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+ // JSValue is a non-POD type, so the result is returned through a pointer; pass the address of the result slot in ecx.
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
+
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
+ storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
+
+ // Plant callframe
+ move(callFrameRegister, X86Registers::edx);
+
+ call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ // JSValue is a non-POD type, so eax points to it
+ emitLoad(0, regT1, regT0, X86Registers::eax);
+#else
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx); // callee
+ move(callFrameRegister, X86Registers::ecx); // callFrame
+ call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
+#endif
+
+ // We've put a few temporaries on the stack in addition to the actual arguments
+ // so pull them off now
+ addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
+
+ // Check for an exception
+ // FIXME: Maybe we can optimize this comparison to JSValue().
+ move(ImmPtr(&globalData->exception), regT2);
+ Jump sawException1 = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::CellTag));
+ Jump sawException2 = branch32(NonZero, payloadFor(0, regT2), Imm32(0));
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT3);
+ ret();
+
+ // Handle an exception
+ sawException1.link(this);
+ sawException2.link(this);
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ breakpoint();
+#endif
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+#endif
+
+ // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+#endif
+ patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+#endif
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ *executablePool = finalCode.m_executablePool;
+
+ *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
+ *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
+#else
+ UNUSED_PARAM(ctiStringLengthTrampoline);
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
+#else
+ UNUSED_PARAM(ctiVirtualCallLink);
+#endif
+}
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ if (m_codeBlock->isConstantRegisterIndex(src))
+ emitStore(dst, getConstantOperand(src));
+ else {
+ emitLoad(src, regT1, regT0);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
+ }
+}
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, cti_op_end).call();
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target + 1);
+}
+
+void JIT::emit_op_loop(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ emitTimeoutCheck();
+ addJump(jump(), target + 1);
+}
+
+void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitTimeoutCheck();
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, regT2), target + 3);
+}
+
+void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_loop_if_less);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+}
+
+void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitTimeoutCheck();
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT0, regT2), target + 3);
+}
+
+void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+}
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+ JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ // Load the operands (baseVal, proto, and value respectively) into registers.
+ // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+ emitLoadPayload(proto, regT1);
+ emitLoadPayload(baseVal, regT0);
+ emitLoadPayload(value, regT2);
+
+ // Check that baseVal & proto are cells.
+ emitJumpSlowCaseIfNotJSCell(proto);
+ emitJumpSlowCaseIfNotJSCell(baseVal);
+
+ // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType))); // FIXME: Maybe remove this test.
+ addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance))); // FIXME: TOT checks ImplementsDefaultHasInstance.
+
+ // If value is not an Object, return false.
+ emitLoadTag(value, regT0);
+ Jump valueIsImmediate = branch32(NotEqual, regT0, Imm32(JSValue::CellTag));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)); // FIXME: Maybe remove this test.
+
+ // Check proto is object.
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+
+ // Optimistically load the result true, and start looping.
+ // Initially, regT1 still contains proto and regT2 still contains value.
+ // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+ move(Imm32(JSValue::TrueTag), regT0);
+ Label loop(this);
+
+ // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
+ // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ Jump isInstance = branchPtr(Equal, regT2, regT1);
+ branch32(NotEqual, regT2, Imm32(0), loop);
+
+ // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+ valueIsImmediate.link(this);
+ valueIsNotObject.link(this);
+ move(Imm32(JSValue::FalseTag), regT0);
+
+ // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+ isInstance.link(this);
+ emitStoreBool(dst, regT0);
+}
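The loop above repeatedly replaces the candidate cell with the payload of its structure's prototype, succeeding if it reaches proto and failing once it hits a null payload. The same walk in plain C++ (the Cell/Structure shapes are stand-ins, not the real JSC types):

// Illustration only: the prototype-chain walk emitted above.
#include <cstdio>

struct Cell;
struct Structure { Cell* prototypePayload; };
struct Cell { Structure* structure; };

static bool isInstanceOf(Cell* value, Cell* prototype)
{
    // Walk value's prototype chain; a null payload terminates the chain.
    for (Cell* current = value->structure->prototypePayload; current; current = current->structure->prototypePayload) {
        if (current == prototype)
            return true;
    }
    return false;
}

int main()
{
    Structure protoStructure{nullptr};
    Cell proto{&protoStructure};
    Structure objStructure{&proto};
    Cell obj{&objStructure};
    std::printf("%d\n", isInstanceOf(&obj, &proto)); // 1
    return 0;
}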
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_instanceof);
+ stubCall.addArgument(value);
+ stubCall.addArgument(baseVal);
+ stubCall.addArgument(proto);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_func);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[3].u.operand;
+
+ loadPtr(&globalObject->d()->registers, regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+ JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ loadPtr(&globalObject->d()->registers, regT2);
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
+}
+
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int index = currentInstruction[1].u.operand;
+ int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
+
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
+}
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_tear_off_activation);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction*)
+{
+ JITStubCall(this, cti_op_tear_off_arguments).call();
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_array);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ isImm.link(this);
+
+ if (dst != src)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_primitive);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_strcat);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitTimeoutCheck();
+
+ emitLoad(cond, regT1, regT0);
+
+ Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addJump(branch32(NotEqual, regT0, Imm32(0)), target + 2);
+ Jump isNotZero = jump();
+
+ isNotInteger.link(this);
+
+ addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::FalseTag)));
+
+ isNotZero.link(this);
+}
+
+void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_skip);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction)
+{
+ // FIXME: Optimize to use patching instead of so many memory accesses.
+
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = currentInstruction[2].u.jsCell;
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+ void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
+ void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
+
+ // Verify structure.
+ move(ImmPtr(globalObject), regT0);
+ loadPtr(structureAddress, regT1);
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));
+
+ // Load property.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
+ load32(offsetAddr, regT3);
+ load32(BaseIndex(regT2, regT3, TimesEight), regT0); // payload
+ load32(BaseIndex(regT2, regT3, TimesEight, 4), regT1); // tag
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_resolve_global), dst, regT1, regT0);
+}
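Each op_resolve_global site owns a GlobalResolveInfo slot holding a cached Structure* and property offset; the fast path re-checks the structure and reads the value out of the global object's external storage, two 32-bit words per property (hence the TimesEight scale), while the slow case below refills the cache through the stub. The same check as plain C++ (type and field names are stand-ins, not the real declarations):

// Illustration only: the resolve_global structure-check fast path.
#include <cstdint>
#include <cstdio>

struct ValueSlot { uint32_t payload; uint32_t tag; }; // 8 bytes per property, as in the TimesEight index
struct Structure {};
struct GlobalObjectModel { Structure* structure; ValueSlot* externalStorage; };
struct ResolveInfoModel { Structure* cachedStructure; uint32_t cachedOffset; };

static bool tryFastResolveGlobal(const GlobalObjectModel& global, const ResolveInfoModel& info, ValueSlot& out)
{
    if (global.structure != info.cachedStructure)
        return false; // slow path: call the stub, which refills structure and offset
    out = global.externalStorage[info.cachedOffset];
    return true;
}

int main()
{
    Structure shape;
    ValueSlot storage[4] = { {0, 0}, {42, 1}, {0, 0}, {0, 0} };
    GlobalObjectModel global{ &shape, storage };
    ResolveInfoModel info{ &shape, 1 };
    ValueSlot v;
    if (tryFastResolveGlobal(global, info, v))
        std::printf("payload %u tag %u\n", v.payload, v.tag);
    return 0;
}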
+
+void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = currentInstruction[2].u.jsCell;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(ImmPtr(globalObject));
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoadTag(src, regT0);
+
+ xor32(Imm32(JSValue::FalseTag), regT0);
+ addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
+ xor32(Imm32(JSValue::TrueTag), regT0);
+
+ emitStoreBool(dst, regT0, (dst == src));
+}
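The xor sequence above relies on JSValue::TrueTag and JSValue::FalseTag differing only in their lowest bit: xoring with FalseTag maps false to 0 and true to 1, the test against ~1 sends any non-boolean to the slow case, and the second xor turns 0/1 back into the opposite boolean tag. A standalone check of the trick (the tag values below are assumptions chosen only to have that one property, not JSC's real constants):

// Illustration only: the boolean-negation tag trick.
#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t FalseTag = 0x6; // assumption, not JSC's real value
    const uint32_t TrueTag  = 0x7; // assumption; only "differs from FalseTag in bit 0" matters

    const uint32_t tags[] = { FalseTag, TrueTag };
    for (uint32_t tag : tags) {
        uint32_t t = tag ^ FalseTag;  // false -> 0, true -> 1
        assert((t & ~1u) == 0);       // any other tag would take the slow case
        t ^= TrueTag;                 // 0 -> TrueTag, 1 -> FalseTag: the negation
        assert(t == (tag == FalseTag ? TrueTag : FalseTag));
    }
    return 0;
}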
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_not);
+ stubCall.addArgument(src);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
+ addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target + 2);
+
+ Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
+ addJump(jump(), target + 2);
+
+ isNotInteger.link(this);
+
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ zeroDouble(fpRegT0);
+ emitLoadDouble(cond, fpRegT1);
+ addJump(branchDouble(DoubleEqual, fpRegT0, fpRegT1), target + 2);
+
+ isTrue.link(this);
+ isTrue2.link(this);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 2); // Inverted.
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
+ addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);
+
+ Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
+ addJump(jump(), target + 2);
+
+ isNotInteger.link(this);
+
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ zeroDouble(fpRegT0);
+ emitLoadDouble(cond, fpRegT1);
+ addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target + 2);
+
+ isFalse.link(this);
+ isFalse2.link(this);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
+ set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ addJump(branchTest32(NonZero, regT1), target + 2);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
+ set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ addJump(branchTest32(Zero, regT1), target + 2);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ JSCell* ptr = currentInstruction[2].u.jsCell;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target + 3);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target + 3);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
+ DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target + 2);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
+
+ set8(Equal, regT0, regT2, regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call();
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(op1);
+ stubCallEq.addArgument(op2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ or32(Imm32(JSValue::FalseTag), regT0);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
+
+ set8(NotEqual, regT0, regT2, regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call(regT0);
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(regT1, regT0);
+ stubCallEq.addArgument(regT3, regT2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ xor32(Imm32(0x1), regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoadTag(src1, regT0);
+ emitLoadTag(src2, regT1);
+
+ // Jump to a slow case if either operand is double, or if both operands are
+ // cells and/or Int32s.
+ move(regT0, regT2);
+ and32(regT1, regT2);
+ addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));
+
+ if (type == OpStrictEq)
+ set8(Equal, regT0, regT1, regT0);
+ else
+ set8(NotEqual, regT0, regT1, regT0);
+
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_stricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_nstricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ set8(Equal, regT1, Imm32(JSValue::NullTag), regT2);
+ set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ or32(Imm32(JSValue::FalseTag), regT1);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
+ set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ and32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ or32(Imm32(JSValue::FalseTag), regT1);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_func_exp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_regexp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+ unsigned exception = currentInstruction[1].u.operand;
+ JITStubCall stubCall(this, cti_op_throw);
+ stubCall.addArgument(exception);
+ stubCall.call();
+
+#ifndef NDEBUG
+ // cti_op_throw always changes its return address, so
+ // this point in the code should never be reached.
+ breakpoint();
+#endif
+}
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int iter = currentInstruction[2].u.operand;
+ int target = currentInstruction[3].u.operand;
+
+ load32(Address(callFrameRegister, (iter * sizeof(Register))), regT0);
+
+ JITStubCall stubCall(this, cti_op_next_pname);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+
+ Jump endOfIter = branchTestPtr(Zero, regT0);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_next_pname), dst, regT1, regT0);
+ addJump(jump(), target + 3);
+ endOfIter.link(this);
+}
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_scope);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ JITStubCall(this, cti_op_pop_scope).call();
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::DeletedValueTag)));
+ isInt32.link(this);
+
+ if (src != dst)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_jsnumber);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_new_scope);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+ unsigned exception = currentInstruction[1].u.operand;
+
+ // This opcode only executes after a return from cti_op_throw.
+
+ // cti_op_throw may have taken us to a call frame further up the stack; reload
+ // the call frame pointer to adjust.
+ peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+
+ // Now store the exception returned by cti_op_throw.
+ emitStore(exception, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
+#ifdef QT_BUILD_SCRIPT_LIB
+ JITStubCall stubCall(this, cti_op_debug_catch);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+#endif
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_jmp_scopes);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+ addJump(jump(), currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_imm);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_char);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+
+ JITStubCall stubCall(this, cti_op_switch_string);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_new_error(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned type = currentInstruction[2].u.operand;
+ unsigned message = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_new_error);
+ stubCall.addArgument(Imm32(type));
+ stubCall.addArgument(m_codeBlock->getConstant(message));
+ stubCall.addArgument(Imm32(m_bytecodeIndex));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_debug);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[4].u.operand));
+ stubCall.call();
+}
+
+
+void JIT::emit_op_enter(Instruction*)
+{
+ // Even though JIT code doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ for (int i = 0; i < m_codeBlock->m_numVars; ++i)
+ emitStore(i, jsUndefined());
+}
+
+void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
+{
+ emit_op_enter(currentInstruction);
+
+ JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_create_arguments(Instruction*)
+{
+ Jump argsNotCell = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::CellTag));
+ Jump argsNotNull = branchTestPtr(NonZero, payloadFor(RegisterFile::ArgumentsRegister, callFrameRegister));
+
+ // If we get here the arguments pointer is a null cell - i.e. arguments need lazy creation.
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, cti_op_create_arguments).call();
+
+ argsNotCell.link(this);
+ argsNotNull.link(this);
+}
+
+void JIT::emit_op_init_arguments(Instruction*)
+{
+ emitStore(RegisterFile::ArgumentsRegister, JSValue(), callFrameRegister);
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ emitLoad(thisRegister, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
+}
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_convert_this);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(thisRegister);
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_will_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_did_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+#else // USE(JSVALUE32_64)
+
#define RECORD_JUMP_TARGET(targetOffset) \
do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
+{
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ // (2) The second function provides fast property access for string length
+ Label stringLengthBegin = align();
+
+ // Check eax is a string
+ Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+
+ // Checks out okay! - get the length from the UString.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT0);
+ load32(Address(regT0, OBJECT_OFFSETOF(UString::Rep, len)), regT0);
+
+ Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
+
+ // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need to sign extend here.
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+
+ ret();
+#endif
+
+ // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+ COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
+
+ // VirtualCallLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallLinkBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+
+ Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction2 = call();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ hasCodeBlock2.link(this);
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callArityCheck2 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ arityCheckOkay2.link(this);
+
+ isNativeFunc2.link(this);
+
+ compileOpCallInitializeCallFrame();
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callLazyLinkCall = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ jump(regT0);
+
+ // VirtualCall Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallBegin = align();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+
+ Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callJSFunction1 = call();
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock3.link(this);
+
+ // Check argCount matches callee arity.
+ Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
+ preserveReturnAddressAfterCall(regT3);
+ emitPutJITStubArg(regT3, 1); // return address
+ restoreArgumentReference();
+ Call callArityCheck1 = call();
+ move(regT1, callFrameRegister);
+ emitGetJITStubArg(2, regT1); // argCount
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ arityCheckOkay3.link(this);
+
+ isNativeFunc3.link(this);
+
+ compileOpCallInitializeCallFrame();
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
+ jump(regT0);
+
+ Label nativeCallThunk = align();
+ preserveReturnAddressAfterCall(regT0);
+ emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
+
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+
+#if PLATFORM(X86_64)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86Registers::ecx);
+
+ // Allocate stack space for our arglist
+ subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+ COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_by_16byte_aligned);
+
+ // Set up arguments
+ subPtr(Imm32(1), X86Registers::ecx); // Don't include 'this' in argcount
+
+ // Push argcount
+ storePtr(X86Registers::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in edx
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86Registers::edx);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
+ mul32(Imm32(sizeof(Register)), X86Registers::ecx, X86Registers::ecx);
+ subPtr(X86Registers::ecx, X86Registers::edx);
+
+ // push pointer to arguments
+ storePtr(X86Registers::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // ArgList is passed by reference so is stackPointerRegister
+ move(stackPointerRegister, X86Registers::ecx);
+
+ // edx currently points to the first argument, edx-sizeof(Register) points to 'this'
+ loadPtr(Address(X86Registers::edx, -(int32_t)sizeof(Register)), X86Registers::edx);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);
+
+ move(callFrameRegister, X86Registers::edi);
+
+ call(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+#elif PLATFORM(X86)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+ /* We have two structs that we use to describe the stackframe we set up for our
+ * call to native code. NativeCallFrameStructure describes how we set up the stack
+ * in advance of the call. NativeFunctionCalleeSignature describes the callframe
+ * as the native code expects it. We do this as we are using the fastcall calling
+ * convention which results in the callee popping its arguments off the stack, but
+ * not the rest of the callframe so we need a nice way to ensure we increment the
+ * stack pointer by the right amount after the call.
+ */
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in EDX
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ JSValue result;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#else
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in ECX
+ // JSObject* callee; // passed in EDX
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#endif
+ const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
+ // Allocate system stack frame
+ subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
+
+ // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
+ loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
+
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+ // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
+ addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
+
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
+ storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
+
+ // Plant callframe
+ move(callFrameRegister, X86Registers::edx);
+
+ call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ // JSValue is a non-POD type
+ loadPtr(Address(X86Registers::eax), X86Registers::eax);
+#else
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx);
+
+ // Plant callframe
+ move(callFrameRegister, X86Registers::ecx);
+ call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
+#endif
+
+ // We've put a few temporaries on the stack in addition to the actual arguments
+ // so pull them off now
+ addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
+
+#elif PLATFORM(ARM_TRADITIONAL)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+ // Allocate stack space for our arglist
+ COMPILE_ASSERT((sizeof(ArgList) & 0x7) == 0, ArgList_should_by_8byte_aligned);
+ subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // Push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ move(callFrameRegister, regT1);
+ sub32(Imm32(RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT1)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+
+ // push pointer to arguments
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // Setup arg3: regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
+ loadPtr(Address(regT1, -(int32_t)sizeof(Register)), regT2);
+
+ // Setup arg2:
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
+
+ // Setup arg1:
+ move(callFrameRegister, regT0);
+
+ // Setup arg4: This is a plain hack
+ move(stackPointerRegister, ARMRegisters::S0);
+
+ move(ctiReturnRegister, ARMRegisters::lr);
+ call(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ breakpoint();
+#endif
+
+ // Check for an exception
+ loadPtr(&(globalData->exception), regT2);
+ Jump exceptionHandler = branchTestPtr(NonZero, regT2);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+
+ // Handle an exception
+ exceptionHandler.link(this);
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+#endif
+
+ // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+#endif
+ patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+#endif
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ *executablePool = finalCode.m_executablePool;
+
+ *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
+ *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
+ *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
+#else
+ UNUSED_PARAM(ctiStringLengthTrampoline);
+#endif
+}
+
void JIT::emit_op_mov(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -62,7 +1820,7 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
void JIT::emit_op_end(Instruction* currentInstruction)
{
if (m_codeBlock->needsFullScopeChain())
- JITStubCall(this, JITStubs::cti_op_end).call();
+ JITStubCall(this, cti_op_end).call();
ASSERT(returnValueRegister != callFrameRegister);
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
@@ -94,7 +1852,7 @@ void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
@@ -103,7 +1861,7 @@ void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
@@ -127,7 +1885,7 @@ void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
@@ -143,7 +1901,7 @@ void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
- JITStubCall(this, JITStubs::cti_op_new_object).call(currentInstruction[1].u.operand);
+ JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
@@ -197,8 +1955,8 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
void JIT::emit_op_new_func(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_new_func);
- stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
+ JITStubCall stubCall(this, cti_op_new_func);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -214,9 +1972,14 @@ void JIT::emit_op_call_eval(Instruction* currentInstruction)
void JIT::emit_op_load_varargs(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_load_varargs);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ int argCountDst = currentInstruction[1].u.operand;
+ int argsOffset = currentInstruction[2].u.operand;
+
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(Imm32(argsOffset));
+ stubCall.call();
+ // Stores a naked int32 in the register file.
+ store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
}
void JIT::emit_op_call_varargs(Instruction* currentInstruction)
@@ -273,26 +2036,26 @@ void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_tear_off_activation);
+ JITStubCall stubCall(this, cti_op_tear_off_activation);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.call();
}
void JIT::emit_op_tear_off_arguments(Instruction*)
{
- JITStubCall(this, JITStubs::cti_op_tear_off_arguments).call();
+ JITStubCall(this, cti_op_tear_off_arguments).call();
}
void JIT::emit_op_ret(Instruction* currentInstruction)
{
#ifdef QT_BUILD_SCRIPT_LIB
- JITStubCall stubCall(this, JITStubs::cti_op_debug_return);
+ JITStubCall stubCall(this, cti_op_debug_return);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.call();
#endif
// We could JIT generate the deref, only calling out to C when the refcount hits zero.
if (m_codeBlock->needsFullScopeChain())
- JITStubCall(this, JITStubs::cti_op_ret_scopeChain).call();
+ JITStubCall(this, cti_op_ret_scopeChain).call();
ASSERT(callFrameRegister != regT1);
ASSERT(regT1 != returnValueRegister);
@@ -314,7 +2077,7 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_new_array);
+ JITStubCall stubCall(this, cti_op_new_array);
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
@@ -322,7 +2085,7 @@ void JIT::emit_op_new_array(Instruction* currentInstruction)
void JIT::emit_op_resolve(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_resolve);
+ JITStubCall stubCall(this, cti_op_resolve);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -355,20 +2118,12 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_strcat);
+ JITStubCall stubCall(this, cti_op_strcat);
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_func(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, JITStubs::cti_op_resolve_func);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call(currentInstruction[2].u.operand);
-}
-
void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
{
emitTimeoutCheck();
@@ -386,14 +2141,14 @@ void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
};
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_resolve_base);
+ JITStubCall stubCall(this, cti_op_resolve_base);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_resolve_skip);
+ JITStubCall stubCall(this, cti_op_resolve_skip);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
stubCall.call(currentInstruction[1].u.operand);
@@ -424,7 +2179,7 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction)
// Slow case
noMatch.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_resolve_global);
+ JITStubCall stubCall(this, cti_op_resolve_global);
stubCall.addArgument(ImmPtr(globalObject));
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
@@ -540,7 +2295,7 @@ void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
not32(regT0);
emitFastArithIntToImmNoCheck(regT0, regT0);
#else
@@ -551,7 +2306,7 @@ void JIT::emit_op_bitnot(Instruction* currentInstruction)
void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_resolve_with_base);
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.call(currentInstruction[2].u.operand);
@@ -559,8 +2314,8 @@ void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_new_func_exp);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
+ JITStubCall stubCall(this, cti_op_new_func_exp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -601,7 +2356,7 @@ void JIT::emit_op_bitxor(Instruction* currentInstruction)
void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_new_regexp);
+ JITStubCall stubCall(this, cti_op_new_regexp);
stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -616,7 +2371,7 @@ void JIT::emit_op_bitor(Instruction* currentInstruction)
void JIT::emit_op_throw(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_throw);
+ JITStubCall stubCall(this, cti_op_throw);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.call();
ASSERT(regT0 == returnValueRegister);
@@ -629,7 +2384,7 @@ void JIT::emit_op_throw(Instruction* currentInstruction)
void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_next_pname);
+ JITStubCall stubCall(this, cti_op_next_pname);
stubCall.addArgument(currentInstruction[2].u.operand, regT2);
stubCall.call();
Jump endOfIter = branchTestPtr(Zero, regT0);
@@ -640,14 +2395,37 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
void JIT::emit_op_push_scope(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_push_scope);
+ JITStubCall stubCall(this, cti_op_push_scope);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_pop_scope(Instruction*)
{
- JITStubCall(this, JITStubs::cti_op_pop_scope).call();
+ JITStubCall(this, cti_op_pop_scope).call();
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(src1, regT0, src2, regT1);
+
+ // Jump to a slow case if either operand is a number, or if both are JSCell*s.
+ move(regT0, regT2);
+ orPtr(regT1, regT2);
+ addSlowCase(emitJumpIfJSCell(regT2));
+ addSlowCase(emitJumpIfImmediateNumber(regT2));
+
+ if (type == OpStrictEq)
+ set32(Equal, regT1, regT0, regT0);
+ else
+ set32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+
+ emitPutVirtualRegister(dst);
}
void JIT::emit_op_stricteq(Instruction* currentInstruction)
@@ -678,7 +2456,7 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_push_new_scope);
+ JITStubCall stubCall(this, cti_op_push_new_scope);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
stubCall.call(currentInstruction[1].u.operand);
@@ -690,7 +2468,7 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
emitPutVirtualRegister(currentInstruction[1].u.operand);
#ifdef QT_BUILD_SCRIPT_LIB
- JITStubCall stubCall(this, JITStubs::cti_op_debug_catch);
+ JITStubCall stubCall(this, cti_op_debug_catch);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.call();
#endif
@@ -698,7 +2476,7 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_jmp_scopes);
+ JITStubCall stubCall(this, cti_op_jmp_scopes);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.call();
addJump(jump(), currentInstruction[2].u.operand + 2);
@@ -716,7 +2494,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
- JITStubCall stubCall(this, JITStubs::cti_op_switch_imm);
+ JITStubCall stubCall(this, cti_op_switch_imm);
stubCall.addArgument(scrutinee, regT2);
stubCall.addArgument(Imm32(tableIndex));
stubCall.call();
@@ -734,7 +2512,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
- JITStubCall stubCall(this, JITStubs::cti_op_switch_char);
+ JITStubCall stubCall(this, cti_op_switch_char);
stubCall.addArgument(scrutinee, regT2);
stubCall.addArgument(Imm32(tableIndex));
stubCall.call();
@@ -751,7 +2529,7 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
- JITStubCall stubCall(this, JITStubs::cti_op_switch_string);
+ JITStubCall stubCall(this, cti_op_switch_string);
stubCall.addArgument(scrutinee, regT2);
stubCall.addArgument(Imm32(tableIndex));
stubCall.call();
@@ -760,7 +2538,7 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
void JIT::emit_op_new_error(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_new_error);
+ JITStubCall stubCall(this, cti_op_new_error);
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[3].u.operand))));
stubCall.addArgument(Imm32(m_bytecodeIndex));
@@ -769,7 +2547,7 @@ void JIT::emit_op_new_error(Instruction* currentInstruction)
void JIT::emit_op_debug(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_debug);
+ JITStubCall stubCall(this, cti_op_debug);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
@@ -847,16 +2625,16 @@ void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
for (size_t j = 0; j < count; ++j)
emitInitRegister(j);
- JITStubCall(this, JITStubs::cti_op_push_activation).call(currentInstruction[1].u.operand);
+ JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}
void JIT::emit_op_create_arguments(Instruction*)
{
Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
if (m_codeBlock->m_numParameters == 1)
- JITStubCall(this, JITStubs::cti_op_create_arguments_no_params).call();
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
else
- JITStubCall(this, JITStubs::cti_op_create_arguments).call();
+ JITStubCall(this, cti_op_create_arguments).call();
argsCreated.link(this);
}
@@ -880,7 +2658,7 @@ void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
Jump noProfiler = branchTestPtr(Zero, Address(regT1));
- JITStubCall stubCall(this, JITStubs::cti_op_profile_will_call);
+ JITStubCall stubCall(this, cti_op_profile_will_call);
stubCall.addArgument(currentInstruction[1].u.operand, regT1);
stubCall.call();
noProfiler.link(this);
@@ -892,7 +2670,7 @@ void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
Jump noProfiler = branchTestPtr(Zero, Address(regT1));
- JITStubCall stubCall(this, JITStubs::cti_op_profile_did_call);
+ JITStubCall stubCall(this, cti_op_profile_did_call);
stubCall.addArgument(currentInstruction[1].u.operand, regT1);
stubCall.call();
noProfiler.link(this);
@@ -905,7 +2683,7 @@ void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowC
{
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_convert_this);
+ JITStubCall stubCall(this, cti_op_convert_this);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -922,7 +2700,7 @@ void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowC
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_to_primitive);
+ JITStubCall stubCall(this, cti_op_to_primitive);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -938,7 +2716,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
emitFastArithIntToImmNoCheck(regT1, regT1);
notImm.link(this);
- JITStubCall stubCall(this, JITStubs::cti_op_get_by_val);
+ JITStubCall stubCall(this, cti_op_get_by_val);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(currentInstruction[1].u.operand);
@@ -964,14 +2742,14 @@ void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowC
unsigned target = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ JITStubCall stubCall(this, cti_op_loop_if_less);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call();
emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
} else if (isOperandConstantImmediateInt(op1)) {
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ JITStubCall stubCall(this, cti_op_loop_if_less);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT0);
stubCall.call();
@@ -979,7 +2757,7 @@ void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowC
} else {
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ JITStubCall stubCall(this, cti_op_loop_if_less);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
@@ -993,7 +2771,7 @@ void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<Slo
unsigned target = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_loop_if_lesseq);
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
stubCall.addArgument(regT0);
stubCall.addArgument(currentInstruction[2].u.operand, regT2);
stubCall.call();
@@ -1001,7 +2779,7 @@ void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<Slo
} else {
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_loop_if_lesseq);
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
@@ -1018,7 +2796,7 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
emitFastArithIntToImmNoCheck(regT1, regT1);
notImm.link(this); {
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_val);
+ JITStubCall stubCall(this, cti_op_put_by_val);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
@@ -1029,7 +2807,7 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
// slow cases for immediate int accesses to arrays
linkSlowCase(iter);
linkSlowCase(iter); {
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_val_array);
+ JITStubCall stubCall(this, cti_op_put_by_val_array);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
@@ -1040,7 +2818,7 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ JITStubCall stubCall(this, cti_op_jtrue);
stubCall.addArgument(regT0);
stubCall.call();
emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
@@ -1050,7 +2828,7 @@ void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>
{
linkSlowCase(iter);
xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
- JITStubCall stubCall(this, JITStubs::cti_op_not);
+ JITStubCall stubCall(this, cti_op_not);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1058,7 +2836,7 @@ void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>
void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ JITStubCall stubCall(this, cti_op_jtrue);
stubCall.addArgument(regT0);
stubCall.call();
emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand + 2); // inverted!
@@ -1067,7 +2845,7 @@ void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEnt
void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_bitnot);
+ JITStubCall stubCall(this, cti_op_bitnot);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1075,7 +2853,7 @@ void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEnt
void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ JITStubCall stubCall(this, cti_op_jtrue);
stubCall.addArgument(regT0);
stubCall.call();
emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
@@ -1084,7 +2862,7 @@ void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntr
void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_bitxor);
+ JITStubCall stubCall(this, cti_op_bitxor);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(currentInstruction[1].u.operand);
@@ -1093,7 +2871,7 @@ void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEnt
void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_bitor);
+ JITStubCall stubCall(this, cti_op_bitor);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(currentInstruction[1].u.operand);
@@ -1102,26 +2880,31 @@ void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntr
void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_eq);
+ JITStubCall stubCall(this, cti_op_eq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ stubCall.call();
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_neq);
+ JITStubCall stubCall(this, cti_op_eq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ stubCall.call();
+ xor32(Imm32(0x1), regT0);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
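The neq slow path above is worth a second look: rather than using a dedicated stub, it calls the cti_op_eq stub, inverts the low bit of the 0/1 result with xor32, and only then tags the value as a boolean immediate. A tiny stand-alone sketch of that inversion step in plain C++, with an illustrative (not the real) boolean encoding:

    #include <cstdint>

    // Illustrative stand-ins: cti_op_eq hands back 0 or 1; tagging is normally done by
    // emitTagAsBoolImmediate using the real JSImmediate bit layout, not this placeholder.
    constexpr uint32_t kFakeBoolTag = 0x2;  // hypothetical tag, not JSC's actual encoding

    uint32_t tagAsBool(uint32_t bit) { return (bit << 1) | kFakeBoolTag; }

    // The neq slow path in outline: invert the eq result's low bit, then tag it as a boolean.
    uint32_t slowNeqResult(uint32_t eqResult)  // eqResult is 0 or 1 from the eq stub
    {
        return tagAsBool(eqResult ^ 0x1u);
    }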
void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_stricteq);
+ JITStubCall stubCall(this, cti_op_stricteq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(currentInstruction[1].u.operand);
@@ -1131,7 +2914,7 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCase
{
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_nstricteq);
+ JITStubCall stubCall(this, cti_op_nstricteq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call(currentInstruction[1].u.operand);
@@ -1144,7 +2927,7 @@ void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_instanceof);
+ JITStubCall stubCall(this, cti_op_instanceof);
stubCall.addArgument(currentInstruction[2].u.operand, regT2);
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
stubCall.addArgument(currentInstruction[4].u.operand, regT2);
@@ -1176,11 +2959,12 @@ void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCa
linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_to_jsnumber);
+ JITStubCall stubCall(this, cti_op_to_jsnumber);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
}
+#endif // USE(JSVALUE32_64)
} // namespace JSC
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp
index c1e5c29..08b3096 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -47,11 +47,920 @@ using namespace std;
namespace JSC {
+#if USE(JSVALUE32_64)
+
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_index);
+ stubCall.addArgument(base);
+ stubCall.addArgument(Imm32(property));
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_getter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_setter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_del_by_id);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.call(dst);
+}
+
+
+#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
+#endif
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.call(dst);
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ m_propertyAccessInstructionIndex++;
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(value);
+ stubCall.call();
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ m_propertyAccessInstructionIndex++;
+ ASSERT_NOT_REACHED();
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+
+ // This will be relinked to load the function without doing a load.
+ DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+ move(Imm32(JSValue::CellTag), regT1);
+ Jump match = jump();
+
+ ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
+ ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
+ ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
+
+ // Link the failure cases here.
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+    // Do a regular(ish) get_by_id (the slow case will be linked to
+    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath();
+
+ match.link(this);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+}
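Conceptually, the inline method check emitted above caches four things per call site: the receiver's Structure, the prototype object, the prototype's Structure, and the resolved function; when both structure checks still pass, the cached function is produced with no property lookup at all. A minimal sketch of that cache, using made-up toy types rather than the real JSCell/Structure layout:

    struct ToyStructure;
    struct ToyCell { ToyStructure* structure; };
    struct ToyFunction;

    // Mirrors what patchMethodCallProto fills in once the first lookup has succeeded.
    struct MethodCheckCache {
        ToyStructure* cachedStructure = nullptr;
        ToyCell*      cachedPrototype = nullptr;
        ToyStructure* cachedPrototypeStructure = nullptr;
        ToyFunction*  cachedFunction = nullptr;
    };

    // Returns the cached callee on a hit, or null to fall back to the full get_by_id path.
    ToyFunction* tryMethodCheck(const MethodCheckCache& c, const ToyCell* base)
    {
        if (!c.cachedStructure || base->structure != c.cachedStructure)
            return nullptr;
        if (c.cachedPrototype->structure != c.cachedPrototypeStructure)
            return nullptr;
        return c.cachedFunction;
    }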
+
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+}
+
+#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+
+#endif
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+    // The slow case that handles accesses to arrays (below) may jump back up to here.
+ Label callGetByValJITStub(this);
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base array check
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+
+ linkSlowCase(iter); // array fast cut-off check
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
+ branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), callGetByValJITStub);
+
+ // Missed the fast region, but it is still in the vector.
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+
+ // FIXME: Maybe we can optimize this comparison to JSValue().
+ Jump skip = branch32(NotEqual, regT0, Imm32(0));
+ branch32(Equal, regT1, Imm32(JSValue::CellTag), callGetByValJITStub);
+
+ skip.link(this);
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+
+ Jump inFastVector = branch32(Below, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
+
+ // Check if the access is within the vector.
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
+
+ // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
+    // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
+ Jump skip = branch32(NotEqual, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::CellTag));
+ addSlowCase(branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), Imm32(0)));
+ skip.link(this);
+
+ inFastVector.link(this);
+
+ emitLoad(value, regT1, regT0);
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
+}
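The store above is gated by two bounds kept in the array's storage: a fast-access cutoff below which every slot is known to be initialized, and the allocated vector length; a write past the cutoff but still inside the vector additionally has to check whether the slot is a hole. A stand-alone sketch of that classification, with toy types in place of JSArray/ArrayStorage:

    #include <cstddef>
    #include <vector>

    struct ToySlot { bool initialized = false; };

    struct ToyArrayStorage {
        std::size_t fastAccessCutoff = 0;  // every index below this has been written
        std::vector<ToySlot> vector;       // allocated vector; may contain holes
    };

    enum class PutPath { FastStore, SlowStub };

    // Mirrors the branches emitted above: in-cutoff and already-written slots are stored
    // in place; first writes to a hole and out-of-vector writes go to the stub.
    PutPath classifyPutByVal(const ToyArrayStorage& s, std::size_t index)
    {
        if (index < s.fastAccessCutoff)
            return PutPath::FastStore;
        if (index < s.vector.size() && s.vector[index].initialized)
            return PutPath::FastStore;
        return PutPath::SlowStub;
    }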
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base not array check
+
+ JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+ stubPutByValCall.addArgument(base);
+ stubPutByValCall.addArgument(property);
+ stubPutByValCall.addArgument(value);
+ stubPutByValCall.call();
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+
+ // Slow cases for immediate int accesses to arrays.
+ linkSlowCase(iter); // in vector check
+ linkSlowCase(iter); // written to slot check
+
+ JITStubCall stubCall(this, cti_op_put_by_val_array);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(regT2);
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ compileGetByIdHotPath();
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
+}
+
+void JIT::compileGetByIdHotPath()
+{
+    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
+    // to jump back to if one of these trampolines finds a match.
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+ m_propertyAccessInstructionIndex++;
+
+ DataLabelPtr structureToCompare;
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ addSlowCase(structureCheck);
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
+ ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
+
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
+
+ DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
+ DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
+
+ Label putResult(this);
+ ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
+}
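In effect the hot path just planted a structure check against a patchable immediate followed by two loads from patchable offsets; patchGetByIdSelf later fills in the real Structure and property-map offsets (and rewrites the storage load to a LEA when the property lives in inline storage). A hedged sketch of the equivalent check-and-load, with toy types standing in for JSCell/JSObject:

    #include <cstdint>

    struct ToyStructure;

    struct ToyObject {
        ToyStructure* structure;
        uint32_t*     externalStorage;   // external property storage (payload/tag words)
        uint32_t      inlineStorage[8];  // illustrative inline storage
    };

    struct SelfAccessCache {
        ToyStructure* expectedStructure = nullptr;  // patched in by patchGetByIdSelf
        uint32_t payloadOffset = 0;                 // patched property-map offsets (in words)
        uint32_t tagOffset = 0;
        bool useInlineStorage = false;              // the load is patched to a LEA in this case
    };

    // Returns true and fills tag/payload on a cache hit; false means take the slow call.
    bool tryCachedGetById(const SelfAccessCache& c, ToyObject* base, uint32_t& tag, uint32_t& payload)
    {
        if (base->structure != c.expectedStructure)
            return false;
        const uint32_t* storage = c.useInlineStorage ? base->inlineStorage : base->externalStorage;
        payload = storage[c.payloadOffset];
        tag = storage[c.tagOffset];
        return true;
    }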
+
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
+}
+
+void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+{
+ // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
+    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+    // of the call (which we can use to look up the patch information), but should an array-length or
+ // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+ // the distance from the call to the head of the slow case.
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ Label coldPathBegin(this);
+
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ Call call = stubCall.call(dst);
+
+ ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
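The only per-site bookkeeping the slow case leaves behind is the return address of that stub call; when a stub later wants to patch the site (or a trampoline needs to bail back to the slow case), the CodeBlock maps that return address back to the StructureStubInfo. A minimal sketch of that lookup, assuming a simple map where the real CodeBlock uses its own lookup structure:

    #include <map>
    #include <stdexcept>

    struct ToyStubInfo { /* hotPathBegin, callReturnLocation, stubRoutine, ... */ };

    struct ToyCodeBlock {
        std::map<const void*, ToyStubInfo> stubInfoByReturnAddress;

        // Registered at compile time from the property-access compilation info; queried by the stubs.
        ToyStubInfo& getStubInfo(const void* returnAddress)
        {
            auto it = stubInfoByReturnAddress.find(returnAddress);
            if (it == stubInfoByReturnAddress.end())
                throw std::logic_error("no stub info registered for this call site");
            return it->second;
        }
    };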
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+    // In order to be able to patch both the Structure and the object offset, we store one pointer
+    // ('hotPathBegin') to just after the point where the arguments have been loaded into registers, and we generate
+    // code such that the Structure & offset are always at the same distance from it.
+
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, value, regT3, regT2);
+
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+ m_propertyAccessInstructionIndex++;
+
+    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
+ DataLabelPtr structureToCompare;
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+
+    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
+
+ DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
+ DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_put_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(regT3, regT2);
+ Call call = stubCall.call();
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
+
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ if (structure->isUsingInlineStorage())
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ emitStore(offset, valueTag, valuePayload, base);
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ if (structure->isUsingInlineStorage())
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ emitLoad(offset, resultTag, resultPayload, base);
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
+{
+ if (base->isUsingInlineStorage()) {
+ load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
+ load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
+ return;
+ }
+
+ size_t offset = cachedOffset * sizeof(JSValue);
+
+ PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
+ loadPtr(static_cast<void*>(protoPropertyStorage), temp);
+ load32(Address(temp, offset), resultPayload);
+ load32(Address(temp, offset + 4), resultTag);
+}
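All three compile*DirectOffset helpers above make the same decision: a small object's properties sit in inline storage inside the object itself, while a larger object keeps a pointer to a separately allocated external storage block, and cachedOffset indexes whichever of the two applies. The same decision as plain C++, with an illustrative slot layout for the 32-bit value representation:

    #include <cstddef>
    #include <cstdint>

    // Illustrative only: on JSVALUE32_64 a property slot is a payload/tag pair of 32-bit words.
    struct ToyValue { uint32_t payload; uint32_t tag; };

    struct ToyObject {
        ToyValue  inlineStorage[4];   // used while the object is small
        ToyValue* externalStorage;    // used once properties spill out of line
        bool      usesInlineStorage;
    };

    ToyValue getDirectOffset(const ToyObject& o, std::size_t cachedOffset)
    {
        const ToyValue* storage = o.usesInlineStorage ? o.inlineStorage : o.externalStorage;
        return storage[cachedOffset];
    }

    void putDirectOffset(ToyObject& o, std::size_t cachedOffset, ToyValue v)
    {
        ToyValue* storage = o.usesInlineStorage ? o.inlineStorage : o.externalStorage;
        storage[cachedOffset] = v;
    }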
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
+{
+ // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
+
+ JumpList failureCases;
+ failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(oldStructure)));
+
+ // Verify that nothing in the prototype chain has a setter for this property.
+ for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(it->get())));
+ }
+
+ // Reallocate property storage if needed.
+ Call callTarget;
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (willNeedStorageRealloc) {
+        // This trampoline was called like a JIT stub; before we can call again we need to
+ // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
+ stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+ stubCall.call(regT0);
+
+ restoreReturnAddressBeforeReturn(regT3);
+ }
+
+ sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
+ add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
+ storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
+
+ load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
+ load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
+
+ // Write the value
+ compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
+
+ ret();
+
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
+
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ }
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+}
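Stripped of the patching machinery, the transition stub above does four things: verify the old Structure (and that nothing on the prototype chain intervenes), grow the property storage only when the old and new capacities differ, commit the new Structure, and store the value at its cached offset. A rough C++ outline of that sequence, with toy types and without the prototype-chain walk:

    #include <cstddef>
    #include <cstdlib>

    struct ToyStructure { std::size_t propertyStorageCapacity; };

    struct ToyObject {
        ToyStructure* structure;
        void**        propertyStorage;
    };

    // Returns false on the failure cases, which in the stub jump to the put_by_id fail path.
    bool putByIdTransition(ToyObject& obj, ToyStructure* oldStructure, ToyStructure* newStructure,
                           std::size_t cachedOffset, void* value)
    {
        if (obj.structure != oldStructure)
            return false;

        if (oldStructure->propertyStorageCapacity != newStructure->propertyStorageCapacity) {
            void** grown = static_cast<void**>(
                std::realloc(obj.propertyStorage, newStructure->propertyStorageCapacity * sizeof(void*)));
            if (!grown)
                return false;
            obj.propertyStorage = grown;
        }

        obj.structure = newStructure;              // commit the transition
        obj.propertyStorage[cachedOffset] = value; // write the value at its cached offset
        return true;
    }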
+
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct
+ if (structure->isUsingInlineStorage())
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
+}
+
+void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ ASSERT(!methodCallLinkInfo.cachedStructure);
+ methodCallLinkInfo.cachedStructure = structure;
+ structure->ref();
+
+ Structure* prototypeStructure = proto->structure();
+ ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
+ methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
+ prototypeStructure->ref();
+
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
+}
+
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct
+ if (structure->isUsingInlineStorage())
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
+}
+
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+{
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+ // regT0 holds a JSCell*
+
+ // Check for array
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+
+ Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
+ move(regT2, regT0);
+ move(Imm32(JSValue::Int32Tag), regT1);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+}
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ // Checks out okay! - getDirectOffset
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+{
+ // regT0 holds a JSCell*
+
+ Jump failureCase = checkStructure(regT0, structure);
+ compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ if (!lastProtoBegin)
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+
+ patchBuffer.link(failureCase, lastProtoBegin);
+
+    // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ structure->ref();
+ polymorphicStructures->list[currentIndex].set(entryLabel, structure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ // Check eax is an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ patchBuffer.link(failureCases1, lastProtoBegin);
+ patchBuffer.link(failureCases2, lastProtoBegin);
+
+    // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ structure->ref();
+ prototypeStructure->ref();
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+ // Check eax is an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ RefPtr<Structure>* chainEntries = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = chainEntries[i].get();
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#else
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+ }
+ ASSERT(protoObject);
+
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+
+ patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+    // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ // Track the stub we have created so that it will be deleted later.
+ structure->ref();
+ chain->ref();
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+ // Check eax is an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ RefPtr<Structure>* chainEntries = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = chainEntries[i].get();
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if PLATFORM(X86_64)
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#else
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+ }
+ ASSERT(protoObject);
+
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+
+    // On success, return to the hot path code at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+#else // USE(JSVALUE32_64)
+
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
// This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
// We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
// number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
@@ -78,7 +987,7 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
+#if USE(JSVALUE64)
// See comment in op_get_by_val.
zeroExtend32ToPtr(regT1, regT1);
#else
@@ -105,7 +1014,7 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_index);
+ JITStubCall stubCall(this, cti_op_put_by_index);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
@@ -114,7 +1023,7 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction)
void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_put_getter);
+ JITStubCall stubCall(this, cti_op_put_getter);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
@@ -123,7 +1032,7 @@ void JIT::emit_op_put_getter(Instruction* currentInstruction)
void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_put_setter);
+ JITStubCall stubCall(this, cti_op_put_setter);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
@@ -132,7 +1041,7 @@ void JIT::emit_op_put_setter(Instruction* currentInstruction)
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, JITStubs::cti_op_del_by_id);
+ JITStubCall stubCall(this, cti_op_del_by_id);
stubCall.addArgument(currentInstruction[2].u.operand, regT2);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
@@ -157,7 +1066,7 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
emitGetVirtualRegister(baseVReg, regT0);
- JITStubCall stubCall(this, JITStubs::cti_op_get_by_id_generic);
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
stubCall.addArgument(regT0);
stubCall.addArgument(ImmPtr(ident));
stubCall.call(resultVReg);
@@ -178,7 +1087,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_generic);
+ JITStubCall stubCall(this, cti_op_put_by_id_generic);
stubCall.addArgument(regT0);
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(regT1);
@@ -213,13 +1122,20 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
// Do the method check - check the object & its prototype's structure inline (this is the common case).
m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
Jump notCell = emitJumpIfNotJSCell(regT0);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
// This will be relinked to load the function without doing a load.
DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
Jump match = jump();
ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
@@ -249,7 +1165,7 @@ void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowC
unsigned baseVReg = currentInstruction[2].u.operand;
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
- compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, m_propertyAccessInstructionIndex++, true);
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
// We've already generated the following get_by_id, so make sure it's skipped over.
m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
@@ -283,6 +1199,8 @@ void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propert
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
@@ -301,6 +1219,9 @@ void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propert
ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
Label putResult(this);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
}
@@ -310,10 +1231,10 @@ void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCase
unsigned baseVReg = currentInstruction[2].u.operand;
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
- compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, m_propertyAccessInstructionIndex++, false);
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
}
-void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex, bool isMethodCheck)
+void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
// As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
// so that we only need track one pointer into the slow case code - we track a pointer to the location
@@ -324,18 +1245,23 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
#ifndef NDEBUG
Label coldPathBegin(this);
#endif
- JITStubCall stubCall(this, isMethodCheck ? JITStubs::cti_op_get_by_id_method_check : JITStubs::cti_op_get_by_id);
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
stubCall.addArgument(regT0);
stubCall.addArgument(ImmPtr(ident));
Call call = stubCall.call(resultVReg);
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
// Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
@@ -354,6 +1280,8 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
// Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
@@ -369,6 +1297,9 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
+
+ END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
}
@@ -382,7 +1313,7 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_id);
+ JITStubCall stubCall(this, cti_op_put_by_id);
stubCall.addArgument(regT0);
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(regT1);
@@ -465,13 +1396,14 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
// remove the return address from the stack, to prevent the stack from becoming misaligned.
preserveReturnAddressAfterCall(regT3);
- JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_transition_realloc);
- stubCall.addArgument(regT0);
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
- stubCall.addArgument(regT1); // This argument is not used in the stub; we set it up on the stack so that it can be restored, below.
stubCall.call(regT0);
- emitGetJITStubArg(4, regT1);
+ emitGetJITStubArg(2, regT1);
restoreReturnAddressBeforeReturn(regT3);
}
@@ -494,11 +1426,11 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
- patchBuffer.link(failureCall, FunctionPtr(JITStubs::cti_op_put_by_id_fail));
+ patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
if (willNeedStorageRealloc) {
ASSERT(m_calls.size() == 1);
- patchBuffer.link(m_calls[0].from, FunctionPtr(JITStubs::cti_op_put_by_id_transition_realloc));
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
}
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
@@ -512,8 +1444,8 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St
RepatchBuffer repatchBuffer(codeBlock);
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_self_fail));
+ // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
int offset = sizeof(JSValue) * cachedOffset;
@@ -527,7 +1459,7 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}
-void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto)
+void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
RepatchBuffer repatchBuffer(codeBlock);
@@ -544,6 +1476,8 @@ void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodC
repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}
void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
@@ -551,8 +1485,8 @@ void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo,
RepatchBuffer repatchBuffer(codeBlock);
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- // Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
int offset = sizeof(JSValue) * cachedOffset;
@@ -602,7 +1536,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
repatchBuffer.relink(jumpLocation, entryLabel);
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
@@ -648,7 +1582,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
repatchBuffer.relink(jumpLocation, entryLabel);
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_proto_list));
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
@@ -827,13 +1761,15 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
repatchBuffer.relink(jumpLocation, entryLabel);
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_proto_list));
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+#endif // USE(JSVALUE32_64)
+
} // namespace JSC
#endif // ENABLE(JIT)
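
A note on the cti_op_put_by_id_transition_realloc rewrite in the hunks above: the stub call now reuses the base, identifier, and value slots already populated by the regular op_put_by_id hot path via skipArgument() instead of re-poking them, only the two storage capacities are added, and the value is reloaded from stub-argument index 2 after the call. A commented restatement of the new sequence, illustrative only and using just the names that appear in the hunk (regT0/regT1 hold the base and value as in the surrounding code):

    JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
    stubCall.skipArgument(); // base  - already set up by the put_by_id hot path
    stubCall.skipArgument(); // ident - already set up
    stubCall.skipArgument(); // value - already set up
    stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
    stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
    stubCall.call(regT0);        // the stub now returns the base object directly
    emitGetJITStubArg(2, regT1); // reload the value argument into regT1 for the store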
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h
index bc07178..cb5354b 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h
@@ -37,32 +37,40 @@ namespace JSC {
JITStubCall(JIT* jit, JSObject* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
, m_stub(reinterpret_cast<void*>(stub))
- , m_returnType(Value)
- , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ , m_returnType(Cell)
+ , m_stackIndex(stackIndexStart)
{
}
JITStubCall(JIT* jit, JSPropertyNameIterator* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
, m_stub(reinterpret_cast<void*>(stub))
- , m_returnType(Value)
- , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ , m_returnType(Cell)
+ , m_stackIndex(stackIndexStart)
{
}
JITStubCall(JIT* jit, void* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
, m_stub(reinterpret_cast<void*>(stub))
- , m_returnType(Value)
- , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ , m_returnType(VoidPtr)
+ , m_stackIndex(stackIndexStart)
{
}
JITStubCall(JIT* jit, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
, m_stub(reinterpret_cast<void*>(stub))
- , m_returnType(Value)
- , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ , m_returnType(Int)
+ , m_stackIndex(stackIndexStart)
+ {
+ }
+
+ JITStubCall(JIT* jit, bool (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Int)
+ , m_stackIndex(stackIndexStart)
{
}
@@ -70,30 +78,78 @@ namespace JSC {
: m_jit(jit)
, m_stub(reinterpret_cast<void*>(stub))
, m_returnType(Void)
- , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ , m_stackIndex(stackIndexStart)
+ {
+ }
+
+#if USE(JSVALUE32_64)
+ JITStubCall(JIT* jit, EncodedJSValue (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Value)
+ , m_stackIndex(stackIndexStart)
{
}
+#endif
// Arguments are added first to last.
+ void skipArgument()
+ {
+ m_stackIndex += stackIndexStep;
+ }
+
void addArgument(JIT::Imm32 argument)
{
- m_jit->poke(argument, m_argumentIndex);
- ++m_argumentIndex;
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
}
void addArgument(JIT::ImmPtr argument)
{
- m_jit->poke(argument, m_argumentIndex);
- ++m_argumentIndex;
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
}
void addArgument(JIT::RegisterID argument)
{
- m_jit->poke(argument, m_argumentIndex);
- ++m_argumentIndex;
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(const JSValue& value)
+ {
+ m_jit->poke(JIT::Imm32(value.payload()), m_stackIndex);
+ m_jit->poke(JIT::Imm32(value.tag()), m_stackIndex + 1);
+ m_stackIndex += stackIndexStep;
+ }
+
+ void addArgument(JIT::RegisterID tag, JIT::RegisterID payload)
+ {
+ m_jit->poke(payload, m_stackIndex);
+ m_jit->poke(tag, m_stackIndex + 1);
+ m_stackIndex += stackIndexStep;
+ }
+
+#if USE(JSVALUE32_64)
+ void addArgument(unsigned srcVirtualRegister)
+ {
+ if (m_jit->m_codeBlock->isConstantRegisterIndex(srcVirtualRegister)) {
+ addArgument(m_jit->getConstantOperand(srcVirtualRegister));
+ return;
+ }
+
+ m_jit->emitLoad(srcVirtualRegister, JIT::regT1, JIT::regT0);
+ addArgument(JIT::regT1, JIT::regT0);
}
+ void getArgument(size_t argumentNumber, JIT::RegisterID tag, JIT::RegisterID payload)
+ {
+ size_t stackIndex = stackIndexStart + (argumentNumber * stackIndexStep);
+ m_jit->peek(payload, stackIndex);
+ m_jit->peek(tag, stackIndex + 1);
+ }
+#else
void addArgument(unsigned src, JIT::RegisterID scratchRegister) // src is a virtual register.
{
if (m_jit->m_codeBlock->isConstantRegisterIndex(src))
@@ -104,6 +160,7 @@ namespace JSC {
}
m_jit->killLastResultRegister();
}
+#endif
JIT::Call call()
{
@@ -121,21 +178,42 @@ namespace JSC {
m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, false);
#endif
+#if USE(JSVALUE32_64)
+ m_jit->unmap();
+#else
m_jit->killLastResultRegister();
+#endif
return call;
}
+#if USE(JSVALUE32_64)
+ JIT::Call call(unsigned dst) // dst is a virtual register.
+ {
+ ASSERT(m_returnType == Value || m_returnType == Cell);
+ JIT::Call call = this->call();
+ if (m_returnType == Value)
+ m_jit->emitStore(dst, JIT::regT1, JIT::regT0);
+ else
+ m_jit->emitStoreCell(dst, JIT::returnValueRegister);
+ return call;
+ }
+#else
JIT::Call call(unsigned dst) // dst is a virtual register.
{
- ASSERT(m_returnType == Value);
+ ASSERT(m_returnType == VoidPtr || m_returnType == Cell);
JIT::Call call = this->call();
m_jit->emitPutVirtualRegister(dst);
return call;
}
+#endif
- JIT::Call call(JIT::RegisterID dst)
+ JIT::Call call(JIT::RegisterID dst) // dst is a machine register.
{
- ASSERT(m_returnType == Value);
+#if USE(JSVALUE32_64)
+ ASSERT(m_returnType == Value || m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
+#else
+ ASSERT(m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
+#endif
JIT::Call call = this->call();
if (dst != JIT::returnValueRegister)
m_jit->move(JIT::returnValueRegister, dst);
@@ -143,25 +221,13 @@ namespace JSC {
}
private:
+ static const size_t stackIndexStep = sizeof(EncodedJSValue) == 2 * sizeof(void*) ? 2 : 1;
+ static const size_t stackIndexStart = 1; // Index 0 is reserved for restoreArgumentReference().
+
JIT* m_jit;
void* m_stub;
- enum { Value, Void } m_returnType;
- size_t m_argumentIndex;
- };
-
- class CallEvalJITStub : public JITStubCall {
- public:
- CallEvalJITStub(JIT* jit, Instruction* instruction)
- : JITStubCall(jit, JITStubs::cti_op_call_eval)
- {
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- addArgument(callee, JIT::regT2);
- addArgument(JIT::Imm32(registerOffset));
- addArgument(JIT::Imm32(argCount));
- }
+ enum { Void, VoidPtr, Int, Value, Cell } m_returnType;
+ size_t m_stackIndex;
};
}
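
The reworked JITStubCall above sizes its argument slots in units of stackIndexStep, so a full tag/payload pair fits per argument on JSVALUE32_64 builds while JSVALUE64 keeps one machine word per argument. A standalone sketch of just that computation (the EncodedJSValue typedef here is an assumption standing in for JSC's real definition):

    #include <stddef.h>
    #include <stdint.h>

    typedef int64_t EncodedJSValue; // assumption: 64-bit encoded value, as in JSC

    // Two stack slots per argument on 32-bit pointer targets, one on 64-bit targets.
    static const size_t stackIndexStep =
        sizeof(EncodedJSValue) == 2 * sizeof(void*) ? 2 : 1;
    static const size_t stackIndexStart = 1; // slot 0 is reserved for restoreArgumentReference()

Call sites keep the same shape; for example the put_by_id slow path shown earlier still reads:

    JITStubCall stubCall(this, cti_op_put_by_id);
    stubCall.addArgument(regT0);         // base
    stubCall.addArgument(ImmPtr(ident)); // identifier
    stubCall.addArgument(regT1);         // value
    stubCall.call();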
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp
index 7928593..a110dcd 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp
@@ -56,6 +56,7 @@
#include "RegExpPrototype.h"
#include "Register.h"
#include "SamplingTool.h"
+#include <stdarg.h>
#include <stdio.h>
#ifdef QT_BUILD_SCRIPT_LIB
@@ -66,13 +67,18 @@ using namespace std;
namespace JSC {
-
#if PLATFORM(DARWIN) || PLATFORM(WIN_OS)
#define SYMBOL_STRING(name) "_" #name
#else
#define SYMBOL_STRING(name) #name
#endif
+#if PLATFORM(IPHONE)
+#define THUMB_FUNC_PARAM(name) SYMBOL_STRING(name)
+#else
+#define THUMB_FUNC_PARAM(name)
+#endif
+
#if PLATFORM(DARWIN)
// Mach-O platform
#define HIDE_SYMBOL(name) ".private_extern _" #name
@@ -86,6 +92,271 @@ namespace JSC {
#define HIDE_SYMBOL(name)
#endif
+#if USE(JSVALUE32_64)
+
+#if COMPILER(GCC) && PLATFORM(X86)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushl %ebp" "\n"
+ "movl %esp, %ebp" "\n"
+ "pushl %esi" "\n"
+ "pushl %edi" "\n"
+ "pushl %ebx" "\n"
+ "subl $0x3c, %esp" "\n"
+ "movl $512, %esi" "\n"
+ "movl 0x58(%esp), %edi" "\n"
+ "call *0x50(%esp)" "\n"
+ "addl $0x3c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
+ "movl %esp, %ecx" "\n"
+#endif
+ "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "addl $0x3c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addl $0x3c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(GCC) && PLATFORM(X86_64)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
+#endif
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 32 == 0x0, JITStackFrame_maintains_32byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushq %rbp" "\n"
+ "movq %rsp, %rbp" "\n"
+ "pushq %r12" "\n"
+ "pushq %r13" "\n"
+ "pushq %r14" "\n"
+ "pushq %r15" "\n"
+ "pushq %rbx" "\n"
+ "subq $0x48, %rsp" "\n"
+ "movq $512, %r12" "\n"
+ "movq $0xFFFF000000000000, %r14" "\n"
+ "movq $0xFFFF000000000002, %r15" "\n"
+ "movq 0x90(%rsp), %r13" "\n"
+ "call *0x80(%rsp)" "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "movq %rsp, %rdi" "\n"
+ "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(GCC) && PLATFORM(ARM_THUMB2)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
+#endif
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "sub sp, sp, #0x3c" "\n"
+ "str lr, [sp, #0x20]" "\n"
+ "str r4, [sp, #0x24]" "\n"
+ "str r5, [sp, #0x28]" "\n"
+ "str r6, [sp, #0x2c]" "\n"
+ "str r1, [sp, #0x30]" "\n"
+ "str r2, [sp, #0x34]" "\n"
+ "str r3, [sp, #0x38]" "\n"
+ "cpy r5, r2" "\n"
+ "mov r6, #512" "\n"
+ "blx r0" "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "cpy r0, sp" "\n"
+ "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+#elif COMPILER(MSVC)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
+#endif
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
+
+extern "C" {
+
+ __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*)
+ {
+ __asm {
+ push ebp;
+ mov ebp, esp;
+ push esi;
+ push edi;
+ push ebx;
+ sub esp, 0x3c;
+ mov esi, 512;
+ mov ecx, esp;
+ mov edi, [esp + 0x58];
+ call [esp + 0x50];
+ add esp, 0x3c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiVMThrowTrampoline()
+ {
+ __asm {
+ mov ecx, esp;
+ call cti_vm_throw;
+ add esp, 0x3c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiOpThrowNotCaught()
+ {
+ __asm {
+ add esp, 0x3c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+}
+
+#endif // COMPILER(GCC) && PLATFORM(X86)
+
+#else // USE(JSVALUE32_64)
+
#if COMPILER(GCC) && PLATFORM(X86)
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
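
The JSVALUE32_64 trampolines added above are pinned to the JITStackFrame layout by the COMPILE_ASSERTs at the top of each block; the literal offsets in the hand-written prologues are the same numbers. As a reading aid for the x86 block (values copied from the hunk, illustrative only):

    //   subl $0x3c, %esp        <->  offsetof(JITStackFrame, savedEBX)  == 0x3c
    //   call *0x50(%esp)        <->  offsetof(JITStackFrame, code)      == 0x50
    //   movl 0x58(%esp), %edi   <->  offsetof(JITStackFrame, callFrame) == 0x58

If a field moves in JITStackFrame, the asserts and the assembly have to change together, which is exactly what the comment above each block warns about.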
@@ -151,9 +422,9 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x48, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x78, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
asm volatile (
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
@@ -166,13 +437,20 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"pushq %r14" "\n"
"pushq %r15" "\n"
"pushq %rbx" "\n"
+ // Form the JIT stubs area
+ "pushq %r9" "\n"
+ "pushq %r8" "\n"
+ "pushq %rcx" "\n"
+ "pushq %rdx" "\n"
+ "pushq %rsi" "\n"
+ "pushq %rdi" "\n"
"subq $0x48, %rsp" "\n"
"movq $512, %r12" "\n"
"movq $0xFFFF000000000000, %r14" "\n"
"movq $0xFFFF000000000002, %r15" "\n"
- "movq 0x90(%rsp), %r13" "\n"
- "call *0x80(%rsp)" "\n"
- "addq $0x48, %rsp" "\n"
+ "movq %rdx, %r13" "\n"
+ "call *%rdi" "\n"
+ "addq $0x78, %rsp" "\n"
"popq %rbx" "\n"
"popq %r15" "\n"
"popq %r14" "\n"
@@ -188,7 +466,7 @@ HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"movq %rsp, %rdi" "\n"
"call " SYMBOL_STRING(cti_vm_throw) "\n"
- "addq $0x48, %rsp" "\n"
+ "addq $0x78, %rsp" "\n"
"popq %rbx" "\n"
"popq %r15" "\n"
"popq %r14" "\n"
@@ -202,7 +480,7 @@ asm volatile (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "addq $0x48, %rsp" "\n"
+ "addq $0x78, %rsp" "\n"
"popq %rbx" "\n"
"popq %r15" "\n"
"popq %r14" "\n"
@@ -212,7 +490,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM_ARM_ARCH(7)
+#elif COMPILER(GCC) && PLATFORM(ARM_THUMB2)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
@@ -224,9 +502,9 @@ asm volatile (
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
".thumb" "\n"
-".thumb_func " SYMBOL_STRING(ctiTrampoline) "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "sub sp, sp, #0x3c" "\n"
+ "sub sp, sp, #0x40" "\n"
"str lr, [sp, #0x20]" "\n"
"str r4, [sp, #0x24]" "\n"
"str r5, [sp, #0x28]" "\n"
@@ -241,7 +519,7 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ldr r5, [sp, #0x28]" "\n"
"ldr r4, [sp, #0x24]" "\n"
"ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x3c" "\n"
+ "add sp, sp, #0x40" "\n"
"bx lr" "\n"
);
@@ -251,7 +529,7 @@ asm volatile (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
".thumb" "\n"
-".thumb_func " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"cpy r0, sp" "\n"
"bl " SYMBOL_STRING(cti_vm_throw) "\n"
@@ -259,7 +537,7 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"ldr r5, [sp, #0x28]" "\n"
"ldr r4, [sp, #0x24]" "\n"
"ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x3c" "\n"
+ "add sp, sp, #0x40" "\n"
"bx lr" "\n"
);
@@ -269,7 +547,7 @@ asm volatile (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
".thumb" "\n"
-".thumb_func " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ldr r6, [sp, #0x2c]" "\n"
"ldr r5, [sp, #0x28]" "\n"
@@ -279,6 +557,49 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"bx lr" "\n"
);
+#elif COMPILER(GCC) && PLATFORM(ARM_TRADITIONAL)
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "stmdb sp!, {r1-r3}" "\n"
+ "stmdb sp!, {r4-r8, lr}" "\n"
+ "mov r6, pc" "\n"
+ "add r6, r6, #40" "\n"
+ "sub sp, sp, #32" "\n"
+ "ldr r4, [sp, #60]" "\n"
+ "mov r5, #512" "\n"
+ // r0 contains the code
+ "add r8, pc, #4" "\n"
+ "str r8, [sp, #-4]!" "\n"
+ "mov pc, r0" "\n"
+ "add sp, sp, #32" "\n"
+ "ldmia sp!, {r4-r8, lr}" "\n"
+ "add sp, sp, #12" "\n"
+ "mov pc, lr" "\n"
+
+ // the return instruction
+ "ldr pc, [sp], #4" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "mov r0, sp" "\n"
+ "mov lr, r6" "\n"
+ "add r8, pc, #4" "\n"
+ "str r8, [sp, #-4]!" "\n"
+ "b " SYMBOL_STRING(cti_vm_throw) "\n"
+
+// Both have the same return sequence

+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "add sp, sp, #32" "\n"
+ "ldmia sp!, {r4-r8, lr}" "\n"
+ "add sp, sp, #12" "\n"
+ "mov pc, lr" "\n"
+);
+
#elif COMPILER(MSVC)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
@@ -319,7 +640,7 @@ extern "C" {
{
__asm {
mov ecx, esp;
- call JITStubs::cti_vm_throw;
+ call cti_vm_throw;
add esp, 0x1c;
pop ebx;
pop edi;
@@ -342,7 +663,9 @@ extern "C" {
}
}
-#endif
+#endif // COMPILER(GCC) && PLATFORM(X86)
+
+#endif // USE(JSVALUE32_64)
#if ENABLE(OPCODE_SAMPLING)
#define CTI_SAMPLER stackFrame.globalData->interpreter->sampler()
@@ -352,9 +675,9 @@ extern "C" {
JITThunks::JITThunks(JSGlobalData* globalData)
{
- JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiArrayLengthTrampoline, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallPreLink, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk);
+ JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk);
-#if PLATFORM_ARM_ARCH(7)
+#if PLATFORM(ARM_THUMB2)
 // Unfortunately, the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
// and the OBJECT_OFFSETOF macro does not appear constantish enough for it to be happy with its use in COMPILE_ASSERT
// macros.
@@ -367,7 +690,7 @@ JITThunks::JITThunks(JSGlobalData* globalData)
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == 0x34);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, exception) == 0x38);
// The fifth argument is the first item already on the stack.
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == 0x3c);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == 0x40);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == 0x1C);
#endif
@@ -375,7 +698,7 @@ JITThunks::JITThunks(JSGlobalData* globalData)
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot)
+NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo)
{
// The interpreter checks for recursion here; I do not believe this can occur in CTI.
@@ -384,33 +707,31 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
// Uncacheable: give up.
if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
return;
}
JSCell* baseCell = asCell(baseValue);
Structure* structure = baseCell->structure();
- if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ if (structure->isUncacheableDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
return;
}
// If baseCell != base, then baseCell must be a proxy for another object.
if (baseCell != slot.base()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
return;
}
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(returnAddress);
-
// Cache hit: Specialize instruction and ref Structures.
// Structure transition, cache transition info
if (slot.type() == PutPropertySlot::NewProperty) {
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
if (!prototypeChain->isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
return;
}
stubInfo->initPutByIdTransition(structure->previousID(), structure, prototypeChain);
@@ -423,14 +744,14 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
}
-NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot)
+NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
{
// FIXME: Write a test that proves we need to check for recursion here just
// like the interpreter does, then add a check for recursion.
// FIXME: Cache property access for immediates.
if (!baseValue.isCell()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
return;
}
@@ -450,23 +771,18 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
// Uncacheable: give up.
if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
return;
}
JSCell* baseCell = asCell(baseValue);
Structure* structure = baseCell->structure();
- if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ if (structure->isUncacheableDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
return;
}
- // In the interpreter the last structure is trapped here; in CTI we use the
- // *_second method to achieve a similar (but not quite the same) effect.
-
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(returnAddress);
-
// Cache hit: Specialize instruction and ref Structures.
if (slot.slotBase() == baseValue) {
@@ -495,20 +811,20 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot);
if (!count) {
- stubInfo->opcodeID = op_get_by_id_generic;
+ stubInfo->accessType = access_get_by_id_generic;
return;
}
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
if (!prototypeChain->isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
return;
}
stubInfo->initGetByIdChain(structure, prototypeChain);
JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, slot.cachedOffset(), returnAddress);
}
-#endif
+#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#define SETUP_VA_LISTL_ARGS va_list vl_args; va_start(vl_args, args)
@@ -585,25 +901,23 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
#define CHECK_FOR_EXCEPTION() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception != JSValue())) \
+ if (UNLIKELY(stackFrame.globalData->exception)) \
VM_THROW_EXCEPTION(); \
} while (0)
#define CHECK_FOR_EXCEPTION_AT_END() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception != JSValue())) \
+ if (UNLIKELY(stackFrame.globalData->exception)) \
VM_THROW_EXCEPTION_AT_END(); \
} while (0)
#define CHECK_FOR_EXCEPTION_VOID() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception != JSValue())) { \
+ if (UNLIKELY(stackFrame.globalData->exception)) { \
VM_THROW_EXCEPTION_AT_END(); \
return; \
} \
} while (0)
-namespace JITStubs {
-
-#if PLATFORM_ARM_ARCH(7)
+#if PLATFORM(ARM_THUMB2)
#define DEFINE_STUB_FUNCTION(rtype, op) \
extern "C" { \
@@ -615,7 +929,7 @@ namespace JITStubs {
".globl " SYMBOL_STRING(cti_##op) "\n" \
HIDE_SYMBOL(cti_##op) "\n" \
".thumb" "\n" \
- ".thumb_func " SYMBOL_STRING(cti_##op) "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(cti_##op) "\n" \
SYMBOL_STRING(cti_##op) ":" "\n" \
"str lr, [sp, #0x1c]" "\n" \
"bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
@@ -628,7 +942,7 @@ namespace JITStubs {
#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
#endif
-DEFINE_STUB_FUNCTION(JSObject*, op_convert_this)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -637,7 +951,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_convert_this)
JSObject* result = v1.toThisObject(callFrame);
CHECK_FOR_EXCEPTION_AT_END();
- return result;
+ return JSValue::encode(result);
}
DEFINE_STUB_FUNCTION(void, op_end)
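
The DEFINE_STUB_FUNCTION change above keeps the non-ARM expansion as a plain definition and, on ARM_THUMB2, also emits a small thumb thunk that spills lr into the JITStackFrame before branching to the real body. An illustrative expansion for op_convert_this, written as comments and using only what the hunk shows (the restore/return half of the thunk is outside the hunk):

    // DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this)
    // expands, on the non-ARM_THUMB2 path, to the plain definition
    //     EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION)
    // while the ARM_THUMB2 path additionally emits
    //     cti_op_convert_this:                 ; thumb thunk
    //         str lr, [sp, #0x1c]              ; save return address in the JITStackFrame
    //         bl  JITStubThunked_op_convert_this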
@@ -677,8 +991,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
}
if (rightIsNumber & leftIsString) {
- RefPtr<UString::Rep> value = v2.isInt32Fast() ?
- concatenate(asString(v1)->value().rep(), v2.getInt32Fast()) :
+ RefPtr<UString::Rep> value = v2.isInt32() ?
+ concatenate(asString(v1)->value().rep(), v2.asInt32()) :
concatenate(asString(v1)->value().rep(), right);
if (UNLIKELY(!value)) {
@@ -717,6 +1031,7 @@ DEFINE_STUB_FUNCTION(int, timeout_check)
globalData->exception = createInterruptedExecutionException(globalData);
VM_THROW_EXCEPTION_AT_END();
}
+
CHECK_FOR_EXCEPTION_AT_END();
return timeoutChecker->ticksUntilNextCheck();
}
@@ -797,25 +1112,19 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_generic)
DEFINE_STUB_FUNCTION(void, op_put_by_id)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
CallFrame* callFrame = stackFrame.callFrame;
Identifier& ident = stackFrame.args[1].identifier();
PutPropertySlot slot;
stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_id_second));
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_second)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo);
- PutPropertySlot slot;
- stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
- JITThunks::tryCachePutByID(stackFrame.callFrame, stackFrame.callFrame->codeBlock(), STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot);
CHECK_FOR_EXCEPTION_AT_END();
}
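
The op_put_by_id rewrite above removes the op_put_by_id_second stub: the first time a put_by_id site reaches the slow path its StructureStubInfo is only marked as seen, and caching is attempted on the second visit. A commented restatement of the heuristic, illustrative only and using the names from the hunk:

    CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
    StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
    if (!stubInfo->seenOnce())
        stubInfo->setSeen();   // first visit: just record that this site was reached
    else
        JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS,
                                   stackFrame.args[0].jsValue(), slot, stubInfo);

The same pattern replaces op_get_by_id_second in the get_by_id stub further down.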
@@ -832,36 +1141,19 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id_fail)
CHECK_FOR_EXCEPTION_AT_END();
}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_put_by_id_transition_realloc)
+DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc)
{
STUB_INIT_STACK_FRAME(stackFrame);
JSValue baseValue = stackFrame.args[0].jsValue();
- int32_t oldSize = stackFrame.args[1].int32();
- int32_t newSize = stackFrame.args[2].int32();
+ int32_t oldSize = stackFrame.args[3].int32();
+ int32_t newSize = stackFrame.args[4].int32();
ASSERT(baseValue.isObject());
- asObject(baseValue)->allocatePropertyStorage(oldSize, newSize);
-
- return JSValue::encode(baseValue);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
+ JSObject* base = asObject(baseValue);
+ base->allocatePropertyStorage(oldSize, newSize);
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
-
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_second));
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
+ return base;
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
@@ -874,25 +1166,15 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
JSValue result = baseValue.get(callFrame, ident, slot);
+ CHECK_FOR_EXCEPTION();
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_method_check_second));
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_second)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ MethodCallLinkInfo& methodCallLinkInfo = codeBlock->getMethodCallLinkInfo(STUB_RETURN_ADDRESS);
- CHECK_FOR_EXCEPTION();
+ if (!methodCallLinkInfo.seenOnce()) {
+ methodCallLinkInfo.setSeen();
+ return JSValue::encode(result);
+ }
// If we successfully got something, then the base from which it is being accessed must
// be an object. (Assertion to ensure asObject() call below is safe, which comes after
@@ -909,7 +1191,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_second)
JSObject* slotBaseObject;
if (baseValue.isCell()
&& slot.isCacheable()
- && !(structure = asCell(baseValue)->structure())->isDictionary()
+ && !(structure = asCell(baseValue)->structure())->isUncacheableDictionary()
&& (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
&& specific
) {
@@ -923,33 +1205,33 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_second)
// The result fetched should always be the callee!
ASSERT(result == JSValue(callee));
- MethodCallLinkInfo& methodCallLinkInfo = callFrame->codeBlock()->getMethodCallLinkInfo(STUB_RETURN_ADDRESS);
// Check to see if the function is on the object's prototype. Patch up the code to optimize.
- if (slot.slotBase() == structure->prototypeForLookup(callFrame))
- JIT::patchMethodCallProto(callFrame->codeBlock(), methodCallLinkInfo, callee, structure, slotBaseObject);
+ if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
+ JIT::patchMethodCallProto(codeBlock, methodCallLinkInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS);
+ return JSValue::encode(result);
+ }
+
// Check to see if the function is on the object itself.
// Since we generate the method-check to check both the structure and a prototype-structure (since this
// is the common case) we have a problem - we need to patch the prototype structure check to do something
 // useful. We could try to nop it out altogether, but that's a little messy, so let's do something simpler
// for now. For now it performs a check on a special object on the global object only used for this
// purpose. The object is in no way exposed, and as such the check will always pass.
- else if (slot.slotBase() == baseValue)
- JIT::patchMethodCallProto(callFrame->codeBlock(), methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject()->methodCallDummy());
-
- // For now let any other case be cached as a normal get_by_id.
+ if (slot.slotBase() == baseValue) {
+ JIT::patchMethodCallProto(codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject()->methodCallDummy(), STUB_RETURN_ADDRESS);
+ return JSValue::encode(result);
+ }
}
// Revert the get_by_id op back to being a regular get_by_id - allow it to cache like normal, if it needs to.
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
-
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_second)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
CallFrame* callFrame = stackFrame.callFrame;
Identifier& ident = stackFrame.args[1].identifier();
@@ -957,7 +1239,12 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_second)
PropertySlot slot(baseValue);
JSValue result = baseValue.get(callFrame, ident, slot);
- JITThunks::tryCacheGetByID(callFrame, callFrame->codeBlock(), STUB_RETURN_ADDRESS, baseValue, ident, slot);
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCacheGetByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, ident, slot, stubInfo);
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
@@ -978,7 +1265,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
if (baseValue.isCell()
&& slot.isCacheable()
- && !asCell(baseValue)->structure()->isDictionary()
+ && !asCell(baseValue)->structure()->isUncacheableDictionary()
&& slot.slotBase() == baseValue) {
CodeBlock* codeBlock = callFrame->codeBlock();
@@ -989,7 +1276,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
PolymorphicAccessStructureList* polymorphicStructureList;
int listIndex = 1;
- if (stubInfo->opcodeID == op_get_by_id_self) {
+ if (stubInfo->accessType == access_get_by_id_self) {
ASSERT(!stubInfo->stubRoutine);
polymorphicStructureList = new PolymorphicAccessStructureList(CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure);
stubInfo->initGetByIdSelfList(polymorphicStructureList, 2);
@@ -1013,18 +1300,18 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(Str
PolymorphicAccessStructureList* prototypeStructureList = 0;
listIndex = 1;
- switch (stubInfo->opcodeID) {
- case op_get_by_id_proto:
+ switch (stubInfo->accessType) {
+ case access_get_by_id_proto:
prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure, stubInfo->u.getByIdProto.prototypeStructure);
stubInfo->stubRoutine = CodeLocationLabel();
stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
break;
- case op_get_by_id_chain:
+ case access_get_by_id_chain:
prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain);
stubInfo->stubRoutine = CodeLocationLabel();
stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
break;
- case op_get_by_id_proto_list:
+ case access_get_by_id_proto_list:
prototypeStructureList = stubInfo->u.getByIdProtoList.structureList;
listIndex = stubInfo->u.getByIdProtoList.listSize;
stubInfo->u.getByIdProtoList.listSize++;
@@ -1049,7 +1336,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
CHECK_FOR_EXCEPTION();
- if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) {
+ if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isUncacheableDictionary()) {
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
return JSValue::encode(result);
}
@@ -1143,7 +1430,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_string_fail)
return JSValue::encode(result);
}
-#endif
+#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
{
@@ -1223,7 +1510,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_func)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return stackFrame.args[0].funcDeclNode()->makeFunction(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
+ return stackFrame.args[0].function()->make(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
}
DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
@@ -1237,11 +1524,11 @@ DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
JSFunction* function = asFunction(stackFrame.args[0].jsValue());
ASSERT(!function->isHostFunction());
- FunctionBodyNode* body = function->body();
+ FunctionExecutable* executable = function->jsExecutable();
ScopeChainNode* callDataScopeChain = function->scope().node();
- body->jitCode(callDataScopeChain);
+ executable->jitCode(stackFrame.callFrame, callDataScopeChain);
- return &(body->generatedBytecode());
+ return function;
}
DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
@@ -1249,8 +1536,9 @@ DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* newCodeBlock = stackFrame.args[3].codeBlock();
- ASSERT(newCodeBlock->codeType() != NativeCode);
+ JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
+ ASSERT(!callee->isHostFunction());
+ CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecode();
int argCount = stackFrame.args[2].int32();
ASSERT(argCount != newCodeBlock->m_numParameters);
@@ -1287,45 +1575,36 @@ DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
callFrame->setCallerFrame(oldCallFrame);
}
- RETURN_POINTER_PAIR(newCodeBlock, callFrame);
-}
-
-DEFINE_STUB_FUNCTION(void*, vm_dontLazyLinkCall)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSGlobalData* globalData = stackFrame.globalData;
- JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
-
- ctiPatchNearCallByReturnAddress(stackFrame.callFrame->callerFrame()->codeBlock(), stackFrame.args[1].returnAddress(), globalData->jitStubs.ctiVirtualCallLink());
-
- return callee->body()->generatedJITCode().addressForCall().executableAddress();
+ RETURN_POINTER_PAIR(callee, callFrame);
}
+#if ENABLE(JIT_OPTIMIZE_CALL)
DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
- JITCode& jitCode = callee->body()->generatedJITCode();
+ ExecutableBase* executable = callee->executable();
+ JITCode& jitCode = executable->generatedJITCode();
CodeBlock* codeBlock = 0;
- if (!callee->isHostFunction())
- codeBlock = &callee->body()->bytecode(callee->scope().node());
- else
- codeBlock = &callee->body()->generatedBytecode();
-
+ if (!executable->isHostFunction())
+ codeBlock = &static_cast<FunctionExecutable*>(executable)->bytecode(stackFrame.callFrame, callee->scope().node());
CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(stackFrame.args[1].returnAddress());
- JIT::linkCall(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, jitCode, callLinkInfo, stackFrame.args[2].int32(), stackFrame.globalData);
+
+ if (!callLinkInfo->seenOnce())
+ callLinkInfo->setSeen();
+ else
+ JIT::linkCall(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, jitCode, callLinkInfo, stackFrame.args[2].int32(), stackFrame.globalData);
return jitCode.addressForCall().executableAddress();
}
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
{
STUB_INIT_STACK_FRAME(stackFrame);
- JSActivation* activation = new (stackFrame.globalData) JSActivation(stackFrame.callFrame, static_cast<FunctionBodyNode*>(stackFrame.callFrame->codeBlock()->ownerNode()));
+ JSActivation* activation = new (stackFrame.globalData) JSActivation(stackFrame.callFrame, static_cast<FunctionExecutable*>(stackFrame.callFrame->codeBlock()->ownerExecutable()));
stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->copy()->push(activation));
return activation;
}
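
Earlier in the same hunk, vm_dontLazyLinkCall is removed and, under ENABLE(JIT_OPTIMIZE_CALL), vm_lazyLinkCall applies the same seen-once heuristic to call sites, so a call is only hard-linked on its second execution. A commented restatement, illustrative only and using the names from the hunk:

    CallLinkInfo* callLinkInfo =
        &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(stackFrame.args[1].returnAddress());
    if (!callLinkInfo->seenOnce())
        callLinkInfo->setSeen();  // first call through this site: do not link yet
    else
        JIT::linkCall(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock,
                      jitCode, callLinkInfo, stackFrame.args[2].int32(), stackFrame.globalData);
    return jitCode.addressForCall().executableAddress();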
@@ -1385,7 +1664,7 @@ DEFINE_STUB_FUNCTION(void, op_create_arguments)
Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame);
stackFrame.callFrame->setCalleeArguments(arguments);
- stackFrame.callFrame[RegisterFile::ArgumentsRegister] = arguments;
+ stackFrame.callFrame[RegisterFile::ArgumentsRegister] = JSValue(arguments);
}
DEFINE_STUB_FUNCTION(void, op_create_arguments_no_params)
@@ -1394,7 +1673,7 @@ DEFINE_STUB_FUNCTION(void, op_create_arguments_no_params)
Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame, Arguments::NoParameters);
stackFrame.callFrame->setCalleeArguments(arguments);
- stackFrame.callFrame[RegisterFile::ArgumentsRegister] = arguments;
+ stackFrame.callFrame[RegisterFile::ArgumentsRegister] = JSValue(arguments);
}
DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
@@ -1550,8 +1829,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
JSValue result;
- if (LIKELY(subscript.isUInt32Fast())) {
- uint32_t i = subscript.getUInt32Fast();
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
if (isJSArray(globalData, baseValue)) {
JSArray* jsArray = asArray(baseValue);
if (jsArray->canGetIndex(i))
@@ -1589,8 +1868,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
JSValue result;
- if (LIKELY(subscript.isUInt32Fast())) {
- uint32_t i = subscript.getUInt32Fast();
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i))
result = asString(baseValue)->getIndex(stackFrame.globalData, i);
else {
@@ -1607,7 +1886,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
return JSValue::encode(result);
}
-
DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1620,8 +1898,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
JSValue result;
- if (LIKELY(subscript.isUInt32Fast())) {
- uint32_t i = subscript.getUInt32Fast();
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
@@ -1639,50 +1917,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_func)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- ScopeChainNode* scopeChain = callFrame->scopeChain();
-
- ScopeChainIterator iter = scopeChain->begin();
- ScopeChainIterator end = scopeChain->end();
-
- // FIXME: add scopeDepthIsZero optimization
-
- ASSERT(iter != end);
-
- Identifier& ident = stackFrame.args[0].identifier();
- JSObject* base;
- do {
- base = *iter;
- PropertySlot slot(base);
- if (base->getPropertySlot(callFrame, ident, slot)) {
- // ECMA 11.2.3 says that if we hit an activation the this value should be null.
- // However, section 10.2.3 says that in the case where the value provided
- // by the caller is null, the global object should be used. It also says
- // that the section does not apply to internal functions, but for simplicity
- // of implementation we use the global object anyway here. This guarantees
- // that in host objects you always get a valid object for this.
- // We also handle wrapper substitution for the global object at the same time.
- JSObject* thisObj = base->toThisObject(callFrame);
- JSValue result = slot.getValue(callFrame, ident);
- CHECK_FOR_EXCEPTION_AT_END();
-
- callFrame->registers()[stackFrame.args[1].int32()] = JSValue(thisObj);
- return JSValue::encode(result);
- }
- ++iter;
- } while (iter != end);
-
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
- VM_THROW_EXCEPTION_AT_END();
- return JSValue::encode(JSValue());
-}
-
DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1712,8 +1946,8 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
JSValue subscript = stackFrame.args[1].jsValue();
JSValue value = stackFrame.args[2].jsValue();
- if (LIKELY(subscript.isUInt32Fast())) {
- uint32_t i = subscript.getUInt32Fast();
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
if (isJSArray(globalData, baseValue)) {
JSArray* jsArray = asArray(baseValue);
if (jsArray->canSetIndex(i))
@@ -1724,8 +1958,8 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
JSByteArray* jsByteArray = asByteArray(baseValue);
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_byte_array));
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- if (value.isInt32Fast()) {
- jsByteArray->setIndex(i, value.getInt32Fast());
+ if (value.isInt32()) {
+ jsByteArray->setIndex(i, value.asInt32());
return;
} else {
double dValue = 0;
@@ -1763,14 +1997,9 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val_array)
if (LIKELY(i >= 0))
asArray(baseValue)->JSArray::put(callFrame, i, value);
else {
- // This should work since we're re-boxing an immediate unboxed in JIT code.
- ASSERT(JSValue::makeInt32Fast(i));
- Identifier property(callFrame, JSValue::makeInt32Fast(i).toString(callFrame));
- // FIXME: can toString throw an exception here?
- if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
- PutPropertySlot slot;
- baseValue.put(callFrame, property, value, slot);
- }
+ Identifier property(callFrame, UString::from(i));
+ PutPropertySlot slot;
+ baseValue.put(callFrame, property, value, slot);
}
CHECK_FOR_EXCEPTION_AT_END();
@@ -1787,14 +2016,14 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
JSValue subscript = stackFrame.args[1].jsValue();
JSValue value = stackFrame.args[2].jsValue();
- if (LIKELY(subscript.isUInt32Fast())) {
- uint32_t i = subscript.getUInt32Fast();
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
JSByteArray* jsByteArray = asByteArray(baseValue);
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- if (value.isInt32Fast()) {
- jsByteArray->setIndex(i, value.getInt32Fast());
+ if (value.isInt32()) {
+ jsByteArray->setIndex(i, value.asInt32());
return;
} else {
double dValue = 0;
@@ -1845,6 +2074,7 @@ DEFINE_STUB_FUNCTION(int, op_loop_if_true)
DEFINE_STUB_FUNCTION(int, op_load_varargs)
{
STUB_INIT_STACK_FRAME(stackFrame);
+
CallFrame* callFrame = stackFrame.callFrame;
RegisterFile* registerFile = stackFrame.registerFile;
int argsOffset = stackFrame.args[0].int32();
@@ -1859,7 +2089,7 @@ DEFINE_STUB_FUNCTION(int, op_load_varargs)
stackFrame.globalData->exception = createStackOverflowError(callFrame);
VM_THROW_EXCEPTION();
}
- int32_t expectedParams = asFunction(callFrame->registers()[RegisterFile::Callee].jsValue())->body()->parameterCount();
+ int32_t expectedParams = asFunction(callFrame->callee())->jsExecutable()->parameterCount();
int32_t inplaceArgs = min(providedParams, expectedParams);
Register* inplaceArgsDst = callFrame->registers() + argsOffset;
@@ -1991,7 +2221,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- JSGlobalObject* globalObject = asGlobalObject(stackFrame.args[0].jsValue());
+ JSGlobalObject* globalObject = stackFrame.args[0].globalObject();
Identifier& ident = stackFrame.args[1].identifier();
unsigned globalResolveInfoIndex = stackFrame.args[2].int32();
ASSERT(globalObject->isGlobalObject());
@@ -1999,7 +2229,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
PropertySlot slot(globalObject);
if (globalObject->getPropertySlot(callFrame, ident, slot)) {
JSValue result = slot.getValue(callFrame, ident);
- if (slot.isCacheable() && !globalObject->structure()->isDictionary() && slot.slotBase() == globalObject) {
+ if (slot.isCacheable() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
GlobalResolveInfo& globalResolveInfo = callFrame->codeBlock()->globalResolveInfo(globalResolveInfoIndex);
if (globalResolveInfo.structure)
globalResolveInfo.structure->deref();
@@ -2115,7 +2345,112 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
return JSValue::encode(number);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_eq)
+#if USE(JSVALUE32_64)
+
+DEFINE_STUB_FUNCTION(int, op_eq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+
+ start:
+ if (src2.isUndefined()) {
+ return src1.isNull() ||
+ (src1.isCell() && asCell(src1)->structure()->typeInfo().masqueradesAsUndefined()) ||
+ src1.isUndefined();
+ }
+
+ if (src2.isNull()) {
+ return src1.isUndefined() ||
+ (src1.isCell() && asCell(src1)->structure()->typeInfo().masqueradesAsUndefined()) ||
+ src1.isNull();
+ }
+
+ if (src1.isInt32()) {
+ if (src2.isDouble())
+ return src1.asInt32() == src2.asDouble();
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return src1.asInt32() == d;
+ }
+
+ if (src1.isDouble()) {
+ if (src2.isInt32())
+ return src1.asDouble() == src2.asInt32();
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return src1.asDouble() == d;
+ }
+
+ if (src1.isTrue()) {
+ if (src2.isFalse())
+ return false;
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return d == 1.0;
+ }
+
+ if (src1.isFalse()) {
+ if (src2.isTrue())
+ return false;
+ double d = src2.toNumber(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ return d == 0.0;
+ }
+
+ if (src1.isUndefined())
+ return src2.isCell() && asCell(src2)->structure()->typeInfo().masqueradesAsUndefined();
+
+ if (src1.isNull())
+ return src2.isCell() && asCell(src2)->structure()->typeInfo().masqueradesAsUndefined();
+
+ JSCell* cell1 = asCell(src1);
+
+ if (cell1->isString()) {
+ if (src2.isInt32())
+ return static_cast<JSString*>(cell1)->value().toDouble() == src2.asInt32();
+
+ if (src2.isDouble())
+ return static_cast<JSString*>(cell1)->value().toDouble() == src2.asDouble();
+
+ if (src2.isTrue())
+ return static_cast<JSString*>(cell1)->value().toDouble() == 1.0;
+
+ if (src2.isFalse())
+ return static_cast<JSString*>(cell1)->value().toDouble() == 0.0;
+
+ JSCell* cell2 = asCell(src2);
+ if (cell2->isString())
+ return static_cast<JSString*>(cell1)->value() == static_cast<JSString*>(cell2)->value();
+
+ src2 = asObject(cell2)->toPrimitive(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ goto start;
+ }
+
+ if (src2.isObject())
+ return asObject(cell1) == asObject(src2);
+ src1 = asObject(cell1)->toPrimitive(stackFrame.callFrame);
+ CHECK_FOR_EXCEPTION();
+ goto start;
+}
+
+DEFINE_STUB_FUNCTION(int, op_eq_strings)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSString* string1 = stackFrame.args[0].jsString();
+ JSString* string2 = stackFrame.args[1].jsString();
+
+ ASSERT(string1->isString());
+ ASSERT(string2->isString());
+ return string1->value() == string2->value();
+}
+
+#else // USE(JSVALUE32_64)
+
+DEFINE_STUB_FUNCTION(int, op_eq)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2124,12 +2459,13 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_eq)
CallFrame* callFrame = stackFrame.callFrame;
- ASSERT(!JSValue::areBothInt32Fast(src1, src2));
- JSValue result = jsBoolean(JSValue::equalSlowCaseInline(callFrame, src1, src2));
+ bool result = JSValue::equalSlowCaseInline(callFrame, src1, src2);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
+ return result;
}
+#endif // USE(JSVALUE32_64)
+
DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2137,13 +2473,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
JSValue val = stackFrame.args[0].jsValue();
JSValue shift = stackFrame.args[1].jsValue();
- int32_t left;
- uint32_t right;
- if (JSValue::areBothInt32Fast(val, shift))
- return JSValue::encode(jsNumber(stackFrame.globalData, val.getInt32Fast() << (shift.getInt32Fast() & 0x1f)));
- if (val.numberToInt32(left) && shift.numberToUInt32(right))
- return JSValue::encode(jsNumber(stackFrame.globalData, left << (right & 0x1f)));
-
CallFrame* callFrame = stackFrame.callFrame;
JSValue result = jsNumber(stackFrame.globalData, (val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
CHECK_FOR_EXCEPTION_AT_END();
@@ -2157,11 +2486,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitand)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
- int32_t left;
- int32_t right;
- if (src1.numberToInt32(left) && src2.numberToInt32(right))
- return JSValue::encode(jsNumber(stackFrame.globalData, left & right));
-
+ ASSERT(!src1.isInt32() || !src2.isInt32());
CallFrame* callFrame = stackFrame.callFrame;
JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) & src2.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
@@ -2175,15 +2500,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_rshift)
JSValue val = stackFrame.args[0].jsValue();
JSValue shift = stackFrame.args[1].jsValue();
- int32_t left;
- uint32_t right;
- if (JSFastMath::canDoFastRshift(val, shift))
- return JSValue::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
- if (val.numberToInt32(left) && shift.numberToUInt32(right))
- return JSValue::encode(jsNumber(stackFrame.globalData, left >> (right & 0x1f)));
-
CallFrame* callFrame = stackFrame.callFrame;
JSValue result = jsNumber(stackFrame.globalData, (val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2194,10 +2513,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitnot)
JSValue src = stackFrame.args[0].jsValue();
- int value;
- if (src.numberToInt32(value))
- return JSValue::encode(jsNumber(stackFrame.globalData, ~value));
-
+ ASSERT(!src.isInt32());
CallFrame* callFrame = stackFrame.callFrame;
JSValue result = jsNumber(stackFrame.globalData, ~src.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
@@ -2243,8 +2559,24 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
DEFINE_STUB_FUNCTION(JSObject*, op_new_func_exp)
{
STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ FunctionExecutable* function = stackFrame.args[0].function();
+ JSFunction* func = function->make(callFrame, callFrame->scopeChain());
+
+ /*
+ The Identifier in a FunctionExpression can be referenced from inside
+ the FunctionExpression's FunctionBody to allow the function to call
+ itself recursively. However, unlike in a FunctionDeclaration, the
+ Identifier in a FunctionExpression cannot be referenced from and
+ does not affect the scope enclosing the FunctionExpression.
+ */
+ if (!function->name().isNull()) {
+ JSStaticScopeObject* functionScopeObject = new (callFrame) JSStaticScopeObject(callFrame, function->name(), func, ReadOnly | DontDelete);
+ func->scope().push(functionScopeObject);
+ }
- return stackFrame.args[0].funcExprNode()->makeFunction(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
+ return func;
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_mod)
@@ -2271,21 +2603,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_less)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_neq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- ASSERT(!JSValue::areBothInt32Fast(src1, src2));
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsBoolean(!JSValue::equalSlowCaseInline(callFrame, src1, src2));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_dec)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2309,14 +2626,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_urshift)
JSValue shift = stackFrame.args[1].jsValue();
CallFrame* callFrame = stackFrame.callFrame;
-
- if (JSFastMath::canDoFastUrshift(val, shift))
- return JSValue::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
- else {
- JSValue result = jsNumber(stackFrame.globalData, (val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
- }
+ JSValue result = jsNumber(stackFrame.globalData, (val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitxor)
@@ -2375,7 +2687,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
if (thisValue == globalObject && funcVal == globalObject->evalFunction()) {
JSValue exceptionValue;
JSValue result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset, exceptionValue);
- if (UNLIKELY(exceptionValue != JSValue())) {
+ if (UNLIKELY(exceptionValue)) {
stackFrame.globalData->exception = exceptionValue;
VM_THROW_EXCEPTION_AT_END();
}
@@ -2613,8 +2925,8 @@ DEFINE_STUB_FUNCTION(void*, op_switch_imm)
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- if (scrutinee.isInt32Fast())
- return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.getInt32Fast()).executableAddress();
+ if (scrutinee.isInt32())
+ return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.asInt32()).executableAddress();
else {
double value;
int32_t intValue;
@@ -2724,7 +3036,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_error)
unsigned bytecodeOffset = stackFrame.args[2].int32();
unsigned lineNumber = codeBlock->lineNumberForBytecodeOffset(callFrame, bytecodeOffset);
- return Error::create(callFrame, static_cast<ErrorType>(type), message.toString(callFrame), lineNumber, codeBlock->ownerNode()->sourceID(), codeBlock->ownerNode()->sourceURL());
+ return Error::create(callFrame, static_cast<ErrorType>(type), message.toString(callFrame), lineNumber, codeBlock->ownerExecutable()->sourceID(), codeBlock->ownerExecutable()->sourceURL());
}
DEFINE_STUB_FUNCTION(void, op_debug)
@@ -2749,7 +3061,7 @@ DEFINE_STUB_FUNCTION(void, op_debug_catch)
if (JSC::Debugger* debugger = callFrame->lexicalGlobalObject()->debugger() ) {
JSValue exceptionValue = callFrame->r(stackFrame.args[0].int32()).jsValue();
DebuggerCallFrame debuggerCallFrame(callFrame, exceptionValue);
- debugger->exceptionCatch(debuggerCallFrame, callFrame->codeBlock()->ownerNode()->sourceID());
+ debugger->exceptionCatch(debuggerCallFrame, callFrame->codeBlock()->source()->asID());
}
}
@@ -2759,7 +3071,7 @@ DEFINE_STUB_FUNCTION(void, op_debug_return)
CallFrame* callFrame = stackFrame.callFrame;
if (JSC::Debugger* debugger = callFrame->lexicalGlobalObject()->debugger() ) {
JSValue returnValue = callFrame->r(stackFrame.args[0].int32()).jsValue();
- intptr_t sourceID = callFrame->codeBlock()->ownerNode()->sourceID();
+ intptr_t sourceID = callFrame->codeBlock()->source()->asID();
debugger->functionExit(returnValue, sourceID);
}
}
@@ -2794,8 +3106,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, vm_throw)
return JSValue::encode(exceptionValue);
}
-} // namespace JITStubs
-
} // namespace JSC
#endif // ENABLE(JIT)
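
(Editorial note, not part of the patch.) Under USE(JSVALUE32_64) the equality stub above returns a plain int instead of an encoded JSValue, and it open-codes the abstract-equality fast paths, including the rule that undefined, null, and cells whose structure masquerades as undefined all compare equal to one another. The following is a minimal standalone sketch of just that rule; Tag, Value, and equalToUndefinedOrNull are simplified stand-ins, not JavaScriptCore types.

// Hedged sketch: models the undefined/null branches of the 32_64 op_eq stub.
#include <cassert>

namespace sketch {

enum class Tag { Undefined, Null, Cell, Other };

struct Value {
    Tag tag;
    bool masqueradesAsUndefined; // only meaningful when tag == Tag::Cell
};

// undefined and null compare equal to each other and to cells that
// masquerade as undefined (the structure()->typeInfo() check above).
inline bool equalToUndefinedOrNull(const Value& v)
{
    return v.tag == Tag::Undefined
        || v.tag == Tag::Null
        || (v.tag == Tag::Cell && v.masqueradesAsUndefined);
}

} // namespace sketch

int main()
{
    using namespace sketch;
    assert(equalToUndefinedOrNull(Value{ Tag::Null, false }));
    assert(equalToUndefinedOrNull(Value{ Tag::Cell, true }));   // host object masquerading as undefined
    assert(!equalToUndefinedOrNull(Value{ Tag::Other, false }));
    return 0;
}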
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h
index 325c3fd..3ae8f24 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h
@@ -38,8 +38,11 @@
namespace JSC {
+ struct StructureStubInfo;
+
class CodeBlock;
class ExecutablePool;
+ class FunctionExecutable;
class Identifier;
 class JSGlobalData;
@@ -51,8 +54,7 @@ namespace JSC {
class PropertySlot;
class PutPropertySlot;
class RegisterFile;
- class FuncDeclNode;
- class FuncExprNode;
+ class JSGlobalObject;
class RegExp;
union JITStubArg {
@@ -64,17 +66,26 @@ namespace JSC {
Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
int32_t int32() { return asInt32; }
CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
- FuncDeclNode* funcDeclNode() { return static_cast<FuncDeclNode*>(asPointer); }
- FuncExprNode* funcExprNode() { return static_cast<FuncExprNode*>(asPointer); }
+ FunctionExecutable* function() { return static_cast<FunctionExecutable*>(asPointer); }
RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
+ JSGlobalObject* globalObject() { return static_cast<JSGlobalObject*>(asPointer); }
+ JSString* jsString() { return static_cast<JSString*>(asPointer); }
ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
};
#if PLATFORM(X86_64)
struct JITStackFrame {
- JITStubArg padding; // Unused
- JITStubArg args[8];
+ void* reserved; // Unused
+ JITStubArg args[6];
+ void* padding[2]; // Maintain 32-byte stack alignment (possibly overkill).
+
+ void* code;
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
void* savedRBX;
void* savedR15;
@@ -84,20 +95,20 @@ namespace JSC {
void* savedRBP;
void* savedRIP;
- void* code;
- RegisterFile* registerFile;
- CallFrame* callFrame;
- JSValue* exception;
- Profiler** enabledProfilerReference;
- JSGlobalData* globalData;
-
// When JIT code makes a call, it pushes its return address just below the rest of the stack.
ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
};
#elif PLATFORM(X86)
+#if COMPILER(MSVC)
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC)
struct JITStackFrame {
- JITStubArg padding; // Unused
+ void* reserved; // Unused
JITStubArg args[6];
+#if USE(JSVALUE32_64)
+ void* padding[2]; // Maintain 16-byte stack alignment.
+#endif
void* savedEBX;
void* savedEDI;
@@ -115,10 +126,16 @@ namespace JSC {
// When JIT code makes a call, it pushes its return address just below the rest of the stack.
ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
};
-#elif PLATFORM_ARM_ARCH(7)
+#if COMPILER(MSVC)
+#pragma pack(pop)
+#endif // COMPILER(MSVC)
+#elif PLATFORM(ARM_THUMB2)
struct JITStackFrame {
- JITStubArg padding; // Unused
+ void* reserved; // Unused
JITStubArg args[6];
+#if USE(JSVALUE32_64)
+ void* padding[2]; // Maintain 16-byte stack alignment.
+#endif
ReturnAddressPtr thunkReturnAddress;
@@ -132,12 +149,35 @@ namespace JSC {
CallFrame* callFrame;
JSValue* exception;
+ void* padding2;
+
// These arguments passed on the stack.
Profiler** enabledProfilerReference;
JSGlobalData* globalData;
ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
};
+#elif PLATFORM(ARM_TRADITIONAL)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[7];
+
+ void* preservedR4;
+ void* preservedR5;
+ void* preservedR6;
+ void* preservedR7;
+ void* preservedR8;
+ void* preservedLink;
+
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+ };
#else
#error "JITStackFrame not defined for this platform."
#endif
@@ -183,24 +223,16 @@ namespace JSC {
extern "C" void ctiVMThrowTrampoline();
extern "C" void ctiOpThrowNotCaught();
- extern "C" EncodedJSValue ctiTrampoline(
-#if PLATFORM(X86_64)
- // FIXME: (bug #22910) this will force all arguments onto the stack (regparm(0) does not appear to have any effect).
- // We can allow register passing here, and move the writes of these values into the trampoline.
- void*, void*, void*, void*, void*, void*,
-#endif
- void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*);
+ extern "C" EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*);
class JITThunks {
public:
JITThunks(JSGlobalData*);
- static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&);
- static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&);
-
- MacroAssemblerCodePtr ctiArrayLengthTrampoline() { return m_ctiArrayLengthTrampoline; }
+ static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&, StructureStubInfo* stubInfo);
+ static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo);
+
MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_ctiStringLengthTrampoline; }
- MacroAssemblerCodePtr ctiVirtualCallPreLink() { return m_ctiVirtualCallPreLink; }
MacroAssemblerCodePtr ctiVirtualCallLink() { return m_ctiVirtualCallLink; }
MacroAssemblerCodePtr ctiVirtualCall() { return m_ctiVirtualCall; }
MacroAssemblerCodePtr ctiNativeCallThunk() { return m_ctiNativeCallThunk; }
@@ -208,68 +240,13 @@ namespace JSC {
private:
RefPtr<ExecutablePool> m_executablePool;
- MacroAssemblerCodePtr m_ctiArrayLengthTrampoline;
MacroAssemblerCodePtr m_ctiStringLengthTrampoline;
- MacroAssemblerCodePtr m_ctiVirtualCallPreLink;
MacroAssemblerCodePtr m_ctiVirtualCallLink;
MacroAssemblerCodePtr m_ctiVirtualCall;
MacroAssemblerCodePtr m_ctiNativeCallThunk;
};
-namespace JITStubs { extern "C" {
-
- void JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
-#ifdef QT_BUILD_SCRIPT_LIB
- void JIT_STUB cti_op_debug_catch(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_debug_return(STUB_ARGS_DECLARATION);
-#endif
- void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_second(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_val_array(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_loop_if_less(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_loop_if_true(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_vm_dontLazyLinkCall(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_error(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
- JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
+extern "C" {
EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_bitnot(STUB_ARGS_DECLARATION);
@@ -278,25 +255,22 @@ namespace JITStubs { extern "C" {
EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_method_check_second(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_second(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_is_boolean(STUB_ARGS_DECLARATION);
@@ -311,16 +285,18 @@ namespace JITStubs { extern "C" {
EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_neq(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_next_pname(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_inc(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_pre_dec(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_pre_inc(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION);
@@ -331,13 +307,62 @@ namespace JITStubs { extern "C" {
EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_post_inc(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve_func(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_error(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
+ JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
VoidPtrPair JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION);
-
-}; } // extern "C" namespace JITStubs
+ int JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
+#if USE(JSVALUE32_64)
+ int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION);
+#endif
+ int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_less(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_true(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
+#ifdef QT_BUILD_SCRIPT_LIB
+ void JIT_STUB cti_op_debug_catch(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_debug_return(STUB_ARGS_DECLARATION);
+#endif
+ void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val_array(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
+} // extern "C"
} // namespace JSC
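
(Editorial note, not part of the patch.) The JITStubArg union above gains function(), globalObject(), and jsString() accessors that follow the same pattern as the existing ones: JIT-generated code writes a raw stack slot, and the stub reads it back as the type that opcode expects. Below is a hedged, self-contained sketch of that pattern; StubArg and FakeGlobalObject are simplified stand-ins, not the real JITStubArg or JSGlobalObject.

// Hedged sketch: how a stub argument slot is written raw and read back typed.
#include <cstdint>
#include <cassert>

struct FakeGlobalObject { int id; };   // stand-in for JSGlobalObject

union StubArg {                        // simplified model of JITStubArg
    void* asPointer;
    int32_t asInt32;

    int32_t int32() { return asInt32; }
    FakeGlobalObject* globalObject() { return static_cast<FakeGlobalObject*>(asPointer); }
};

int main()
{
    FakeGlobalObject g = { 7 };

    StubArg args[2];
    args[0].asPointer = &g;   // JIT-generated code stores the raw slot value...
    args[1].asInt32 = 42;

    assert(args[0].globalObject()->id == 7);   // ...and the stub reads it back typed
    assert(args[1].int32() == 42);
    return 0;
}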