path: root/src/3rdparty/javascriptcore/JavaScriptCore/jit
author     Kent Hansen <kent.hansen@nokia.com>  2010-02-11 10:55:52 (GMT)
committer  Kent Hansen <kent.hansen@nokia.com>  2010-03-10 09:19:43 (GMT)
commit     d73e11d56a094544f036fac3f6e4483d1104261e (patch)
tree       c292c8f53c660dae3f7681dc7acbbe788730a580 /src/3rdparty/javascriptcore/JavaScriptCore/jit
parent     20e2b87b5194abf7e9f08b7c42c030a57e2d6b28 (diff)
Update src/3rdparty/javascriptcore and adapt src/script to the changes
- Update qscriptvalueiterator test to expect length property when
  iterating arrays and strings.
- Use EvalExecutable::create() instead of the EvalExecutable constructor.
  The constructor is private.
- Reimplement getOwnPropertyDescriptor() in all custom script objects.
- Remove all reimplementations of getPropertyAttributes(). It doesn't
  exist in trunk anymore (getOwnPropertyDescriptor() is used instead).
- Remove the checkDontDelete argument from deleteProperty()
  reimplementations. The purpose of this argument was to support
  deleting properties with attribute Undeletable from C++. But it was
  quite an invasive patch to JavaScriptCore, and it doesn't seem worth
  it. If this feature is really crucial it should be redone upstream.
  One of the tests needed to be updated so it's not sensitive to the
  C++ undeletability.
- Adapt getOwnPropertyNames() reimplementations to the signature change.
- Add missing QScriptObject structure flags; otherwise we don't get all
  virtual calls.
- Remove our patch for reporting column numbers in the debugger
  callbacks. It was just too intrusive. As with the checkDontDelete
  issue, this should be redone upstream if it's really important. In
  4.7, QScriptEngineAgent will always report a column number of 1.

Other compilation fixes:
- InternalFunction::name() takes an ExecState* argument, not a GlobalData*.
- ScopeChain::globalObject is no longer a function but a member variable.
- The ScopeChainNode constructor takes a GlobalObject argument.
- Heap::collect() is now called collectAllGarbage().
- JSValue::strictEqual() takes an ExecState* argument.
- Debugger::exception() takes a bool hasHandler argument.
- Debugger no longer reports column numbers (we decided to drop that
  patch from JSC).
- UString doesn't have operator+=(char*).

- Update the autotests to reflect the columnNumber=1 change.
- Add a helper class to avoid crashing inside JSC; ever since r52856 in
  WebKit trunk, this is needed. There are probably a lot of other
  public API functions that need this guard as well, but I'll add them
  as they are discovered.
- Update the mkdist-javascriptcore tag; exclude a few more files.
- Set the ENABLE_JSC_MULTIPLE_THREADS=0 define on Mac due to r52355 in
  trunk.

Reviewed-by: Simon Hausmann
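As an illustration of the API adaptations listed above, a minimal sketch with
placeholder names (exec, source, v1, v2, function); the signatures are inferred
from the commit message rather than quoted from the patch:

    // Sketch only (C++): call-site changes implied by the notes above.
    // The EvalExecutable constructor is private; use the factory instead:
    RefPtr<EvalExecutable> executable = EvalExecutable::create(exec, source);

    // Heap::collect() was renamed:
    exec->globalData().heap.collectAllGarbage();

    // These now take an ExecState* as their first argument:
    bool equal = JSValue::strictEqual(exec, v1, v2);
    const UString& functionName = function->name(exec);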
Diffstat (limited to 'src/3rdparty/javascriptcore/JavaScriptCore/jit')
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h             |  45
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp |   2
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp      |   6
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp    |  75
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp        |   2
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp                           |  19
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h                             | 143
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp                 | 383
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp                       |  18
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h                |  37
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp                    | 674
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp             | 240
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h                     |  37
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp                      | 457
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h                        |  28
15 files changed, 1444 insertions(+), 722 deletions(-)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h
index 3274fcc..1fb8ff7 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocator.h
@@ -26,6 +26,7 @@
#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h
+#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
@@ -33,15 +34,21 @@
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>
-#if PLATFORM(IPHONE)
+#if OS(IPHONE_OS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif
-#if PLATFORM(SYMBIAN)
+#if OS(SYMBIAN)
#include <e32std.h>
#endif
+#if OS(WINCE)
+// From pkfuncs.h (private header file from the Platform Builder)
+#define CACHE_SYNC_ALL 0x07F
+extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
+#endif
+
#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
@@ -78,6 +85,9 @@ private:
struct Allocation {
char* pages;
size_t size;
+#if OS(SYMBIAN)
+ RChunk* chunk;
+#endif
};
typedef Vector<Allocation, 2> AllocationList;
@@ -176,22 +186,38 @@ public:
#endif
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if CPU(X86) || CPU(X86_64)
static void cacheFlush(void*, size_t)
{
}
-#elif PLATFORM(ARM_THUMB2) && PLATFORM(IPHONE)
+#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
static void cacheFlush(void* code, size_t size)
{
sys_dcache_flush(code, size);
sys_icache_invalidate(code, size);
}
-#elif PLATFORM(SYMBIAN)
+#elif CPU(ARM_THUMB2) && OS(LINUX)
+ static void cacheFlush(void* code, size_t size)
+ {
+ asm volatile (
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "movw r7, #0x2\n"
+ "movt r7, #0xf\n"
+ "movs r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
+ : "r0", "r1", "r2");
+ }
+#elif OS(SYMBIAN)
static void cacheFlush(void* code, size_t size)
{
User::IMB_Range(code, static_cast<char*>(code) + size);
}
-#elif PLATFORM(ARM_TRADITIONAL) && PLATFORM(LINUX)
+#elif CPU(ARM_TRADITIONAL) && OS(LINUX)
static void cacheFlush(void* code, size_t size)
{
asm volatile (
@@ -205,7 +231,12 @@ public:
"pop {r7}\n"
:
: "r" (code), "r" (reinterpret_cast<char*>(code) + size)
- : "r0", "r1");
+ : "r0", "r1", "r2");
+ }
+#elif OS(WINCE)
+ static void cacheFlush(void* code, size_t size)
+ {
+ CacheRangeFlush(code, size, CACHE_SYNC_ALL);
}
#else
#error "The cacheFlush support is missing on this platform."
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 7682b9c..dd1db4e 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -29,7 +29,7 @@
#include <errno.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(MAC) && PLATFORM(X86_64)
+#if ENABLE(ASSEMBLER) && OS(DARWIN) && CPU(X86_64)
#include "TCSpinLock.h"
#include <mach/mach_init.h>
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
index 13a8626..2eb0c87 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
@@ -27,7 +27,7 @@
#include "ExecutableAllocator.h"
-#if ENABLE(ASSEMBLER)
+#if ENABLE(ASSEMBLER) && OS(UNIX) && !OS(SYMBIAN)
#include <sys/mman.h>
#include <unistd.h>
@@ -35,7 +35,7 @@
namespace JSC {
-#if !(PLATFORM(MAC) && PLATFORM(X86_64))
+#if !(OS(DARWIN) && !PLATFORM(QT) && CPU(X86_64))
void ExecutableAllocator::intializePageSize()
{
@@ -57,7 +57,7 @@ void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
ASSERT_UNUSED(result, !result);
}
-#endif // !(PLATFORM(MAC) && PLATFORM(X86_64))
+#endif // !(OS(DARWIN) && !PLATFORM(QT) && CPU(X86_64))
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSeting setting)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp
new file mode 100644
index 0000000..e82975c
--- /dev/null
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#if ENABLE(ASSEMBLER) && OS(SYMBIAN)
+
+#include <e32hal.h>
+#include <e32std.h>
+
+// Set the page size to 256 KB to compensate for the moving memory model limitation
+const size_t MOVING_MEM_PAGE_SIZE = 256 * 1024;
+
+namespace JSC {
+
+void ExecutableAllocator::intializePageSize()
+{
+#if CPU(ARMV5_OR_LOWER)
+ // The moving memory model (as used in ARMv5 and earlier platforms)
+ // on Symbian OS limits the number of chunks for each process to 16.
+ // To mitigate this limitation, increase the page size to
+ // allocate fewer, larger chunks.
+ ExecutableAllocator::pageSize = MOVING_MEM_PAGE_SIZE;
+#else
+ TInt page_size;
+ UserHal::PageSizeInBytes(page_size);
+ ExecutableAllocator::pageSize = page_size;
+#endif
+}
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
+{
+ RChunk* codeChunk = new RChunk();
+
+ TInt errorCode = codeChunk->CreateLocalCode(n, n);
+
+ char* allocation = reinterpret_cast<char*>(codeChunk->Base());
+ if (!allocation)
+ CRASH();
+ ExecutablePool::Allocation alloc = { allocation, n, codeChunk };
+ return alloc;
+}
+
+void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
+{
+ alloc.chunk->Close();
+ delete alloc.chunk;
+}
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
+#endif
+
+}
+
+#endif // ENABLE(ASSEMBLER) && OS(SYMBIAN)
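The allocation flow in this new file pairs with the Allocation::chunk member
added to ExecutableAllocator.h above: systemAlloc() creates an RChunk with
CreateLocalCode(n, n) and stashes the chunk pointer so systemRelease() can
Close() and delete it. Note that the TInt returned by CreateLocalCode() is
captured but never tested; failure is only caught indirectly by the null check
on Base(). A stricter variant, shown only as a sketch, would test the Symbian
error code directly:

    // Sketch only: fail fast on the Symbian error code instead of relying on Base().
    TInt errorCode = codeChunk->CreateLocalCode(n, n);
    if (errorCode != KErrNone)
        CRASH();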
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
index e6ac855..e38323c 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
@@ -27,7 +27,7 @@
#include "ExecutableAllocator.h"
-#if ENABLE(ASSEMBLER)
+#if ENABLE(ASSEMBLER) && OS(WINDOWS)
#include "windows.h"
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
index ea8434e..c0da66d 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
@@ -27,7 +27,7 @@
#include "JIT.h"
// This probably does not belong here; adding here for now as a quick Windows build fix.
-#if ENABLE(ASSEMBLER) && PLATFORM(X86) && !PLATFORM(MAC)
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif
@@ -37,7 +37,6 @@ JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
-#include "JITStubs.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
@@ -202,7 +201,6 @@ void JIT::privateCompileMainPass()
DEFINE_BINARY_OP(op_less)
DEFINE_BINARY_OP(op_lesseq)
DEFINE_BINARY_OP(op_urshift)
- DEFINE_UNARY_OP(op_get_pnames)
DEFINE_UNARY_OP(op_is_boolean)
DEFINE_UNARY_OP(op_is_function)
DEFINE_UNARY_OP(op_is_number)
@@ -240,7 +238,9 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_eq_null)
DEFINE_OP(op_get_by_id)
DEFINE_OP(op_get_by_val)
+ DEFINE_OP(op_get_by_pname)
DEFINE_OP(op_get_global_var)
+ DEFINE_OP(op_get_pnames)
DEFINE_OP(op_get_scoped_var)
DEFINE_OP(op_instanceof)
DEFINE_OP(op_jeq_null)
@@ -250,6 +250,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_jneq_null)
DEFINE_OP(op_jneq_ptr)
DEFINE_OP(op_jnless)
+ DEFINE_OP(op_jless)
DEFINE_OP(op_jnlesseq)
DEFINE_OP(op_jsr)
DEFINE_OP(op_jtrue)
@@ -258,6 +259,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_loop_if_less)
DEFINE_OP(op_loop_if_lesseq)
DEFINE_OP(op_loop_if_true)
+ DEFINE_OP(op_loop_if_false)
DEFINE_OP(op_lshift)
DEFINE_OP(op_method_check)
DEFINE_OP(op_mod)
@@ -385,14 +387,17 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_eq)
DEFINE_SLOWCASE_OP(op_get_by_id)
DEFINE_SLOWCASE_OP(op_get_by_val)
+ DEFINE_SLOWCASE_OP(op_get_by_pname)
DEFINE_SLOWCASE_OP(op_instanceof)
DEFINE_SLOWCASE_OP(op_jfalse)
DEFINE_SLOWCASE_OP(op_jnless)
+ DEFINE_SLOWCASE_OP(op_jless)
DEFINE_SLOWCASE_OP(op_jnlesseq)
DEFINE_SLOWCASE_OP(op_jtrue)
DEFINE_SLOWCASE_OP(op_loop_if_less)
DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
DEFINE_SLOWCASE_OP(op_loop_if_true)
+ DEFINE_SLOWCASE_OP(op_loop_if_false)
DEFINE_SLOWCASE_OP(op_lshift)
DEFINE_SLOWCASE_OP(op_method_check)
DEFINE_SLOWCASE_OP(op_mod)
@@ -489,21 +494,21 @@ JITCode JIT::privateCompile()
ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
- record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]);
+ record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);
for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
- record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
+ record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
}
} else {
ASSERT(record.type == SwitchRecord::String);
- record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]);
+ record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);
StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
unsigned offset = it->second.branchOffset;
- it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
}
}
}
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h
index fcbc45e..8e0c9ac 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.h
@@ -38,6 +38,8 @@
#define JIT_CLASS_ALIGNMENT
#endif
+#define ASSERT_JIT_OFFSET(actual, expected) ASSERT_WITH_MESSAGE(actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(actual), static_cast<int>(expected));
+
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITCode.h"
@@ -190,7 +192,7 @@ namespace JSC {
// on x86/x86-64 it is ecx for performance reasons, since the
// MacroAssembler will need to plant register swaps if it is not -
// however the code will still function correctly.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
static const RegisterID returnValueRegister = X86Registers::eax;
static const RegisterID cachedResultRegister = X86Registers::eax;
static const RegisterID firstArgumentRegister = X86Registers::edi;
@@ -208,7 +210,7 @@ namespace JSC {
static const FPRegisterID fpRegT0 = X86Registers::xmm0;
static const FPRegisterID fpRegT1 = X86Registers::xmm1;
static const FPRegisterID fpRegT2 = X86Registers::xmm2;
-#elif PLATFORM(X86)
+#elif CPU(X86)
static const RegisterID returnValueRegister = X86Registers::eax;
static const RegisterID cachedResultRegister = X86Registers::eax;
// On x86 we always use fastcall conventions - but on
@@ -226,7 +228,7 @@ namespace JSC {
static const FPRegisterID fpRegT0 = X86Registers::xmm0;
static const FPRegisterID fpRegT1 = X86Registers::xmm1;
static const FPRegisterID fpRegT2 = X86Registers::xmm2;
-#elif PLATFORM(ARM_THUMB2)
+#elif CPU(ARM_THUMB2)
static const RegisterID returnValueRegister = ARMRegisters::r0;
static const RegisterID cachedResultRegister = ARMRegisters::r0;
static const RegisterID firstArgumentRegister = ARMRegisters::r0;
@@ -242,14 +244,13 @@ namespace JSC {
static const FPRegisterID fpRegT0 = ARMRegisters::d0;
static const FPRegisterID fpRegT1 = ARMRegisters::d1;
static const FPRegisterID fpRegT2 = ARMRegisters::d2;
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
static const RegisterID returnValueRegister = ARMRegisters::r0;
static const RegisterID cachedResultRegister = ARMRegisters::r0;
static const RegisterID firstArgumentRegister = ARMRegisters::r0;
static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
static const RegisterID callFrameRegister = ARMRegisters::r4;
- static const RegisterID ctiReturnRegister = ARMRegisters::r6;
static const RegisterID regT0 = ARMRegisters::r0;
static const RegisterID regT1 = ARMRegisters::r1;
@@ -386,6 +387,8 @@ namespace JSC {
Address addressFor(unsigned index, RegisterID base = callFrameRegister);
+ void testPrototype(Structure*, JumpList& failureCases);
+
#if USE(JSVALUE32_64)
Address tagFor(unsigned index, RegisterID base = callFrameRegister);
Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
@@ -425,6 +428,7 @@ namespace JSC {
#endif
void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset);
void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset);
void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset);
// Arithmetic opcode helpers
@@ -432,7 +436,7 @@ namespace JSC {
void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
-#if PLATFORM(X86)
+#if CPU(X86)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 7;
static const int patchOffsetPutByIdExternalLoad = 13;
@@ -461,6 +465,47 @@ namespace JSC {
static const int patchOffsetMethodCheckProtoObj = 11;
static const int patchOffsetMethodCheckProtoStruct = 18;
static const int patchOffsetMethodCheckPutFunction = 29;
+#elif CPU(ARM_TRADITIONAL)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 4;
+ static const int patchOffsetPutByIdExternalLoad = 16;
+ static const int patchLengthPutByIdExternalLoad = 4;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 20;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 4;
+ static const int patchOffsetGetByIdBranchToSlowCase = 16;
+ static const int patchOffsetGetByIdExternalLoad = 16;
+ static const int patchLengthGetByIdExternalLoad = 4;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 20;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
+ static const int patchOffsetGetByIdPutResult = 36;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 32;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 12;
+
+ static const int patchOffsetMethodCheckProtoObj = 12;
+ static const int patchOffsetMethodCheckProtoStruct = 20;
+ static const int patchOffsetMethodCheckPutFunction = 32;
+
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 36;
+ static const int sequenceGetByIdHotPathConstantSpace = 4;
+ // sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 2;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 36;
+ static const int sequencePutByIdConstantSpace = 4;
#else
#error "JSVALUE32_64 not supported on this platform."
#endif
@@ -526,9 +571,10 @@ namespace JSC {
#endif
void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch);
void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 10;
static const int patchOffsetPutByIdExternalLoad = 20;
@@ -542,7 +588,7 @@ namespace JSC {
static const int patchOffsetGetByIdPropertyMapOffset = 31;
static const int patchOffsetGetByIdPutResult = 31;
#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 63;
+ static const int patchOffsetGetByIdSlowCaseCall = 64;
#else
static const int patchOffsetGetByIdSlowCaseCall = 41;
#endif
@@ -551,7 +597,7 @@ namespace JSC {
static const int patchOffsetMethodCheckProtoObj = 20;
static const int patchOffsetMethodCheckProtoStruct = 30;
static const int patchOffsetMethodCheckPutFunction = 50;
-#elif PLATFORM(X86)
+#elif CPU(X86)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 7;
static const int patchOffsetPutByIdExternalLoad = 13;
@@ -578,30 +624,30 @@ namespace JSC {
static const int patchOffsetMethodCheckProtoObj = 11;
static const int patchOffsetMethodCheckProtoStruct = 18;
static const int patchOffsetMethodCheckPutFunction = 29;
-#elif PLATFORM(ARM_THUMB2)
+#elif CPU(ARM_THUMB2)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 10;
- static const int patchOffsetPutByIdExternalLoad = 20;
+ static const int patchOffsetPutByIdExternalLoad = 26;
static const int patchLengthPutByIdExternalLoad = 12;
- static const int patchOffsetPutByIdPropertyMapOffset = 40;
+ static const int patchOffsetPutByIdPropertyMapOffset = 46;
// These architecture specific value are used to enable patching - see comment on op_get_by_id.
static const int patchOffsetGetByIdStructure = 10;
- static const int patchOffsetGetByIdBranchToSlowCase = 20;
- static const int patchOffsetGetByIdExternalLoad = 20;
+ static const int patchOffsetGetByIdBranchToSlowCase = 26;
+ static const int patchOffsetGetByIdExternalLoad = 26;
static const int patchLengthGetByIdExternalLoad = 12;
- static const int patchOffsetGetByIdPropertyMapOffset = 40;
- static const int patchOffsetGetByIdPutResult = 44;
+ static const int patchOffsetGetByIdPropertyMapOffset = 46;
+ static const int patchOffsetGetByIdPutResult = 50;
#if ENABLE(OPCODE_SAMPLING)
static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
#else
static const int patchOffsetGetByIdSlowCaseCall = 28;
#endif
- static const int patchOffsetOpCallCompareToJump = 10;
+ static const int patchOffsetOpCallCompareToJump = 16;
- static const int patchOffsetMethodCheckProtoObj = 18;
- static const int patchOffsetMethodCheckProtoStruct = 28;
- static const int patchOffsetMethodCheckPutFunction = 46;
-#elif PLATFORM(ARM_TRADITIONAL)
+ static const int patchOffsetMethodCheckProtoObj = 24;
+ static const int patchOffsetMethodCheckProtoStruct = 34;
+ static const int patchOffsetMethodCheckPutFunction = 58;
+#elif CPU(ARM_TRADITIONAL)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 4;
static const int patchOffsetPutByIdExternalLoad = 16;
@@ -617,17 +663,14 @@ namespace JSC {
#if ENABLE(OPCODE_SAMPLING)
#error "OPCODE_SAMPLING is not yet supported"
#else
- static const int patchOffsetGetByIdSlowCaseCall = 36;
+ static const int patchOffsetGetByIdSlowCaseCall = 28;
#endif
static const int patchOffsetOpCallCompareToJump = 12;
static const int patchOffsetMethodCheckProtoObj = 12;
static const int patchOffsetMethodCheckProtoStruct = 20;
static const int patchOffsetMethodCheckPutFunction = 32;
-#endif
-#endif // USE(JSVALUE32_64)
-#if PLATFORM(ARM_TRADITIONAL)
// sequenceOpCall
static const int sequenceOpCallInstructionSpace = 12;
static const int sequenceOpCallConstantSpace = 2;
@@ -638,12 +681,13 @@ namespace JSC {
static const int sequenceGetByIdHotPathInstructionSpace = 28;
static const int sequenceGetByIdHotPathConstantSpace = 3;
// sequenceGetByIdSlowCase
- static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 32;
static const int sequenceGetByIdSlowCaseConstantSpace = 2;
// sequencePutById
static const int sequencePutByIdInstructionSpace = 28;
static const int sequencePutByIdConstantSpace = 3;
#endif
+#endif // USE(JSVALUE32_64)
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#define BEGIN_UNINTERRUPTED_SEQUENCE(name) beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
@@ -680,6 +724,7 @@ namespace JSC {
void emit_op_eq_null(Instruction*);
void emit_op_get_by_id(Instruction*);
void emit_op_get_by_val(Instruction*);
+ void emit_op_get_by_pname(Instruction*);
void emit_op_get_global_var(Instruction*);
void emit_op_get_scoped_var(Instruction*);
void emit_op_init_arguments(Instruction*);
@@ -691,6 +736,7 @@ namespace JSC {
void emit_op_jneq_null(Instruction*);
void emit_op_jneq_ptr(Instruction*);
void emit_op_jnless(Instruction*);
+ void emit_op_jless(Instruction*);
void emit_op_jnlesseq(Instruction*);
void emit_op_jsr(Instruction*);
void emit_op_jtrue(Instruction*);
@@ -699,6 +745,7 @@ namespace JSC {
void emit_op_loop_if_less(Instruction*);
void emit_op_loop_if_lesseq(Instruction*);
void emit_op_loop_if_true(Instruction*);
+ void emit_op_loop_if_false(Instruction*);
void emit_op_lshift(Instruction*);
void emit_op_method_check(Instruction*);
void emit_op_mod(Instruction*);
@@ -713,6 +760,7 @@ namespace JSC {
void emit_op_new_func_exp(Instruction*);
void emit_op_new_object(Instruction*);
void emit_op_new_regexp(Instruction*);
+ void emit_op_get_pnames(Instruction*);
void emit_op_next_pname(Instruction*);
void emit_op_not(Instruction*);
void emit_op_nstricteq(Instruction*);
@@ -768,14 +816,17 @@ namespace JSC {
void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_false(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -903,6 +954,46 @@ namespace JSC {
#endif
#endif
} JIT_CLASS_ALIGNMENT;
+
+ inline void JIT::emit_op_loop(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jmp(currentInstruction);
+ }
+
+ inline void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jtrue(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jtrue(currentInstruction, iter);
+ }
+
+ inline void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jfalse(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jfalse(currentInstruction, iter);
+ }
+
+ inline void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
+ {
+ emitTimeoutCheck();
+ emit_op_jless(currentInstruction);
+ }
+
+ inline void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+ {
+ emitSlow_op_jless(currentInstruction, iter);
+ }
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp
index 7afc1f2..feee8d2 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITArithmetic.cpp
@@ -98,16 +98,16 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
emitLoad(op2, regT3, regT2);
notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
} else if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
} else {
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, regT2), target + 3);
+ addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
}
if (!supportsFloatingPoint()) {
@@ -145,7 +145,70 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.addArgument(op1);
stubCall.addArgument(op2);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+}
+
+void JIT::emit_op_jless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Int32 less.
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
@@ -161,16 +224,16 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
emitLoad(op2, regT3, regT2);
notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ addJump(branch32(LessThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
} else if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
} else {
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT0, regT2), target + 3);
+ addJump(branch32(GreaterThan, regT0, regT2), target);
}
if (!supportsFloatingPoint()) {
@@ -208,7 +271,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
stubCall.addArgument(op1);
stubCall.addArgument(op2);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
}
// LeftShift (<<)
@@ -829,11 +892,15 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
break;
case op_jnless:
emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT2), dst + 3);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ case op_jless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
break;
case op_jnlesseq:
emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT2), dst + 3);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
break;
default:
ASSERT_NOT_REACHED();
@@ -882,11 +949,15 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
break;
case op_jnless:
emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), dst + 3);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ case op_jless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
break;
case op_jnlesseq:
emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), dst + 3);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
break;
default:
ASSERT_NOT_REACHED();
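A note on the Unordered double conditions introduced above: in IEEE 754
arithmetic every ordered comparison involving NaN evaluates to false, so the
inverted branches op_jnless ("jump if not less") and op_jnlesseq must be taken
when the operands are unordered, while op_jless must fall through. That is why
they now use DoubleLessThanOrEqualOrUnordered and DoubleLessThanOrUnordered,
whereas op_jless uses plain DoubleLessThan. A small sketch of the underlying
rule, for illustration only:

    #include <cassert>
    #include <limits>

    int main()
    {
        const double nan = std::numeric_limits<double>::quiet_NaN();
        // "Less than" is false whenever an operand is NaN, so op_jless must
        // not branch on unordered inputs...
        assert(!(nan < 1.0) && !(1.0 < nan));
        // ...while the inverted test "not (a < b)" is true, so op_jnless must
        // branch - exactly what DoubleLessThanOrEqualOrUnordered encodes.
        const bool jnlessTaken = !(nan < 1.0);
        assert(jnlessTaken);
        return 0;
    }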
@@ -1000,20 +1071,11 @@ void JIT::emit_op_div(Instruction* currentInstruction)
divDouble(fpRegT1, fpRegT0);
JumpList doubleResult;
- if (!isOperandConstantImmediateInt(op1) || getConstantOperand(op1).asInt32() > 1) {
- m_assembler.cvttsd2si_rr(fpRegT0, regT0);
- convertInt32ToDouble(regT0, fpRegT1);
- m_assembler.ucomisd_rr(fpRegT1, fpRegT0);
-
- doubleResult.append(m_assembler.jne());
- doubleResult.append(m_assembler.jp());
-
- doubleResult.append(branchTest32(Zero, regT0));
+ branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);
- // Int32 result.
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
- end.append(jump());
- }
+ // Int32 result.
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+ end.append(jump());
// Double result.
doubleResult.link(this);
@@ -1054,7 +1116,7 @@ void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
@@ -1116,7 +1178,7 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
stubCall.call(dst);
}
-#else // PLATFORM(X86) || PLATFORM(X86_64)
+#else // CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
@@ -1134,7 +1196,7 @@ void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
}
-#endif // PLATFORM(X86) || PLATFORM(X86_64)
+#endif // CPU(X86) || CPU(X86_64)
/* ------------------------------ END: OP_MOD ------------------------------ */
@@ -1152,13 +1214,8 @@ void JIT::emit_op_lshift(Instruction* currentInstruction)
emitJumpSlowCaseIfNotImmediateInteger(regT2);
emitFastArithImmToInt(regT0);
emitFastArithImmToInt(regT2);
-#if !PLATFORM(X86)
- // Mask with 0x1f as per ecma-262 11.7.2 step 7.
- // On 32-bit x86 this is not necessary, since the shift anount is implicitly masked in the instruction.
- and32(Imm32(0x1f), regT2);
-#endif
lshift32(regT2, regT0);
-#if !USE(JSVALUE64)
+#if USE(JSVALUE32)
addSlowCase(branchAdd32(Overflow, regT0, regT0));
signExtend32ToPtr(regT0, regT0);
#endif
@@ -1203,11 +1260,7 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
// Mask with 0x1f as per ecma-262 11.7.2 step 7.
-#if USE(JSVALUE64)
rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
-#else
- rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
-#endif
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT2);
if (supportsFloatingPointTruncate()) {
@@ -1234,15 +1287,9 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
emitJumpSlowCaseIfNotImmediateInteger(regT2);
}
emitFastArithImmToInt(regT2);
-#if !PLATFORM(X86)
- // Mask with 0x1f as per ecma-262 11.7.2 step 7.
- // On 32-bit x86 this is not necessary, since the shift anount is implicitly masked in the instruction.
- and32(Imm32(0x1f), regT2);
-#endif
-#if USE(JSVALUE64)
rshift32(regT2, regT0);
-#else
- rshiftPtr(regT2, regT0);
+#if USE(JSVALUE32)
+ signExtend32ToPtr(regT0, regT0);
#endif
}
#if USE(JSVALUE64)
@@ -1313,7 +1360,7 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target);
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
@@ -1322,13 +1369,13 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
#else
int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
- addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target + 3);
+ addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
+ addJump(branch32(GreaterThanOrEqual, regT0, regT1), target);
}
}
@@ -1365,7 +1412,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
@@ -1382,7 +1429,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
} else if (isOperandConstantImmediateInt(op1)) {
linkSlowCase(iter);
@@ -1406,7 +1453,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
@@ -1423,7 +1470,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT1);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
} else {
linkSlowCase(iter);
@@ -1452,7 +1499,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
@@ -1475,7 +1522,192 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ }
+}
+
+void JIT::emit_op_jless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the fast path:
+ // - int immediate to constant int immediate
+ // - constant int immediate to int immediate
+ // - int immediate to int immediate
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(JSVALUE64)
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(JSVALUE64)
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
+#else
+ int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
+#endif
+ addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ addJump(branch32(LessThan, regT0, regT1), target);
+ }
+}
+
+void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the slow path:
+ // - floating-point number to constant int immediate
+ // - constant int immediate to floating-point number
+ // - floating-point number to floating-point number.
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(JSVALUE64)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1 = emitJumpIfNotJSCell(regT0);
+
+ Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+#endif
+
+ int32_t op2imm = getConstantOperand(op2).asInt32();
+
+ move(Imm32(op2imm), regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+#if USE(JSVALUE64)
+ fail1.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1.link(this);
+ fail2.link(this);
+#endif
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(JSVALUE64)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail1 = emitJumpIfNotJSCell(regT1);
+
+ Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
+#endif
+
+ int32_t op1imm = getConstantOperand(op1).asInt32();
+
+ move(Imm32(op1imm), regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+#if USE(JSVALUE64)
+ fail1.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail1.link(this);
+ fail2.link(this);
+#endif
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+
+ } else {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(JSVALUE64)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT0, fpRegT0);
+ movePtrToDouble(regT1, fpRegT1);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1 = emitJumpIfNotJSCell(regT0);
+
+ Jump fail2;
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail2 = emitJumpIfNotJSCell(regT1);
+
+ Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
+ Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
+ loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
+#endif
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+#if USE(JSVALUE64)
+ fail1.link(this);
+ fail2.link(this);
+ fail3.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1.link(this);
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail2.link(this);
+ fail3.link(this);
+ fail4.link(this);
+#endif
+ }
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}
}
@@ -1498,7 +1730,7 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
- addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target + 3);
+ addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target);
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
@@ -1507,13 +1739,13 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
#else
int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
- addJump(branch32(LessThan, regT1, Imm32(op1imm)), target + 3);
+ addJump(branch32(LessThan, regT1, Imm32(op1imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(GreaterThan, regT0, regT1), target + 3);
+ addJump(branch32(GreaterThan, regT0, regT1), target);
}
}
@@ -1550,7 +1782,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
- emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
@@ -1567,7 +1799,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
} else if (isOperandConstantImmediateInt(op1)) {
linkSlowCase(iter);
@@ -1591,7 +1823,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
- emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
@@ -1608,7 +1840,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT1);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
} else {
linkSlowCase(iter);
@@ -1637,7 +1869,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
- emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
@@ -1660,7 +1892,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
}
}
@@ -1849,7 +2081,7 @@ void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEn
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
@@ -1898,7 +2130,7 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
stubCall.call(result);
}
-#else // PLATFORM(X86) || PLATFORM(X86_64)
+#else // CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
@@ -1917,7 +2149,7 @@ void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
ASSERT_NOT_REACHED();
}
-#endif // PLATFORM(X86) || PLATFORM(X86_64)
+#endif // CPU(X86) || CPU(X86_64)
/* ------------------------------ END: OP_MOD ------------------------------ */
@@ -2160,27 +2392,10 @@ void JIT::emit_op_div(Instruction* currentInstruction)
}
divDouble(fpRegT1, fpRegT0);
- JumpList doubleResult;
- Jump end;
- bool attemptIntConversion = (!isOperandConstantImmediateInt(op1) || getConstantOperand(op1).asInt32() > 1) && isOperandConstantImmediateInt(op2);
- if (attemptIntConversion) {
- m_assembler.cvttsd2si_rr(fpRegT0, regT0);
- doubleResult.append(branchTest32(Zero, regT0));
- m_assembler.ucomisd_rr(fpRegT1, fpRegT0);
-
- doubleResult.append(m_assembler.jne());
- doubleResult.append(m_assembler.jp());
- emitFastArithIntToImmNoCheck(regT0, regT0);
- end = jump();
- }
-
// Double result.
- doubleResult.link(this);
moveDoubleToPtr(fpRegT0, regT0);
subPtr(tagTypeNumberRegister, regT0);
- if (attemptIntConversion)
- end.link(this);
emitPutVirtualRegister(dst, regT0);
}
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp
index f4f6e62..07253e1 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITCall.cpp
@@ -249,10 +249,10 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
stubCall.addArgument(JIT::Imm32(registerOffset));
stubCall.addArgument(JIT::Imm32(argCount));
stubCall.call();
- wasEval = branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag));
+ wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
}
- emitLoad(callee, regT1, regT2);
+ emitLoad(callee, regT1, regT0);
if (opcodeID == op_call)
compileOpCallSetupArgs(instruction);
@@ -260,12 +260,12 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
compileOpConstructSetupArgs(instruction);
emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitLoad(callee, regT1, regT2);
+ emitLoad(callee, regT1, regT0);
}
// Speculatively roll the callframe, assuming argCount will match the arity.
@@ -278,7 +278,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
if (opcodeID == op_call_eval)
wasEval.link(this);
- emitStore(dst, regT1, regT0);;
+ emitStore(dst, regT1, regT0);
sampleCodeBlock(m_codeBlock);
}
@@ -321,7 +321,13 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
emitLoad(callee, regT1, regT0);
DataLabelPtr addressOfLinkedFunctionCheck;
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
addSlowCase(jumpToSlow);
ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
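The new BEGIN/END_UNINTERRUPTED_SEQUENCE pair protects the patchable compare-and-jump: on targets with a constant pool (ARM), the assembler could otherwise flush the pool between the two instructions and break the fixed patchOffsetOpCallCompareToJump distance the ASSERT checks. A toy model of the idea, independent of JSC's real classes:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Toy model only: reserve worst-case space up front so nothing (e.g. an
    // ARM constant-pool flush) is emitted mid-sequence, then assert the
    // estimate held. The value 16 is an arbitrary stand-in.
    struct ToyBuffer {
        std::vector<uint8_t> bytes;
        void ensureSpace(size_t n) { bytes.reserve(bytes.size() + n); } // flush point
        void emit(uint8_t b) { bytes.push_back(b); }
    };

    void emitPatchableSequence(ToyBuffer& buf)
    {
        const size_t worstCase = 16;                   // BEGIN_UNINTERRUPTED_SEQUENCE
        buf.ensureSpace(worstCase);
        const size_t start = buf.bytes.size();
        buf.emit(0x3b); buf.emit(0xe9);                // stand-ins for cmp + jmp bytes
        assert(buf.bytes.size() - start <= worstCase); // END_UNINTERRUPTED_SEQUENCE
    }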
@@ -620,7 +626,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
addSlowCase(jumpToSlow);
- ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
+ ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow), patchOffsetOpCallCompareToJump);
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
// The following is the fast case, only used when a callee can be linked.
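ASSERT_JIT_OFFSET replaces the bare ASSERT so that a layout mismatch reports the numbers involved rather than just failing. A sketch of the shape such a macro usually takes (illustrative, not the definition in JIT.h):

    #include <cstdio>
    #include <cstdlib>

    // Sketch only: when a trampoline layout assumption breaks, print both
    // the expected and the actual offset before aborting.
    #define SKETCH_ASSERT_JIT_OFFSET(actual, expected) \
        do { \
            if ((actual) != (expected)) { \
                std::fprintf(stderr, "JIT offset should be %d, not %d\n", \
                             (int)(expected), (int)(actual)); \
                std::abort(); \
            } \
        } while (0)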
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h
index f26457a..5af7565 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITInlineMethods.h
@@ -37,7 +37,7 @@ namespace JSC {
// puts an arg onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
poke(src, argumentStackOffset);
}
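Each of the following hunks swaps a magic "+ 1" for JITSTACKFRAME_ARGS_INDEX, tying the stub-argument slot to the real JITStackFrame layout. The offset math, restated as a minimal sketch (the slot index 1 below is a stand-in, not the constant's actual value):

    // On a 32-bit build a JSValue spans two machine words, so argument n
    // occupies slots [n * wordsPerJSValue + base, ... + base + 1].
    constexpr unsigned wordsPerJSValue = 8 / sizeof(void*); // sizeof(JSValue) assumed 8
    constexpr unsigned argsBaseIndex = 1;                   // stand-in for JITSTACKFRAME_ARGS_INDEX

    constexpr unsigned stubArgSlot(unsigned argumentNumber)
    {
        return argumentNumber * wordsPerJSValue + argsBaseIndex;
    }
    static_assert(stubArgSlot(0) == argsBaseIndex, "argument 0 sits at the args base");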
@@ -45,7 +45,7 @@ ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumbe
ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
poke(Imm32(value), argumentStackOffset);
}
@@ -53,7 +53,7 @@ ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argum
ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
poke(ImmPtr(value), argumentStackOffset);
}
@@ -61,7 +61,7 @@ ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argument
ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
peek(dst, argumentStackOffset);
}
@@ -115,7 +115,7 @@ ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
-#if PLATFORM(ARM_TRADITIONAL)
+#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
// Ensure the label after the sequence can also fit
insnSpace += sizeof(ARMWord);
@@ -144,7 +144,7 @@ ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
#endif
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
@@ -161,7 +161,7 @@ ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
loadPtr(address, linkRegister);
}
-#else // PLATFORM(X86) || PLATFORM(X86_64) || PLATFORM(ARM_TRADITIONAL)
+#else // CPU(X86) || CPU(X86_64)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
@@ -191,16 +191,13 @@ ALWAYS_INLINE void JIT::restoreArgumentReference()
{
move(stackPointerRegister, firstArgumentRegister);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
-#if PLATFORM(ARM_TRADITIONAL)
- move(ctiReturnRegister, ARMRegisters::lr);
-#endif
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
-#if PLATFORM(X86)
+#if CPU(X86)
// Within a trampoline the return address will be on the stack at this point.
addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
-#elif PLATFORM(ARM_THUMB2)
+#elif CPU(ARM)
move(stackPointerRegister, firstArgumentRegister);
#endif
// In the trampoline on x86-64, the first argument register is not overwritten.
@@ -268,9 +265,9 @@ ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
-#if PLATFORM(X86_64) // Or any other 64-bit plattform.
+#if CPU(X86_64) // Or any other 64-bit platform.
addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
-#elif PLATFORM(X86) // Or any other little-endian 32-bit plattform.
+#elif CPU(X86) // Or any other little-endian 32-bit platform.
intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
@@ -281,7 +278,7 @@ ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t cou
#endif
#if ENABLE(OPCODE_SAMPLING)
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
@@ -296,7 +293,7 @@ ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostF
#endif
#if ENABLE(CODEBLOCK_SAMPLING)
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
@@ -588,7 +585,7 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber)
{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
poke(payload, argumentStackOffset);
poke(tag, argumentStackOffset + 1);
}
@@ -597,7 +594,7 @@ ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, un
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
if (m_codeBlock->isConstantRegisterIndex(src)) {
JSValue constant = m_codeBlock->getConstant(src);
poke(Imm32(constant.payload()), argumentStackOffset);
@@ -820,7 +817,7 @@ ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
#if USE(JSVALUE64)
UNUSED_PARAM(reg);
#else
- rshiftPtr(Imm32(JSImmediate::IntegerPayloadShift), reg);
+ rshift32(Imm32(JSImmediate::IntegerPayloadShift), reg);
#endif
}
@@ -849,7 +846,7 @@ ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + 1;
+ unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
if (m_codeBlock->isConstantRegisterIndex(src)) {
JSValue value = m_codeBlock->getConstant(src);
poke(ImmPtr(JSValue::encode(value)), argumentStackOffset);
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp
index b5f6597..601f2d6 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITOpcodes.cpp
@@ -33,6 +33,7 @@
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
#include "LinkBuffer.h"
namespace JSC {
@@ -51,8 +52,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
// Checks out okay! - get the length from the UString.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(UString::Rep, len)), regT2);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_stringLength)), regT2);
Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
move(regT2, regT0);
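The trampoline now reads the cached length straight off JSString instead of chasing the UString representation. A stand-in layout sketch of the before/after (not the tree's real classes):

    // Sketch of the layout difference the new single load relies on.
    struct Rep { unsigned len; /* characters, refcount, ... */ };
    struct UStringOldLayout { Rep* rep; };
    struct JSStringOld { UStringOldLayout value; };          // length: two dependent loads
    struct JSStringNew { unsigned stringLength; Rep* rep; }; // length: one load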
@@ -137,7 +137,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
jump(regT0);
-#if PLATFORM(X86)
+#if CPU(X86) || CPU(ARM_TRADITIONAL)
Label nativeCallThunk = align();
preserveReturnAddressAfterCall(regT0);
emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
@@ -148,6 +148,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+#if CPU(X86)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
/* We have two structs that we use to describe the stackframe we set up for our
@@ -159,7 +160,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
* stack pointer by the right amount after the call.
*/
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
#if COMPILER(MSVC)
#pragma pack(push)
#pragma pack(4)
@@ -222,7 +223,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
// ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
@@ -247,6 +248,66 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// so pull them off now
addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
+#elif CPU(ARM_TRADITIONAL)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+ // Allocate stack space for our arglist
+ COMPILE_ASSERT((sizeof(ArgList) & 0x7) == 0 && sizeof(JSValue) == 8 && sizeof(Register) == 8, ArgList_should_be_8byte_aligned);
+ subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // Push argcount
+ storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ move(callFrameRegister, regT1);
+ sub32(Imm32(RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT1)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+
+ // push pointer to arguments
+ storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
+
+ // Argument passing method:
+ // r0 - points to return value
+ // r1 - callFrame
+ // r2 - callee
+ // stack: this(JSValue) and a pointer to ArgList
+
+ move(stackPointerRegister, regT3);
+ subPtr(Imm32(8), stackPointerRegister);
+ move(stackPointerRegister, regT0);
+ subPtr(Imm32(8 + 4 + 4 /* padding */), stackPointerRegister);
+
+ // Setup arg4:
+ storePtr(regT3, Address(stackPointerRegister, 8));
+
+ // Setup arg3
+ // regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
+ load32(Address(regT1, -(int32_t)sizeof(void*) * 2), regT3);
+ storePtr(regT3, Address(stackPointerRegister, 0));
+ load32(Address(regT1, -(int32_t)sizeof(void*)), regT3);
+ storePtr(regT3, Address(stackPointerRegister, 4));
+
+ // Setup arg2:
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT2);
+
+ // Setup arg1:
+ move(callFrameRegister, regT1);
+
+ call(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data)));
+
+ // Load return value
+ load32(Address(stackPointerRegister, 16), regT0);
+ load32(Address(stackPointerRegister, 20), regT1);
+
+ addPtr(Imm32(sizeof(ArgList) + 16 + 8), stackPointerRegister);
+#endif
+
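The comment block above pins down the convention for this thunk: the 8-byte JSValue result does not fit in a single ARM register, so r0 carries a result pointer and the value is read back off the stack after the call. The argument-pointer arithmetic, restated with plain variables (sizes follow the COMPILE_ASSERT above; kCallFrameHeaderSize is a stand-in, not RegisterFile's real value):

    #include <cstdint>

    const int32_t kRegisterSize = 8;        // per the COMPILE_ASSERT in the diff
    const int32_t kCallFrameHeaderSize = 8; // stand-in for RegisterFile::CallFrameHeaderSize

    char* firstArgument(char* callFrame, int32_t argCountIncludingThis)
    {
        const int32_t argCount = argCountIncludingThis - 1;              // drop 'this'
        char* headerStart = callFrame - kCallFrameHeaderSize * kRegisterSize;
        return headerStart - argCount * kRegisterSize;                   // 'this' sits one Register below this address
    }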
// Check for an exception
move(ImmPtr(&globalData->exception), regT2);
Jump sawException = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::EmptyValueTag));
@@ -267,7 +328,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
move(ImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
- move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
+ move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT2);
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
restoreReturnAddressBeforeReturn(regT2);
@@ -345,59 +406,7 @@ void JIT::emit_op_end(Instruction* currentInstruction)
void JIT::emit_op_jmp(Instruction* currentInstruction)
{
unsigned target = currentInstruction[1].u.operand;
- addJump(jump(), target + 1);
-}
-
-void JIT::emit_op_loop(Instruction* currentInstruction)
-{
- unsigned target = currentInstruction[1].u.operand;
- emitTimeoutCheck();
- addJump(jump(), target + 1);
-}
-
-void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- emitTimeoutCheck();
-
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
- return;
- }
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT0, regT2), target + 3);
-}
-
-void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_loop_if_less);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ addJump(jump(), target);
}
void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
@@ -411,21 +420,21 @@ void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
emitLoad(op2, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target + 3);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
return;
}
if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
return;
}
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT0, regT2), target + 3);
+ addJump(branch32(LessThanOrEqual, regT0, regT2), target);
}
void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -442,7 +451,7 @@ void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<Slo
stubCall.addArgument(op1);
stubCall.addArgument(op2);
stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}
void JIT::emit_op_new_object(Instruction* currentInstruction)
@@ -457,30 +466,20 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
unsigned baseVal = currentInstruction[3].u.operand;
unsigned proto = currentInstruction[4].u.operand;
- // Load the operands (baseVal, proto, and value respectively) into registers.
+ // Load the operands into registers.
// We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
- emitLoadPayload(proto, regT1);
- emitLoadPayload(baseVal, regT0);
emitLoadPayload(value, regT2);
+ emitLoadPayload(baseVal, regT0);
+ emitLoadPayload(proto, regT1);
- // Check that baseVal & proto are cells.
- emitJumpSlowCaseIfNotJSCell(proto);
+ // Check that value, baseVal, and proto are cells.
+ emitJumpSlowCaseIfNotJSCell(value);
emitJumpSlowCaseIfNotJSCell(baseVal);
+ emitJumpSlowCaseIfNotJSCell(proto);
- // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
+ // Check that baseVal 'ImplementsDefaultHasInstance'.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType))); // FIXME: Maybe remove this test.
- addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance))); // FIXME: TOT checks ImplementsDefaultHasInstance.
-
- // If value is not an Object, return false.
- emitLoadTag(value, regT0);
- Jump valueIsImmediate = branch32(NotEqual, regT0, Imm32(JSValue::CellTag));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)); // FIXME: Maybe remove this test.
-
- // Check proto is object.
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
@@ -488,16 +487,14 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
move(Imm32(JSValue::TrueTag), regT0);
Label loop(this);
- // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
+ // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
- branch32(NotEqual, regT2, Imm32(0), loop);
+ branchTest32(NonZero, regT2).linkTo(loop, this);
// We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- valueIsImmediate.link(this);
- valueIsNotObject.link(this);
move(Imm32(JSValue::FalseTag), regT0);
// isInstance jumps right down to here, to skip setting the result to false (it has already set true).
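The rewritten instanceof hoists every cell check into the slow case and then walks the prototype chain in a tight loop. The same walk in plain C++, with the object model reduced to a single prototype pointer (in the real code the prototype lives on the cell's Structure):

    // Sketch of the emitted loop: follow prototypes until we hit the target
    // (result true) or fall off the end of the chain (result false).
    struct Cell { Cell* prototype; };

    bool instanceOfSketch(const Cell* value, const Cell* proto)
    {
        for (const Cell* p = value->prototype; p; p = p->prototype) {
            if (p == proto)
                return true;   // the "isInstance" early exit
        }
        return false;          // reached the null end of the chain
    }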
@@ -512,11 +509,10 @@ void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCas
unsigned baseVal = currentInstruction[3].u.operand;
unsigned proto = currentInstruction[4].u.operand;
+ linkSlowCaseIfNotJSCell(iter, value);
linkSlowCaseIfNotJSCell(iter, baseVal);
linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_instanceof);
stubCall.addArgument(value);
@@ -661,40 +657,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitTimeoutCheck();
-
- emitLoad(cond, regT1, regT0);
-
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addJump(branch32(NotEqual, regT0, Imm32(0)), target + 2);
- Jump isNotZero = jump();
-
- isNotInteger.link(this);
-
- addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::FalseTag)));
-
- isNotZero.link(this);
-}
-
-void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
-}
-
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_base);
@@ -785,11 +747,11 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
emitLoad(cond, regT1, regT0);
Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target + 2);
+ addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);
Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
- addJump(jump(), target + 2);
+ addJump(jump(), target);
if (supportsFloatingPoint()) {
isNotInteger.link(this);
@@ -798,7 +760,7 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
zeroDouble(fpRegT0);
emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleEqual, fpRegT0, fpRegT1), target + 2);
+ addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
} else
addSlowCase(isNotInteger);
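DoubleEqual becomes DoubleEqualOrUnordered because ToBoolean(NaN) is false: op_jfalse must take the branch for NaN, yet NaN compares unordered against zero, so a plain equality test would fall through. A self-contained check of the required semantics:

    #include <cassert>
    #include <cmath>

    // jfalse on a double takes the branch exactly when the value is falsy:
    // zero, negative zero, or NaN ("equal or unordered" versus 0.0).
    bool jfalseTakesBranch(double cond)
    {
        return cond == 0.0 || std::isnan(cond);
    }

    int main()
    {
        assert(jfalseTakesBranch(0.0));
        assert(jfalseTakesBranch(-0.0));
        assert(jfalseTakesBranch(NAN));
        assert(!jfalseTakesBranch(1.0));
        return 0;
    }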
@@ -815,7 +777,7 @@ void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEnt
JITStubCall stubCall(this, cti_op_jtrue);
stubCall.addArgument(cond);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 2); // Inverted.
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
}
void JIT::emit_op_jtrue(Instruction* currentInstruction)
@@ -826,11 +788,11 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
emitLoad(cond, regT1, regT0);
Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target + 2);
+ addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);
Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
- addJump(jump(), target + 2);
+ addJump(jump(), target);
if (supportsFloatingPoint()) {
isNotInteger.link(this);
@@ -839,7 +801,7 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
zeroDouble(fpRegT0);
emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target + 2);
+ addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target);
} else
addSlowCase(isNotInteger);
@@ -856,7 +818,7 @@ void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntr
JITStubCall stubCall(this, cti_op_jtrue);
stubCall.addArgument(cond);
stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
@@ -870,7 +832,7 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
Jump wasNotImmediate = jump();
@@ -881,7 +843,7 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
or32(regT2, regT1);
- addJump(branchTest32(NonZero, regT1), target + 2);
+ addJump(branchTest32(NonZero, regT1), target);
wasNotImmediate.link(this);
}
@@ -897,7 +859,7 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
Jump wasNotImmediate = jump();
@@ -908,7 +870,7 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
or32(regT2, regT1);
- addJump(branchTest32(Zero, regT1), target + 2);
+ addJump(branchTest32(Zero, regT1), target);
wasNotImmediate.link(this);
}
@@ -920,8 +882,8 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
unsigned target = currentInstruction[3].u.operand;
emitLoad(src, regT1, regT0);
- addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target + 3);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target + 3);
+ addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target);
}
void JIT::emit_op_jsr(Instruction* currentInstruction)
@@ -929,7 +891,7 @@ void JIT::emit_op_jsr(Instruction* currentInstruction)
int retAddrDst = currentInstruction[1].u.operand;
int target = currentInstruction[2].u.operand;
DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
- addJump(jump(), target + 2);
+ addJump(jump(), target);
m_jsrSites.append(JSRInfo(storeLocation, label()));
}
@@ -1195,23 +1157,109 @@ void JIT::emit_op_throw(Instruction* currentInstruction)
#endif
}
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int breakTarget = currentInstruction[5].u.operand;
+
+ JumpList isNotObject;
+
+ emitLoad(base, regT1, regT0);
+ if (!m_codeBlock->isKnownNotImmediate(base))
+ isNotObject.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ if (base != m_codeBlock->thisRegister()) {
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ isNotObject.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ }
+
+ // We could inline the case where you have a valid cache, but
+ // this call doesn't seem to be hot.
+ Label isObject(this);
+ JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
+ getPnamesStubCall.addArgument(regT0);
+ getPnamesStubCall.call(dst);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+ store32(Imm32(0), addressFor(i));
+ store32(regT3, addressFor(size));
+ Jump end = jump();
+
+ isNotObject.link(this);
+ addJump(branch32(Equal, regT1, Imm32(JSValue::NullTag)), breakTarget);
+ addJump(branch32(Equal, regT1, Imm32(JSValue::UndefinedTag)), breakTarget);
+ JITStubCall toObjectStubCall(this, cti_to_object);
+ toObjectStubCall.addArgument(regT1, regT0);
+ toObjectStubCall.call(base);
+ jump().linkTo(isObject, this);
+
+ end.link(this);
+}
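op_get_pnames and the reworked op_next_pname below split the for-in state into explicit (i, size, iterator) locals. What the pair computes, restated as a sketch over stand-in types (not JSC API); the real fast path skips the membership re-check while the cached structure guard holds:

    #include <cstddef>
    #include <set>
    #include <string>
    #include <vector>

    struct Object { std::set<std::string> properties; }; // stand-in object model

    template <typename Body>
    void forInSketch(const Object& base, Body body)
    {
        std::vector<std::string> names(base.properties.begin(),
                                       base.properties.end());          // op_get_pnames
        for (std::size_t i = 0, size = names.size(); i != size; ++i) {  // op_next_pname loop
            if (!base.properties.count(names[i]))                       // cti_has_property slow path:
                continue;                                               // skip names deleted mid-loop
            body(names[i]);
        }
    }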
+
void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- int iter = currentInstruction[2].u.operand;
- int target = currentInstruction[3].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int it = currentInstruction[5].u.operand;
+ int target = currentInstruction[6].u.operand;
+
+ JumpList callHasProperty;
+
+ Label begin(this);
+ load32(addressFor(i), regT0);
+ Jump end = branch32(Equal, regT0, addressFor(size));
+
+ // Grab key @ i
+ loadPtr(addressFor(it), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+ load32(BaseIndex(regT2, regT0, TimesEight), regT2);
+ store32(Imm32(JSValue::CellTag), tagFor(dst));
+ store32(regT2, payloadFor(dst));
+
+ // Increment i
+ add32(Imm32(1), regT0);
+ store32(regT0, addressFor(i));
+
+ // Verify that i is valid:
+ loadPtr(addressFor(base), regT0);
+
+ // Test base's structure
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
+
+ // Test base's prototype chain
+ loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+ addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+ Label checkPrototype(this);
+ callHasProperty.append(branch32(Equal, Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::NullTag)));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+ addPtr(Imm32(sizeof(Structure*)), regT3);
+ branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
- load32(Address(callFrameRegister, (iter * sizeof(Register))), regT0);
+ // Continue loop.
+ addJump(jump(), target);
- JITStubCall stubCall(this, cti_op_next_pname);
+ // Slow case: Ask the object if i is valid.
+ callHasProperty.link(this);
+ loadPtr(addressFor(dst), regT1);
+ JITStubCall stubCall(this, cti_has_property);
stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
stubCall.call();
- Jump endOfIter = branchTestPtr(Zero, regT0);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_next_pname), dst, regT1, regT0);
- addJump(jump(), target + 3);
- endOfIter.link(this);
+ // Test for valid key.
+ addJump(branchTest32(NonZero, regT0), target);
+ jump().linkTo(begin, this);
+
+ // End of loop.
+ end.link(this);
}
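The fast path above trusts the name snapshot only while the base's Structure and the iterator's cached prototype chain are unchanged; any mismatch falls through to cti_has_property. A simplified model of that guard (stand-in types, and the tag/null handling of the emitted code is elided):

    #include <vector>

    struct Structure { Structure* prototypeStructure; };
    struct IteratorCache {
        Structure* cachedStructure;
        std::vector<Structure*> cachedPrototypeChain; // the JIT walks a zero-terminated vector
    };

    bool fastPathStillValid(Structure* base, const IteratorCache& cache)
    {
        if (base != cache.cachedStructure)
            return false;                             // base reshaped: slow path
        Structure* proto = base->prototypeStructure;
        for (Structure* cached : cache.cachedPrototypeChain) {
            if (proto != cached)
                return false;                         // chain reshaped: slow path
            proto = proto->prototypeStructure;
        }
        return true;
    }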
void JIT::emit_op_push_scope(Instruction* currentInstruction)
@@ -1286,7 +1334,7 @@ void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_jmp_scopes);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.call();
- addJump(jump(), currentInstruction[2].u.operand + 2);
+ addJump(jump(), currentInstruction[2].u.operand);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
@@ -1361,7 +1409,6 @@ void JIT::emit_op_debug(Instruction* currentInstruction)
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[4].u.operand));
stubCall.call();
}
@@ -1464,8 +1511,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
// Checks out okay! - get the length from the UString.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT0);
- load32(Address(regT0, OBJECT_OFFSETOF(UString::Rep, len)), regT0);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_stringLength)), regT0);
Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
@@ -1560,7 +1606,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86Registers::ecx);
// Allocate stack space for our arglist
@@ -1596,7 +1642,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
call(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
-#elif PLATFORM(X86)
+#elif CPU(X86)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
/* We have two structs that we use to describe the stackframe we set up for our
@@ -1607,7 +1653,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
* not the rest of the callframe so we need a nice way to ensure we increment the
* stack pointer by the right amount after the call.
*/
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
struct NativeCallFrameStructure {
// CallFrame* callFrame; // passed in EDX
JSObject* callee;
@@ -1660,7 +1706,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
// ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
@@ -1688,7 +1734,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// so pull them off now
addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
// Allocate stack space for our arglist
@@ -1722,9 +1768,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
move(callFrameRegister, regT0);
// Setup arg4: This is a plain hack
- move(stackPointerRegister, ARMRegisters::S0);
+ move(stackPointerRegister, ARMRegisters::r3);
- move(ctiReturnRegister, ARMRegisters::lr);
call(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_data)));
addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
@@ -1755,7 +1800,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
move(ImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
- move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
+ move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT2);
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
restoreReturnAddressBeforeReturn(regT2);
@@ -1831,49 +1876,8 @@ void JIT::emit_op_end(Instruction* currentInstruction)
void JIT::emit_op_jmp(Instruction* currentInstruction)
{
unsigned target = currentInstruction[1].u.operand;
- addJump(jump(), target + 1);
- RECORD_JUMP_TARGET(target + 1);
-}
-
-void JIT::emit_op_loop(Instruction* currentInstruction)
-{
- emitTimeoutCheck();
-
- unsigned target = currentInstruction[1].u.operand;
- addJump(jump(), target + 1);
-}
-
-void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
-{
- emitTimeoutCheck();
-
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
- int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
- int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
- addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3);
- } else if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
- int32_t op1imm = getConstantOperandImmediateInt(op1);
-#else
- int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
-#endif
- addJump(branch32(GreaterThan, regT0, Imm32(op1imm)), target + 3);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(LessThan, regT0, regT1), target + 3);
- }
+ addJump(jump(), target);
+ RECORD_JUMP_TARGET(target);
}
void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
@@ -1891,12 +1895,12 @@ void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
- addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3);
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3);
+ addJump(branch32(LessThanOrEqual, regT0, regT1), target);
}
}
@@ -1907,30 +1911,26 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
// Load the operands (baseVal, proto, and value respectively) into registers.
// We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
- emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT2);
+ emitGetVirtualRegister(value, regT2);
+ emitGetVirtualRegister(baseVal, regT0);
+ emitGetVirtualRegister(proto, regT1);
// Check that baseVal & proto are cells.
- emitJumpSlowCaseIfNotJSCell(regT0);
- emitJumpSlowCaseIfNotJSCell(regT1);
+ emitJumpSlowCaseIfNotJSCell(regT2, value);
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
+ emitJumpSlowCaseIfNotJSCell(regT1, proto);
- // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
+ // Check that baseVal 'ImplementsDefaultHasInstance'.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
- // If value is not an Object, return false.
- Jump valueIsImmediate = emitJumpIfNotJSCell(regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
-
- // Check proto is object.
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
-
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
// As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
@@ -1942,16 +1942,14 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
- branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop);
+ emitJumpIfJSCell(regT2).linkTo(loop, this);
// We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- valueIsImmediate.link(this);
- valueIsNotObject.link(this);
move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);
// isInstance jumps right down to here, to skip setting the result to false (it has already set true).
isInstance.link(this);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ emitPutVirtualRegister(dst);
}
void JIT::emit_op_new_func(Instruction* currentInstruction)
@@ -2125,21 +2123,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
-{
- emitTimeoutCheck();
-
- unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
- addJump(emitJumpIfImmediateInteger(regT0), target + 2);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
- addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
-
- isZero.link(this);
-};
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_base);
@@ -2202,14 +2185,14 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target + 2);
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target);
Jump isNonZero = emitJumpIfImmediateInteger(regT0);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target + 2);
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target);
addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
isNonZero.link(this);
- RECORD_JUMP_TARGET(target + 2);
+ RECORD_JUMP_TARGET(target);
};
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
@@ -2221,16 +2204,16 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target);
wasNotImmediate.link(this);
- RECORD_JUMP_TARGET(target + 2);
+ RECORD_JUMP_TARGET(target);
};
void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
@@ -2242,16 +2225,16 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target);
wasNotImmediate.link(this);
- RECORD_JUMP_TARGET(target + 2);
+ RECORD_JUMP_TARGET(target);
}
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
@@ -2261,9 +2244,9 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
unsigned target = currentInstruction[3].u.operand;
emitGetVirtualRegister(src, regT0);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target + 3);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target);
- RECORD_JUMP_TARGET(target + 3);
+ RECORD_JUMP_TARGET(target);
}
void JIT::emit_op_jsr(Instruction* currentInstruction)
@@ -2271,10 +2254,10 @@ void JIT::emit_op_jsr(Instruction* currentInstruction)
int retAddrDst = currentInstruction[1].u.operand;
int target = currentInstruction[2].u.operand;
DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
- addJump(jump(), target + 2);
+ addJump(jump(), target);
m_jsrSites.append(JSRInfo(storeLocation, label()));
killLastResultRegister();
- RECORD_JUMP_TARGET(target + 2);
+ RECORD_JUMP_TARGET(target);
}
void JIT::emit_op_sret(Instruction* currentInstruction)
@@ -2326,13 +2309,13 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
- addJump(emitJumpIfImmediateInteger(regT0), target + 2);
+ addJump(emitJumpIfImmediateInteger(regT0), target);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target);
addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
isZero.link(this);
- RECORD_JUMP_TARGET(target + 2);
+ RECORD_JUMP_TARGET(target);
}
void JIT::emit_op_neq(Instruction* currentInstruction)
@@ -2383,15 +2366,116 @@ void JIT::emit_op_throw(Instruction* currentInstruction)
#endif
}
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int breakTarget = currentInstruction[5].u.operand;
+
+ JumpList isNotObject;
+
+ emitGetVirtualRegister(base, regT0);
+ if (!m_codeBlock->isKnownNotImmediate(base))
+ isNotObject.append(emitJumpIfNotJSCell(regT0));
+ if (base != m_codeBlock->thisRegister()) {
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ isNotObject.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ }
+
+ // We could inline the case where you have a valid cache, but
+ // this call doesn't seem to be hot.
+ Label isObject(this);
+ JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
+ getPnamesStubCall.addArgument(regT0);
+ getPnamesStubCall.call(dst);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+ store32(Imm32(0), addressFor(i));
+ store32(regT3, addressFor(size));
+ Jump end = jump();
+
+ isNotObject.link(this);
+ move(regT0, regT1);
+ and32(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT1);
+ addJump(branch32(Equal, regT1, Imm32(JSImmediate::FullTagTypeNull)), breakTarget);
+
+ JITStubCall toObjectStubCall(this, cti_to_object);
+ toObjectStubCall.addArgument(regT0);
+ toObjectStubCall.call(base);
+ jump().linkTo(isObject, this);
+
+ end.link(this);
+}
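The JSVALUE64 variant of the isNotObject path folds the null and undefined checks into a single compare by masking off the bit that distinguishes them in the immediate encoding. A sketch with illustrative bit values (not the tree's JSImmediate constants):

    #include <cstdint>

    // In the immediate encoding, undefined is null plus one extra tag bit,
    // so clearing that bit lets one compare catch both values.
    constexpr std::uintptr_t kFullTagTypeNull      = 0x02;
    constexpr std::uintptr_t kExtendedBitUndefined = 0x08;

    constexpr bool isNullOrUndefined(std::uintptr_t bits)
    {
        return (bits & ~kExtendedBitUndefined) == kFullTagTypeNull;
    }
    static_assert(isNullOrUndefined(kFullTagTypeNull), "null caught");
    static_assert(isNullOrUndefined(kFullTagTypeNull | kExtendedBitUndefined), "undefined caught");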
+
void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_next_pname);
- stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int it = currentInstruction[5].u.operand;
+ int target = currentInstruction[6].u.operand;
+
+ JumpList callHasProperty;
+
+ Label begin(this);
+ load32(addressFor(i), regT0);
+ Jump end = branch32(Equal, regT0, addressFor(size));
+
+ // Grab key @ i
+ loadPtr(addressFor(it), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+
+#if USE(JSVALUE64)
+ loadPtr(BaseIndex(regT2, regT0, TimesEight), regT2);
+#else
+ loadPtr(BaseIndex(regT2, regT0, TimesFour), regT2);
+#endif
+
+ emitPutVirtualRegister(dst, regT2);
+
+ // Increment i
+ add32(Imm32(1), regT0);
+ store32(regT0, addressFor(i));
+
+ // Verify that i is valid:
+ emitGetVirtualRegister(base, regT0);
+
+ // Test base's structure
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
+
+ // Test base's prototype chain
+ loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+ addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+ Label checkPrototype(this);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
+ callHasProperty.append(emitJumpIfNotJSCell(regT2));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+ addPtr(Imm32(sizeof(Structure*)), regT3);
+ branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+ // Continue loop.
+ addJump(jump(), target);
+
+ // Slow case: Ask the object if i is valid.
+ callHasProperty.link(this);
+ emitGetVirtualRegister(dst, regT1);
+ JITStubCall stubCall(this, cti_has_property);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
stubCall.call();
- Jump endOfIter = branchTestPtr(Zero, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- addJump(jump(), currentInstruction[3].u.operand + 3);
- endOfIter.link(this);
+
+ // Test for valid key.
+ addJump(branchTest32(NonZero, regT0), target);
+ jump().linkTo(begin, this);
+
+ // End of loop.
+ end.link(this);
}
void JIT::emit_op_push_scope(Instruction* currentInstruction)
@@ -2480,8 +2564,8 @@ void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_jmp_scopes);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.call();
- addJump(jump(), currentInstruction[2].u.operand + 2);
- RECORD_JUMP_TARGET(currentInstruction[2].u.operand + 2);
+ addJump(jump(), currentInstruction[2].u.operand);
+ RECORD_JUMP_TARGET(currentInstruction[2].u.operand);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
@@ -2552,7 +2636,6 @@ void JIT::emit_op_debug(Instruction* currentInstruction)
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[4].u.operand));
stubCall.call();
}
@@ -2724,36 +2807,6 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
stubCall.call(dst);
}
-void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_loop_if_less);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
- } else if (isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_loop_if_less);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_loop_if_less);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
- }
-}
-
void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned op2 = currentInstruction[2].u.operand;
@@ -2764,7 +2817,7 @@ void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<Slo
stubCall.addArgument(regT0);
stubCall.addArgument(currentInstruction[2].u.operand, regT2);
stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
} else {
linkSlowCase(iter);
linkSlowCase(iter);
@@ -2772,7 +2825,7 @@ void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<Slo
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}
}
@@ -2794,15 +2847,6 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
stubPutByValCall.call();
}
-void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
-}
-
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
@@ -2818,7 +2862,7 @@ void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEnt
JITStubCall stubCall(this, cti_op_jtrue);
stubCall.addArgument(regT0);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand + 2); // inverted!
+ emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
}
void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -2835,7 +2879,7 @@ void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntr
JITStubCall stubCall(this, cti_op_jtrue);
stubCall.addArgument(regT0);
stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
}
void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -2901,16 +2945,20 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCase
void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, value);
+ linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_instanceof);
- stubCall.addArgument(currentInstruction[2].u.operand, regT2);
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.addArgument(currentInstruction[4].u.operand, regT2);
- stubCall.call(currentInstruction[1].u.operand);
+ stubCall.addArgument(value, regT2);
+ stubCall.addArgument(baseVal, regT2);
+ stubCall.addArgument(proto, regT2);
+ stubCall.call(dst);
}
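
The rewritten instanceof slow path links a slow case only for operands whose fast path could actually have emitted a not-a-cell check; an unconditional linkSlowCase() would consume entries that were never pushed. A sketch of the helper this leans on (assumed implementation, consistent with how it is used in this file):

    ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
    {
        // Mirror the fast path exactly: a SlowCaseEntry exists only if
        // the operand could not be proven to be a cell at compile time.
        if (!m_codeBlock->isKnownNotImmediate(vReg))
            linkSlowCase(iter);
    }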
void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp
index 9edfd01..ef95f99 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -33,6 +33,7 @@
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
@@ -211,12 +212,17 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
emitLoad(base, regT1, regT0);
emitJumpSlowCaseIfNotJSCell(base, regT1);
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
// This will be relinked to load the function without doing a load.
DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
move(Imm32(JSValue::CellTag), regT1);
Jump match = jump();
@@ -373,6 +379,9 @@ void JIT::compileGetByIdHotPath()
// Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
// to the array-length / prototype access trampolines), and finally we also set the property-map access offset as a label
// to jump back to if one of these trampolines finds a match.
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
m_propertyAccessInstructionIndex++;
@@ -395,6 +404,8 @@ void JIT::compileGetByIdHotPath()
Label putResult(this);
ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
}
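
The BEGIN/END_UNINTERRUPTED_SEQUENCE markers pin the whole hot path down as one contiguous run of instructions: the repatching code later locates every patchable site purely by a fixed byte offset from hotPathBegin, so the assembler must not interleave constant pools or veneers. Roughly how a later repatch addresses the sequence (pattern taken from the self-access repatch path; shown only as illustration):

    RepatchBuffer repatchBuffer(codeBlock);
    // Find the structure immediate by arithmetic on hotPathBegin alone:
    repatchBuffer.repatch(
        stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure),
        structure);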
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -416,13 +427,18 @@ void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<Sl
linkSlowCaseIfNotJSCell(iter, base);
linkSlowCase(iter);
- Label coldPathBegin(this);
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+#ifndef NDEBUG
+ Label coldPathBegin(this);
+#endif
JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
stubCall.addArgument(regT1, regT0);
stubCall.addArgument(ImmPtr(ident));
Call call = stubCall.call(dst);
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
// Track the location of the call; this will be used to recover patch information.
@@ -443,6 +459,8 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(base, regT1);
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
m_propertyAccessInstructionIndex++;
@@ -460,6 +478,9 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
+
+ END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
}
@@ -521,22 +542,26 @@ void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID res
load32(Address(temp, offset + 4), resultTag);
}
+void JIT::testPrototype(Structure* structure, JumpList& failureCases)
+{
+ if (structure->m_prototype.isNull())
+ return;
+
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure)));
+}
+
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
{
// It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
JumpList failureCases;
failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(oldStructure)));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
+ testPrototype(oldStructure, failureCases);
// Verify that nothing in the prototype chain has a setter for this property.
- for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(it->get())));
- }
+ for (RefPtr<Structure>* it = chain->head(); *it; ++it)
+ testPrototype(it->get(), failureCases);
// Reallocate property storage if needed.
Call callTarget;
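
testPrototype() replaces two dependent loads per prototype link (load m_prototype, then its m_structure) with a single memory-vs-immediate compare: the prototype cell's address is known at compile time, so both the slot to inspect and the expected Structure* can be baked in. The one-link check it emits, restated:

    JSCell* proto = asCell(structure->m_prototype);
    failureCases.append(branchPtr(NotEqual,
        AbsoluteAddress(&proto->m_structure),  // where the structure lives now
        ImmPtr(proto->m_structure)));          // value captured at compile time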
@@ -705,7 +730,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
@@ -785,7 +810,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
@@ -838,7 +863,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(currStructure), regT3);
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
@@ -893,7 +918,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(currStructure), regT3);
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
@@ -930,6 +955,69 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
+{
+ ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
+ ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
+ ASSERT(sizeof(JSValue) == 8);
+
+ Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ Jump finishedLoad = jump();
+ notUsingInlineStorage.link(this);
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ finishedLoad.link(this);
+}
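
The branch at the top of compileGetDirectOffset distinguishes inline from external property storage. A hypothetical layout sketch of the fields involved (ordering and everything except m_inlineStorage/m_externalStorage is illustrative only):

    struct JSObjectStorageSketch {
        Structure* m_structure;
        JSValue m_inlineStorage[inlineStorageCapacity]; // fast case: indexed directly off the object
        JSValue* m_externalStorage;                     // grown case: one extra pointer load
    };

While the structure's m_propertyStorageCapacity still equals inlineStorageCapacity, properties are read straight out of the object; once storage has been reallocated, the external pointer must be chased first.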
+
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ unsigned iter = currentInstruction[5].u.operand;
+ unsigned i = currentInstruction[6].u.operand;
+
+ emitLoad2(property, regT1, regT0, base, regT3, regT2);
+ emitJumpSlowCaseIfNotJSCell(property, regT1);
+ addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
+ // Property registers are now available as the property is known
+ emitJumpSlowCaseIfNotJSCell(base, regT3);
+ emitLoadPayload(iter, regT1);
+
+ // Test base's structure
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(Imm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);
+
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, property);
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
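
op_get_by_pname is the for-in fast path: while the base object still has the Structure cached by the iterator and the (1-based) loop index is within the directly cacheable slots, the property is loaded by offset with no hash lookup; any failed guard drops to a plain cti_op_get_by_val. The guard logic restated as hypothetical C++ (the JIT emits it as the branches above):

    static bool canUseFastPath(JSCell* base, JSPropertyNameIterator* it, int32_t i)
    {
        return base->structure() == it->m_cachedStructure          // shape unchanged
            && static_cast<unsigned>(i - 1) < it->m_numCacheableSlots; // slot still direct
    }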
+
#else // USE(JSVALUE32_64)
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
@@ -963,6 +1051,62 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
+{
+ ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
+ ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
+
+ Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
+ loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
+ Jump finishedLoad = jump();
+ notUsingInlineStorage.link(this);
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
+ loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
+ finishedLoad.link(this);
+}
+
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ unsigned iter = currentInstruction[5].u.operand;
+ unsigned i = currentInstruction[6].u.operand;
+
+ emitGetVirtualRegister(property, regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
+ emitGetVirtualRegisters(base, regT0, iter, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ // Test base's structure
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(Imm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ compileGetDirectOffset(regT0, regT0, regT2, regT3, regT1);
+
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+}
+
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
unsigned base = currentInstruction[1].u.operand;
@@ -1128,9 +1272,9 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
Jump match = jump();
- ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
- ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
- ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
+ ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
+ ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
+ ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
// Link the failure cases here.
notCell.link(this);
@@ -1197,22 +1341,22 @@ void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propert
DataLabelPtr structureToCompare;
Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
- ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
Label externalLoadComplete(this);
- ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
- ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad);
+ ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad);
DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
- ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);
Label putResult(this);
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
- ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
}
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1247,7 +1391,7 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
- ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+ ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
// Track the location of the call; this will be used to recover patch information.
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
@@ -1278,19 +1422,19 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
// It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
DataLabelPtr structureToCompare;
addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
// Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
Label externalLoadComplete(this);
- ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
- ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad);
+ ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad);
DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
END_UNINTERRUPTED_SEQUENCE(sequencePutById);
- ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
+ ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1347,35 +1491,27 @@ void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID res
}
}
+void JIT::testPrototype(Structure* structure, JumpList& failureCases)
+{
+ if (structure->m_prototype.isNull())
+ return;
+
+ move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
+ move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
+ failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
+}
+
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
{
JumpList failureCases;
// Check eax is an object of the right Structure.
failureCases.append(emitJumpIfNotJSCell(regT0));
failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
- JumpList successCases;
-
- // ecx = baseObject
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- // proto(ecx) = baseObject->structure()->prototype()
- failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
+ testPrototype(oldStructure, failureCases);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
-
// ecx = baseObject->m_structure
- for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
- // null check the prototype
- successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull()))));
-
- // Check the structure id
- failureCases.append(branchPtr(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(it->get())));
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
- }
-
- successCases.link(this);
+ for (RefPtr<Structure>* it = chain->head(); *it; ++it)
+ testPrototype(it->get(), failureCases);
Call callTarget;
@@ -1540,7 +1676,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
@@ -1615,7 +1751,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
@@ -1668,7 +1804,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(currStructure), regT3);
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
@@ -1721,7 +1857,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
// Check the prototype object's Structure had not changed.
Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
move(ImmPtr(currStructure), regT3);
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h
index cb5354b..cfbd7dc 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubCall.h
@@ -26,7 +26,7 @@
#ifndef JITStubCall_h
#define JITStubCall_h
-#include <wtf/Platform.h>
+#include "MacroAssemblerCodeRef.h"
#if ENABLE(JIT)
@@ -36,58 +36,58 @@ namespace JSC {
public:
JITStubCall(JIT* jit, JSObject* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
- , m_stub(reinterpret_cast<void*>(stub))
+ , m_stub(stub)
, m_returnType(Cell)
- , m_stackIndex(stackIndexStart)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
{
}
JITStubCall(JIT* jit, JSPropertyNameIterator* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
- , m_stub(reinterpret_cast<void*>(stub))
+ , m_stub(stub)
, m_returnType(Cell)
- , m_stackIndex(stackIndexStart)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
{
}
JITStubCall(JIT* jit, void* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
- , m_stub(reinterpret_cast<void*>(stub))
+ , m_stub(stub)
, m_returnType(VoidPtr)
- , m_stackIndex(stackIndexStart)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
{
}
JITStubCall(JIT* jit, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
- , m_stub(reinterpret_cast<void*>(stub))
+ , m_stub(stub)
, m_returnType(Int)
- , m_stackIndex(stackIndexStart)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
{
}
JITStubCall(JIT* jit, bool (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
- , m_stub(reinterpret_cast<void*>(stub))
+ , m_stub(stub)
, m_returnType(Int)
- , m_stackIndex(stackIndexStart)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
{
}
JITStubCall(JIT* jit, void (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
- , m_stub(reinterpret_cast<void*>(stub))
+ , m_stub(stub)
, m_returnType(Void)
- , m_stackIndex(stackIndexStart)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
{
}
#if USE(JSVALUE32_64)
JITStubCall(JIT* jit, EncodedJSValue (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
: m_jit(jit)
- , m_stub(reinterpret_cast<void*>(stub))
+ , m_stub(stub)
, m_returnType(Value)
- , m_stackIndex(stackIndexStart)
+ , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
{
}
#endif
@@ -145,7 +145,7 @@ namespace JSC {
void getArgument(size_t argumentNumber, JIT::RegisterID tag, JIT::RegisterID payload)
{
- size_t stackIndex = stackIndexStart + (argumentNumber * stackIndexStep);
+ size_t stackIndex = JITSTACKFRAME_ARGS_INDEX + (argumentNumber * stackIndexStep);
m_jit->peek(payload, stackIndex);
m_jit->peek(tag, stackIndex + 1);
}
@@ -171,7 +171,7 @@ namespace JSC {
m_jit->restoreArgumentReference();
JIT::Call call = m_jit->call();
- m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeIndex, m_stub));
+ m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeIndex, m_stub.value()));
#if ENABLE(OPCODE_SAMPLING)
if (m_jit->m_bytecodeIndex != (unsigned)-1)
@@ -222,10 +222,9 @@ namespace JSC {
private:
static const size_t stackIndexStep = sizeof(EncodedJSValue) == 2 * sizeof(void*) ? 2 : 1;
- static const size_t stackIndexStart = 1; // Index 0 is reserved for restoreArgumentReference().
JIT* m_jit;
- void* m_stub;
+ FunctionPtr m_stub;
enum { Void, VoidPtr, Int, Value, Cell } m_returnType;
size_t m_stackIndex;
};
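
JITStubCall now stores the stub as a typed FunctionPtr instead of a raw void*, and argument slots start at the shared JITSTACKFRAME_ARGS_INDEX constant rather than a local stackIndexStart. Call sites are unchanged; the usual pattern, as seen throughout this patch:

    JITStubCall stubCall(this, cti_op_jtrue); // overload resolution records the return type
    stubCall.addArgument(regT0);              // poked at JITSTACKFRAME_ARGS_INDEX + n
    stubCall.call();                          // plants the call, appends a CallRecord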
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp
index b098728..91b9401 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.cpp
@@ -56,6 +56,7 @@
#include "RegExpPrototype.h"
#include "Register.h"
#include "SamplingTool.h"
+#include <wtf/StdLibExtras.h>
#include <stdarg.h>
#include <stdio.h>
@@ -67,25 +68,37 @@ using namespace std;
namespace JSC {
-#if PLATFORM(DARWIN) || PLATFORM(WIN_OS)
+#if OS(DARWIN) || OS(WINDOWS)
#define SYMBOL_STRING(name) "_" #name
#else
#define SYMBOL_STRING(name) #name
#endif
-#if PLATFORM(IPHONE)
+#if OS(IPHONE_OS)
#define THUMB_FUNC_PARAM(name) SYMBOL_STRING(name)
#else
#define THUMB_FUNC_PARAM(name)
#endif
-#if PLATFORM(DARWIN)
+#if OS(LINUX) && CPU(X86_64)
+#define SYMBOL_STRING_RELOCATION(name) #name "@plt"
+#else
+#define SYMBOL_STRING_RELOCATION(name) SYMBOL_STRING(name)
+#endif
+
+#if OS(DARWIN)
// Mach-O platform
#define HIDE_SYMBOL(name) ".private_extern _" #name
-#elif PLATFORM(AIX)
+#elif OS(AIX)
// IBM's own file format
#define HIDE_SYMBOL(name) ".lglobl " #name
-#elif PLATFORM(LINUX) || PLATFORM(FREEBSD) || PLATFORM(OPENBSD) || PLATFORM(SOLARIS) || (PLATFORM(HPUX) && PLATFORM(IA64)) || PLATFORM(SYMBIAN) || PLATFORM(NETBSD)
+#elif OS(LINUX) \
+ || OS(FREEBSD) \
+ || OS(OPENBSD) \
+ || OS(SOLARIS) \
+ || (OS(HPUX) && CPU(IA64)) \
+ || OS(SYMBIAN) \
+ || OS(NETBSD)
// ELF platform
#define HIDE_SYMBOL(name) ".hidden " #name
#else
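
SYMBOL_STRING_RELOCATION exists because position-independent code on Linux/x86-64 must not call a preemptible symbol directly; the call has to be routed through the PLT. Everywhere else it degenerates to SYMBOL_STRING. Illustrative expansion of the call sites changed below:

    asm volatile ("call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n");
    // Linux/x86-64:   call cti_vm_throw@plt
    // Darwin/Windows: call _cti_vm_throw
    // other ELF:      call cti_vm_throw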
@@ -94,7 +107,7 @@ namespace JSC {
#if USE(JSVALUE32_64)
-#if COMPILER(GCC) && PLATFORM(X86)
+#if COMPILER(GCC) && CPU(X86)
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
@@ -104,6 +117,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
asm volatile (
+".text\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
@@ -131,7 +145,7 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
"movl %esp, %ecx" "\n"
#endif
- "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"addl $0x3c, %esp" "\n"
"popl %ebx" "\n"
"popl %edi" "\n"
@@ -152,7 +166,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(X86_64)
+#elif COMPILER(GCC) && CPU(X86_64)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
@@ -197,7 +211,7 @@ asm volatile (
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"movq %rsp, %rdi" "\n"
- "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"addq $0x48, %rsp" "\n"
"popq %rbx" "\n"
"popq %r15" "\n"
@@ -222,7 +236,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(ARM_THUMB2)
+#elif COMPILER(GCC) && CPU(ARM_THUMB2)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
@@ -232,9 +246,9 @@ asm volatile (
".text" "\n"
".align 2" "\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
".thumb" "\n"
".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
"sub sp, sp, #0x3c" "\n"
"str lr, [sp, #0x20]" "\n"
@@ -259,12 +273,12 @@ asm volatile (
".text" "\n"
".align 2" "\n"
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
".thumb" "\n"
".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"cpy r0, sp" "\n"
- "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+ "bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"ldr r6, [sp, #0x2c]" "\n"
"ldr r5, [sp, #0x28]" "\n"
"ldr r4, [sp, #0x24]" "\n"
@@ -277,9 +291,9 @@ asm volatile (
".text" "\n"
".align 2" "\n"
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
".thumb" "\n"
".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ldr r6, [sp, #0x2c]" "\n"
"ldr r5, [sp, #0x28]" "\n"
@@ -289,7 +303,44 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"bx lr" "\n"
);
-#elif COMPILER(MSVC)
+#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+HIDE_SYMBOL(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "stmdb sp!, {r1-r3}" "\n"
+ "stmdb sp!, {r4-r8, lr}" "\n"
+ "sub sp, sp, #68" "\n"
+ "mov r4, r2" "\n"
+ "mov r5, #512" "\n"
+ // r0 contains the code
+ "mov lr, pc" "\n"
+ "mov pc, r0" "\n"
+ "add sp, sp, #68" "\n"
+ "ldmia sp!, {r4-r8, lr}" "\n"
+ "add sp, sp, #12" "\n"
+ "mov pc, lr" "\n"
+);
+
+asm volatile (
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "mov r0, sp" "\n"
+ "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+
+// Both have the same return sequence
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "add sp, sp, #68" "\n"
+ "ldmia sp!, {r4-r8, lr}" "\n"
+ "add sp, sp, #12" "\n"
+ "mov pc, lr" "\n"
+);
+
+#elif COMPILER(MSVC) && CPU(X86)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
@@ -353,11 +404,13 @@ extern "C" {
}
}
-#endif // COMPILER(GCC) && PLATFORM(X86)
+#else
+ #error "JIT not supported on this platform."
+#endif
#else // USE(JSVALUE32_64)
-#if COMPILER(GCC) && PLATFORM(X86)
+#if COMPILER(GCC) && CPU(X86)
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
@@ -366,6 +419,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_
COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
asm volatile (
+".text\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
@@ -393,7 +447,7 @@ SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
"movl %esp, %ecx" "\n"
#endif
- "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"addl $0x1c, %esp" "\n"
"popl %ebx" "\n"
"popl %edi" "\n"
@@ -414,7 +468,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(X86_64)
+#elif COMPILER(GCC) && CPU(X86_64)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
@@ -427,6 +481,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x48, JITStackFrame_code_
COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x78, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
asm volatile (
+".text\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
@@ -465,7 +520,7 @@ asm volatile (
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"movq %rsp, %rdi" "\n"
- "call " SYMBOL_STRING(cti_vm_throw) "\n"
+ "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"addq $0x78, %rsp" "\n"
"popq %rbx" "\n"
"popq %r15" "\n"
@@ -490,7 +545,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ret" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(ARM_THUMB2)
+#elif COMPILER(GCC) && CPU(ARM_THUMB2)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
@@ -532,7 +587,7 @@ HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"cpy r0, sp" "\n"
- "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+ "bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
"ldr r6, [sp, #0x2c]" "\n"
"ldr r5, [sp, #0x28]" "\n"
"ldr r4, [sp, #0x24]" "\n"
@@ -557,30 +612,24 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"bx lr" "\n"
);
-#elif COMPILER(GCC) && PLATFORM(ARM_TRADITIONAL)
+#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
asm volatile (
+".text\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
"stmdb sp!, {r1-r3}" "\n"
"stmdb sp!, {r4-r8, lr}" "\n"
- "mov r6, pc" "\n"
- "add r6, r6, #40" "\n"
- "sub sp, sp, #32" "\n"
- "ldr r4, [sp, #60]" "\n"
+ "sub sp, sp, #36" "\n"
+ "mov r4, r2" "\n"
"mov r5, #512" "\n"
- // r0 contains the code
- "add r8, pc, #4" "\n"
- "str r8, [sp, #-4]!" "\n"
+ "mov lr, pc" "\n"
"mov pc, r0" "\n"
- "add sp, sp, #32" "\n"
+ "add sp, sp, #36" "\n"
"ldmia sp!, {r4-r8, lr}" "\n"
"add sp, sp, #12" "\n"
"mov pc, lr" "\n"
-
- // the return instruction
- "ldr pc, [sp], #4" "\n"
);
asm volatile (
@@ -588,22 +637,58 @@ asm volatile (
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"mov r0, sp" "\n"
- "mov lr, r6" "\n"
- "add r8, pc, #4" "\n"
- "str r8, [sp, #-4]!" "\n"
- "b " SYMBOL_STRING(cti_vm_throw) "\n"
+ "bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
// Both have the same return sequence
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "add sp, sp, #32" "\n"
+ "add sp, sp, #36" "\n"
"ldmia sp!, {r4-r8, lr}" "\n"
"add sp, sp, #12" "\n"
"mov pc, lr" "\n"
);
-#elif COMPILER(MSVC)
+#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
+
+__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, JSValue*, Profiler**, JSGlobalData*)
+{
+ ARM
+ stmdb sp!, {r1-r3}
+ stmdb sp!, {r4-r8, lr}
+ sub sp, sp, #36
+ mov r4, r2
+ mov r5, #512
+ mov lr, pc
+ bx r0
+ add sp, sp, #36
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+
+__asm void ctiVMThrowTrampoline()
+{
+ ARM
+ PRESERVE8
+ mov r0, sp
+ bl cti_vm_throw
+ add sp, sp, #36
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+
+__asm void ctiOpThrowNotCaught()
+{
+ ARM
+ add sp, sp, #36
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+
+#elif COMPILER(MSVC) && CPU(X86)
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
@@ -666,7 +751,9 @@ extern "C" {
}
}
-#endif // COMPILER(GCC) && PLATFORM(X86)
+#else
+ #error "JIT not supported on this platform."
+#endif
#endif // USE(JSVALUE32_64)
@@ -680,7 +767,7 @@ JITThunks::JITThunks(JSGlobalData* globalData)
{
JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk);
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
// Unfortunately the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
// and the OBJECT_OFFSETOF macro does not appear constantish enough for it to be happy with its use in COMPILE_ASSERT
// macros.
@@ -732,11 +819,15 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
// Structure transition, cache transition info
if (slot.type() == PutPropertySlot::NewProperty) {
- StructureChain* prototypeChain = structure->prototypeChain(callFrame);
- if (!prototypeChain->isCacheable() || structure->isDictionary()) {
+ if (structure->isDictionary()) {
ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
return;
}
+
+ // put_by_id_transition checks the prototype chain for setters.
+ normalizePrototypeChain(callFrame, baseCell);
+
+ StructureChain* prototypeChain = structure->prototypeChain(callFrame);
stubInfo->initPutByIdTransition(structure->previousID(), structure, prototypeChain);
JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress);
return;
@@ -796,35 +887,42 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
return;
}
+ if (structure->isDictionary()) {
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
+ return;
+ }
+
if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
ASSERT(slot.slotBase().isObject());
JSObject* slotBaseObject = asObject(slot.slotBase());
-
+ size_t offset = slot.cachedOffset();
+
// Since we're accessing a prototype in a loop, it's a good bet that it
// should not be treated as a dictionary.
- if (slotBaseObject->structure()->isDictionary())
- slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure()));
+ if (slotBaseObject->structure()->isDictionary()) {
+ slotBaseObject->flattenDictionaryObject();
+ offset = slotBaseObject->structure()->get(propertyName);
+ }
stubInfo->initGetByIdProto(structure, slotBaseObject->structure());
- JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), slot.cachedOffset(), returnAddress);
+ ASSERT(!structure->isDictionary());
+ ASSERT(!slotBaseObject->structure()->isDictionary());
+ JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), offset, returnAddress);
return;
}
- size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot);
+ size_t offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset);
if (!count) {
stubInfo->accessType = access_get_by_id_generic;
return;
}
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
- if (!prototypeChain->isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
stubInfo->initGetByIdChain(structure, prototypeChain);
- JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, slot.cachedOffset(), returnAddress);
+ JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, offset, returnAddress);
}
#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
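
countPrototypeChainEntriesAndCheckForProxies and the prototypeChain->isCacheable() bail-outs are gone; normalizePrototypeChain now both measures and repairs the chain. Judging by the call sites, its contract is: walk from the base to the slot's holder, flatten any dictionary Structure on the way (refreshing offset, since flattening can move the property), and return the number of hops, with 0 meaning the lookup is not cacheable. Usage sketch matching the hunk above:

    size_t offset = slot.cachedOffset();
    size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(),
                                           propertyName, offset);
    if (!count) {
        stubInfo->accessType = access_get_by_id_generic; // chain not cacheable
        return;
    }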
@@ -920,7 +1018,7 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
} \
} while (0)
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
#define DEFINE_STUB_FUNCTION(rtype, op) \
extern "C" { \
@@ -941,6 +1039,57 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
); \
rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \
+#elif CPU(ARM_TRADITIONAL) && COMPILER(GCC)
+
+#if USE(JSVALUE32_64)
+#define THUNK_RETURN_ADDRESS_OFFSET 64
+#else
+#define THUNK_RETURN_ADDRESS_OFFSET 32
+#endif
+
+COMPILE_ASSERT(offsetof(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET, JITStackFrame_thunkReturnAddress_offset_mismatch);
+
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm volatile ( \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ HIDE_SYMBOL(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
+ "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
+ "mov pc, lr" "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+#elif CPU(ARM_TRADITIONAL) && COMPILER(RVCT)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+/* The following is a workaround for the RVCT toolchain: preprocessor macros are not expanded before the code is passed to the assembler */
+
+/* The following section is a template to generate code for GeneratedJITStubs_RVCT.h */
+/* The pattern "#xxx#" will be replaced with "xxx" */
+
+/*
+RVCT(extern "C" #rtype# JITStubThunked_#op#(STUB_ARGS_DECLARATION);)
+RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
+RVCT({)
+RVCT( ARM)
+RVCT( IMPORT JITStubThunked_#op#)
+RVCT( str lr, [sp, #32])
+RVCT( bl JITStubThunked_#op#)
+RVCT( ldr lr, [sp, #32])
+RVCT( bx lr)
+RVCT(})
+RVCT()
+*/
+
+/* Include the generated file */
+#include "GeneratedJITStubs_RVCT.h"
+
#else
#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
#endif
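
On platforms without a thunk, DEFINE_STUB_FUNCTION is just a function header; the ARM variants interpose an asm thunk named cti_<op> that spills lr into the JITStackFrame slot before calling the real C++ body JITStubThunked_<op>. Default-case expansion, using a hypothetical stub name:

    // DEFINE_STUB_FUNCTION(int, op_example) expands, in the #else case, to:
    int JIT_STUB cti_op_example(STUB_ARGS_DECLARATION)
    {
        STUB_INIT_STACK_FRAME(stackFrame);
        // ... stub body ...
        return 0;
    }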
@@ -972,38 +1121,19 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
JSValue v1 = stackFrame.args[0].jsValue();
JSValue v2 = stackFrame.args[1].jsValue();
-
- double left;
- double right = 0.0;
-
- bool rightIsNumber = v2.getNumber(right);
- if (rightIsNumber && v1.getNumber(left))
- return JSValue::encode(jsNumber(stackFrame.globalData, left + right));
-
CallFrame* callFrame = stackFrame.callFrame;
- bool leftIsString = v1.isString();
- if (leftIsString && v2.isString()) {
- RefPtr<UString::Rep> value = concatenate(asString(v1)->value().rep(), asString(v2)->value().rep());
- if (UNLIKELY(!value)) {
- throwOutOfMemoryError(callFrame);
- VM_THROW_EXCEPTION();
- }
-
- return JSValue::encode(jsString(stackFrame.globalData, value.release()));
+ if (v1.isString()) {
+ JSValue result = v2.isString()
+ ? jsString(callFrame, asString(v1), asString(v2))
+ : jsString(callFrame, asString(v1), v2.toPrimitiveString(callFrame));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
}
- if (rightIsNumber & leftIsString) {
- RefPtr<UString::Rep> value = v2.isInt32() ?
- concatenate(asString(v1)->value().rep(), v2.asInt32()) :
- concatenate(asString(v1)->value().rep(), right);
-
- if (UNLIKELY(!value)) {
- throwOutOfMemoryError(callFrame);
- VM_THROW_EXCEPTION();
- }
- return JSValue::encode(jsString(stackFrame.globalData, value.release()));
- }
+ double left = 0.0, right;
+ if (v1.getNumber(left) && v2.getNumber(right))
+ return JSValue::encode(jsNumber(stackFrame.globalData, left + right));
// All other cases are pretty uncommon
JSValue result = jsAddSlowCase(callFrame, v1, v2);
@@ -1053,19 +1183,6 @@ DEFINE_STUB_FUNCTION(void, register_file_check)
throwStackOverflowError(oldCallFrame, stackFrame.globalData, ReturnAddressPtr(oldCallFrame->returnPC()), STUB_RETURN_ADDRESS);
}
-DEFINE_STUB_FUNCTION(int, op_loop_if_less)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = jsLess(callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
DEFINE_STUB_FUNCTION(int, op_loop_if_lesseq)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1204,7 +1321,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
// Since we're accessing a prototype in a loop, it's a good bet that it
// should not be treated as a dictionary.
if (slotBaseObject->structure()->isDictionary())
- slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure()));
+ slotBaseObject->flattenDictionaryObject();
// The result fetched should always be the callee!
ASSERT(result == JSValue(callee));
@@ -1222,7 +1339,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
// for now. For now it performs a check on a special object on the global object only used for this
// purpose. The object is in no way exposed, and as such the check will always pass.
if (slot.slotBase() == baseValue) {
- JIT::patchMethodCallProto(codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject()->methodCallDummy(), STUB_RETURN_ADDRESS);
+ JIT::patchMethodCallProto(codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject->methodCallDummy(), STUB_RETURN_ADDRESS);
return JSValue::encode(result);
}
}
@@ -1332,14 +1449,15 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
+ const Identifier& propertyName = stackFrame.args[1].identifier();
JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, stackFrame.args[1].identifier(), slot);
+ JSValue result = baseValue.get(callFrame, propertyName, slot);
CHECK_FOR_EXCEPTION();
- if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isUncacheableDictionary()) {
+ if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) {
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
return JSValue::encode(result);
}
@@ -1350,32 +1468,34 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
ASSERT(slot.slotBase().isObject());
JSObject* slotBaseObject = asObject(slot.slotBase());
+
+ size_t offset = slot.cachedOffset();
if (slot.slotBase() == baseValue)
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
else if (slot.slotBase() == asCell(baseValue)->structure()->prototypeForLookup(callFrame)) {
+ ASSERT(!asCell(baseValue)->structure()->isDictionary());
// Since we're accessing a prototype in a loop, it's a good bet that it
// should not be treated as a dictionary.
- if (slotBaseObject->structure()->isDictionary())
- slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure()));
+ if (slotBaseObject->structure()->isDictionary()) {
+ slotBaseObject->flattenDictionaryObject();
+ offset = slotBaseObject->structure()->get(propertyName);
+ }
int listIndex;
PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
- JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), slot.cachedOffset());
+ JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), offset);
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
- } else if (size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot)) {
- StructureChain* protoChain = structure->prototypeChain(callFrame);
- if (!protoChain->isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
- return JSValue::encode(result);
- }
-
+ } else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) {
+ ASSERT(!asCell(baseValue)->structure()->isDictionary());
int listIndex;
PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
- JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, slot.cachedOffset());
+
+ StructureChain* protoChain = structure->prototypeChain(callFrame);
+ JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, offset);
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
@@ -1452,7 +1572,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
// ECMA-262 15.3.5.3:
// Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e. is a function).
- TypeInfo typeInfo(UnspecifiedType, 0);
+ TypeInfo typeInfo(UnspecifiedType);
if (!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance()) {
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
@@ -1520,7 +1640,7 @@ DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
{
STUB_INIT_STACK_FRAME(stackFrame);
-#ifndef NDEBUG
+#if !ASSERT_DISABLED
CallData callData;
ASSERT(stackFrame.args[0].jsValue().getCallData(callData) == CallTypeJS);
#endif
@@ -1769,7 +1889,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_construct_JSConstruct)
VM_THROW_EXCEPTION();
}
-#ifndef NDEBUG
+#if !ASSERT_DISABLED
ConstructData constructData;
ASSERT(constructor->getConstructData(constructData) == ConstructTypeJS);
#endif
@@ -1778,7 +1898,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_construct_JSConstruct)
if (stackFrame.args[3].jsValue().isObject())
structure = asObject(stackFrame.args[3].jsValue())->inheritorID();
else
- structure = constructor->scope().node()->globalObject()->emptyObjectStructure();
+ structure = constructor->scope().node()->globalObject->emptyObjectStructure();
#ifdef QT_BUILD_SCRIPT_LIB
return new (stackFrame.globalData) QT_PREPEND_NAMESPACE(QScriptObject)(structure);
#else
@@ -1843,7 +1963,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
} else if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) {
// All fast string accesses are safe from exceptions so return immediately to avoid exception checks.
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
- result = asString(baseValue)->getIndex(stackFrame.globalData, i);
+ result = asString(baseValue)->getIndex(callFrame, i);
} else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array));
@@ -1874,7 +1994,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
if (LIKELY(subscript.isUInt32())) {
uint32_t i = subscript.asUInt32();
if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i))
- result = asString(baseValue)->getIndex(stackFrame.globalData, i);
+ result = asString(baseValue)->getIndex(callFrame, i);
else {
result = baseValue.get(callFrame, i);
if (!isJSString(globalData, baseValue))
@@ -2039,19 +2159,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_lesseq)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(int, op_loop_if_true)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = src1.toBoolean(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
DEFINE_STUB_FUNCTION(int, op_load_varargs)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2326,8 +2433,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
return JSValue::encode(number);
}
-#if USE(JSVALUE32_64)
-
DEFINE_STUB_FUNCTION(int, op_eq)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2335,6 +2440,7 @@ DEFINE_STUB_FUNCTION(int, op_eq)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
+#if USE(JSVALUE32_64)
start:
if (src2.isUndefined()) {
return src1.isNull() ||
@@ -2390,20 +2496,20 @@ DEFINE_STUB_FUNCTION(int, op_eq)
if (cell1->isString()) {
if (src2.isInt32())
- return static_cast<JSString*>(cell1)->value().toDouble() == src2.asInt32();
+ return static_cast<JSString*>(cell1)->value(stackFrame.callFrame).toDouble() == src2.asInt32();
if (src2.isDouble())
- return static_cast<JSString*>(cell1)->value().toDouble() == src2.asDouble();
+ return static_cast<JSString*>(cell1)->value(stackFrame.callFrame).toDouble() == src2.asDouble();
if (src2.isTrue())
- return static_cast<JSString*>(cell1)->value().toDouble() == 1.0;
+ return static_cast<JSString*>(cell1)->value(stackFrame.callFrame).toDouble() == 1.0;
if (src2.isFalse())
- return static_cast<JSString*>(cell1)->value().toDouble() == 0.0;
+ return static_cast<JSString*>(cell1)->value(stackFrame.callFrame).toDouble() == 0.0;
JSCell* cell2 = asCell(src2);
if (cell2->isString())
- return static_cast<JSString*>(cell1)->value() == static_cast<JSString*>(cell2)->value();
+ return static_cast<JSString*>(cell1)->value(stackFrame.callFrame) == static_cast<JSString*>(cell2)->value(stackFrame.callFrame);
src2 = asObject(cell2)->toPrimitive(stackFrame.callFrame);
CHECK_FOR_EXCEPTION();
@@ -2420,8 +2526,18 @@ DEFINE_STUB_FUNCTION(int, op_eq)
src1 = asObject(cell1)->toPrimitive(stackFrame.callFrame);
CHECK_FOR_EXCEPTION();
goto start;
+
+#else // USE(JSVALUE32_64)
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = JSValue::equalSlowCaseInline(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+#endif // USE(JSVALUE32_64)
}
+#if USE(JSVALUE32_64)
+
DEFINE_STUB_FUNCTION(int, op_eq_strings)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2431,26 +2547,10 @@ DEFINE_STUB_FUNCTION(int, op_eq_strings)
ASSERT(string1->isString());
ASSERT(string2->isString());
- return string1->value() == string2->value();
-}
-
-#else // USE(JSVALUE32_64)
-
-DEFINE_STUB_FUNCTION(int, op_eq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = JSValue::equalSlowCaseInline(callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
+ return string1->value(stackFrame.callFrame) == string2->value(stackFrame.callFrame);
}
-#endif // USE(JSVALUE32_64)
+#endif
DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
{
@@ -2668,7 +2768,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
Register* newCallFrame = callFrame->registers() + registerOffset;
Register* argv = newCallFrame - RegisterFile::CallFrameHeaderSize - argCount;
JSValue thisValue = argv[0].jsValue();
- JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject();
+ JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject;
if (thisValue == globalObject && funcVal == globalObject->evalFunction()) {
JSValue exceptionValue;
@@ -2699,7 +2799,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_throw)
if (!handler) {
*stackFrame.exception = exceptionValue;
- STUB_SET_RETURN_ADDRESS(reinterpret_cast<void*>(ctiOpThrowNotCaught));
+ STUB_SET_RETURN_ADDRESS(FunctionPtr(ctiOpThrowNotCaught).value());
return JSValue::encode(jsNull());
}
@@ -2714,18 +2814,22 @@ DEFINE_STUB_FUNCTION(JSPropertyNameIterator*, op_get_pnames)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return JSPropertyNameIterator::create(stackFrame.callFrame, stackFrame.args[0].jsValue());
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSObject* o = stackFrame.args[0].jsObject();
+ Structure* structure = o->structure();
+ JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache();
+ if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(callFrame))
+ jsPropertyNameIterator = JSPropertyNameIterator::create(callFrame, o);
+ return jsPropertyNameIterator;
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_next_pname)
+DEFINE_STUB_FUNCTION(int, has_property)
{
STUB_INIT_STACK_FRAME(stackFrame);
- JSPropertyNameIterator* it = stackFrame.args[0].propertyNameIterator();
- JSValue temp = it->next(stackFrame.callFrame);
- if (!temp)
- it->invalidate();
- return JSValue::encode(temp);
+ JSObject* base = stackFrame.args[0].jsObject();
+ JSString* property = stackFrame.args[1].jsString();
+ return base->hasProperty(stackFrame.callFrame, Identifier(stackFrame.callFrame, property->value(stackFrame.callFrame)));
}
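op_get_pnames now consults a per-Structure enumeration cache: the JSPropertyNameIterator built for a for-in loop is remembered on the object's Structure and reused for as long as the prototype chain it was computed against is still current, while the new has_property stub replaces the old op_next_pname iterator step. The caching pattern, reduced to a sketch with illustrative (non-JSC) names and GC/ownership elided:

    #include <string>
    #include <vector>

    struct PrototypeChain {};

    struct NameIterator {
        const PrototypeChain* cachedChain; // chain the names were computed for
        std::vector<std::string> names;
    };

    struct Structure {
        NameIterator* enumerationCache = nullptr;
        const PrototypeChain* currentChain = nullptr;
    };

    NameIterator* propertyNames(Structure& s) {
        // Reuse the cache only while the prototype chain is unchanged;
        // otherwise rebuild the iterator and re-cache it.
        if (s.enumerationCache && s.enumerationCache->cachedChain == s.currentChain)
            return s.enumerationCache;
        s.enumerationCache = new NameIterator{s.currentChain, {}};
        return s.enumerationCache;
    }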
DEFINE_STUB_FUNCTION(JSObject*, op_push_scope)
@@ -2802,7 +2906,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_stricteq)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
- return JSValue::encode(jsBoolean(JSValue::strictEqual(src1, src2)));
+ return JSValue::encode(jsBoolean(JSValue::strictEqual(stackFrame.callFrame, src1, src2)));
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_primitive)
@@ -2816,7 +2920,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_strcat)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return JSValue::encode(concatenateStrings(stackFrame.callFrame, &stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32()));
+ JSValue result = jsString(stackFrame.callFrame, &stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32());
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
@@ -2826,7 +2932,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
- return JSValue::encode(jsBoolean(!JSValue::strictEqual(src1, src2)));
+ return JSValue::encode(jsBoolean(!JSValue::strictEqual(stackFrame.callFrame, src1, src2)));
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_jsnumber)
@@ -2935,7 +3041,7 @@ DEFINE_STUB_FUNCTION(void*, op_switch_char)
void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
if (scrutinee.isString()) {
- UString::Rep* value = asString(scrutinee)->value().rep();
+ UString::Rep* value = asString(scrutinee)->value(callFrame).rep();
if (value->size() == 1)
result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).executableAddress();
}
@@ -2955,7 +3061,7 @@ DEFINE_STUB_FUNCTION(void*, op_switch_string)
void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
if (scrutinee.isString()) {
- UString::Rep* value = asString(scrutinee)->value().rep();
+ UString::Rep* value = asString(scrutinee)->value(callFrame).rep();
result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).executableAddress();
}
@@ -3034,9 +3140,8 @@ DEFINE_STUB_FUNCTION(void, op_debug)
int debugHookID = stackFrame.args[0].int32();
int firstLine = stackFrame.args[1].int32();
int lastLine = stackFrame.args[2].int32();
- int column = stackFrame.args[3].int32();
- stackFrame.globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine, column);
+ stackFrame.globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine);
}
#ifdef QT_BUILD_SCRIPT_LIB
@@ -3092,6 +3197,14 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, vm_throw)
return JSValue::encode(exceptionValue);
}
+DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ return JSValue::encode(stackFrame.args[0].jsValue().toObject(callFrame));
+}
+
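The new to_object stub is a thin funnel into JSValue::toObject(); the reworked for-in machinery needs its base as a genuine JSObject before it can reach a Structure and the enumeration cache above. Per ECMAScript ToObject, primitives get wrapper objects while null and undefined throw a TypeError; a toy rendering of those semantics:

    #include <stdexcept>

    enum class Tag { Undefined, Null, Boolean, Number, String, Object };
    struct Value { Tag tag; };

    Value toObject(const Value& v) {
        switch (v.tag) {
        case Tag::Undefined:
        case Tag::Null:
            throw std::runtime_error("TypeError: cannot convert to object");
        case Tag::Object:
            return v;                  // already an object
        default:
            return Value{Tag::Object}; // wrap Boolean/Number/String
        }
    }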
} // namespace JSC
- #endif // ENABLE(JIT)
+#endif // ENABLE(JIT)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h
index c2b8c02..da80133 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JITStubs.h
@@ -63,6 +63,7 @@ namespace JSC {
int32_t asInt32;
JSValue jsValue() { return JSValue::decode(asEncodedJSValue); }
+ JSObject* jsObject() { return static_cast<JSObject*>(asPointer); }
Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
int32_t int32() { return asInt32; }
CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
@@ -74,7 +75,7 @@ namespace JSC {
ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
};
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
struct JITStackFrame {
void* reserved; // Unused
JITStubArg args[6];
@@ -98,7 +99,7 @@ namespace JSC {
// When JIT code makes a call, it pushes its return address just below the rest of the stack.
ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
};
-#elif PLATFORM(X86)
+#elif CPU(X86)
#if COMPILER(MSVC)
#pragma pack(push)
#pragma pack(4)
@@ -129,7 +130,7 @@ namespace JSC {
#if COMPILER(MSVC)
#pragma pack(pop)
#endif // COMPILER(MSVC)
-#elif PLATFORM(ARM_THUMB2)
+#elif CPU(ARM_THUMB2)
struct JITStackFrame {
void* reserved; // Unused
JITStubArg args[6];
@@ -157,11 +158,13 @@ namespace JSC {
ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
};
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
struct JITStackFrame {
JITStubArg padding; // Unused
JITStubArg args[7];
+ ReturnAddressPtr thunkReturnAddress;
+
void* preservedR4;
void* preservedR5;
void* preservedR6;
@@ -172,16 +175,20 @@ namespace JSC {
RegisterFile* registerFile;
CallFrame* callFrame;
JSValue* exception;
+
+ // These arguments are passed on the stack.
Profiler** enabledProfilerReference;
JSGlobalData* globalData;
// When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
};
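With thunkReturnAddress now a named member, returnAddressSlot() on traditional ARM returns a fixed slot inside the frame instead of an address computed one word below it, which is easier to keep in sync with the assembly trampolines. The two conventions side by side, on toy frames:

    struct FrameBelow {                 // old style: address lives below the frame
        void* slots[4];
        void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; }
    };

    struct FrameNamed {                 // new style: reserved slot in the frame
        void* thunkReturnAddress;
        void* slots[4];
        void** returnAddressSlot() { return &thunkReturnAddress; }
    };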
#else
#error "JITStackFrame not defined for this platform."
#endif
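The PLATFORM(...) -> CPU(...) renames throughout this header follow upstream's split of the old catch-all PLATFORM() macro into separate CPU(), OS(), and COMPILER() categories. The convention, simplified from wtf/Platform.h (and relying, as upstream does, on compiler-specific handling of `defined` inside a macro expansion):

    #define CPU(WTF_FEATURE) (defined WTF_CPU_##WTF_FEATURE && WTF_CPU_##WTF_FEATURE)

    #if CPU(X86_64)
    /* 64-bit x86 frame layout */
    #elif CPU(X86)
    /* 32-bit x86 frame layout */
    #endif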
+#define JITSTACKFRAME_ARGS_INDEX (OBJECT_OFFSETOF(JITStackFrame, args) / sizeof(void*))
+
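JITSTACKFRAME_ARGS_INDEX hands the assembly thunks the word index of args within JITStackFrame, so stub arguments can be addressed without hard-coding byte offsets per platform (OBJECT_OFFSETOF is WTF's offsetof wrapper). On a layout whose first member is a single reserved pointer, the index works out to 1:

    #include <cassert>
    #include <cstddef>

    struct ToyStackFrame {
        void* reserved;   // unused padding, as in the x86 frames above
        void* args[6];
    };

    #define TOY_ARGS_INDEX (offsetof(ToyStackFrame, args) / sizeof(void*))

    int main() {
        assert(TOY_ARGS_INDEX == 1); // args begins one machine word in
        return 0;
    }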
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#define STUB_ARGS_DECLARATION void* args, ...
#define STUB_ARGS (reinterpret_cast<void**>(vl_args) - 1)
@@ -195,16 +202,16 @@ namespace JSC {
#define STUB_ARGS_DECLARATION void** args
#define STUB_ARGS (args)
- #if PLATFORM(X86) && COMPILER(MSVC)
+ #if CPU(X86) && COMPILER(MSVC)
#define JIT_STUB __fastcall
- #elif PLATFORM(X86) && COMPILER(GCC)
+ #elif CPU(X86) && COMPILER(GCC)
#define JIT_STUB __attribute__ ((fastcall))
#else
#define JIT_STUB
#endif
#endif
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
struct VoidPtrPair {
void* first;
void* second;
@@ -289,7 +296,6 @@ extern "C" {
EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_next_pname(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION);
@@ -311,6 +317,7 @@ extern "C" {
EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_to_object(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_new_error(STUB_ARGS_DECLARATION);
@@ -332,10 +339,9 @@ extern "C" {
int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
int JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_loop_if_less(STUB_ARGS_DECLARATION);
int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_loop_if_true(STUB_ARGS_DECLARATION);
int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_has_property(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);