Diffstat (limited to 'src/3rdparty/webkit/JavaScriptCore/jit')
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.h |  42
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/JIT.cpp               | 139
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/JIT.h                 | 101
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/JITArithmetic.cpp     |  41
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/JITCall.cpp           |  58
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/JITInlineMethods.h    |  44
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp        |  68
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/JITPropertyAccess.cpp | 174
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/JITStubCall.h         |   3
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/JITStubs.cpp          | 224
-rw-r--r--   src/3rdparty/webkit/JavaScriptCore/jit/JITStubs.h            |  33
11 files changed, 601 insertions(+), 326 deletions(-)
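Note: the bulk of this diff is a mechanical rename of the FIELD_OFFSET macro (removed from JITInlineMethods.h) to OBJECT_OFFSETOF (added to JIT.h). A minimal standalone sketch of what the macro does follows; the macro body is copied from the hunk below, while the Example struct and lengthOffset constant are hypothetical illustrations, not part of this patch:

    #include <cstddef>

    // Same body as the OBJECT_OFFSETOF added to JIT.h below: pretend an object
    // lives at the harmless non-null address 0x4000, take the member's address,
    // and subtract the base to recover the member's byte offset.
    #define OBJECT_OFFSETOF(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)

    struct Example {     // hypothetical stand-in for a class such as JSArray
        void* vptrLike;
        int m_length;
    };

    // Usable on classes where the standard offsetof macro would be rejected.
    static const ptrdiff_t lengthOffset = OBJECT_OFFSETOF(Example, m_length);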
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.h b/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.h
index 18261f8..0de4f79 100644
--- a/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/ExecutableAllocator.h
@@ -26,12 +26,17 @@
 #ifndef ExecutableAllocator_h
 #define ExecutableAllocator_h
 
+#include <limits>
 #include <wtf/Assertions.h>
 #include <wtf/PassRefPtr.h>
 #include <wtf/RefCounted.h>
+#include <wtf/UnusedParam.h>
 #include <wtf/Vector.h>
 
-#include <limits>
+#if PLATFORM(IPHONE)
+#include <libkern/OSCacheControl.h>
+#include <sys/mman.h>
+#endif
 
 #define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
 #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
@@ -151,10 +156,20 @@ public:
         return pool.release();
     }
 
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-    static void makeWritable(void* start, size_t size) { reprotectRegion(start, size, Writable); }
-    static void makeExecutable(void* start, size_t size) { reprotectRegion(start, size, Executable); }
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) || !(PLATFORM(X86) || PLATFORM(X86_64))
+    static void makeWritable(void* start, size_t size)
+    {
+        reprotectRegion(start, size, Writable);
+    }
+
+    static void makeExecutable(void* start, size_t size)
+    {
+        reprotectRegion(start, size, Executable);
+        cacheFlush(start, size);
+    }
 
+    // If ASSEMBLER_WX_EXCLUSIVE protection is turned on, or on non-x86 platforms,
+    // we need to track start & size so we can makeExecutable/cacheFlush at the end.
     class MakeWritable {
     public:
         MakeWritable(void* start, size_t size)
@@ -176,13 +191,32 @@ public:
 #else
     static void makeWritable(void*, size_t) {}
     static void makeExecutable(void*, size_t) {}
+
+    // On x86, without ASSEMBLER_WX_EXCLUSIVE, there is nothing to do here.
 
     class MakeWritable { public: MakeWritable(void*, size_t) {} };
 #endif
 
 private:
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) || !(PLATFORM(X86) || PLATFORM(X86_64))
 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
     static void reprotectRegion(void*, size_t, ProtectionSeting);
+#else
+    static void reprotectRegion(void*, size_t, ProtectionSeting) {}
+#endif
+
+    static void cacheFlush(void* code, size_t size)
+    {
+#if PLATFORM(X86) || PLATFORM(X86_64)
+        UNUSED_PARAM(code);
+        UNUSED_PARAM(size);
+#elif PLATFORM_ARM_ARCH(7) && PLATFORM(IPHONE)
+        sys_dcache_flush(code, size);
+        sys_icache_invalidate(code, size);
+#else
+#error "ExecutableAllocator::cacheFlush not implemented on this platform."
+#endif
+    }
 #endif
 
     RefPtr<ExecutablePool> m_smallAllocationPool;
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JIT.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JIT.cpp
index 7f2656c..02cb09b 100644
--- a/src/3rdparty/webkit/JavaScriptCore/jit/JIT.cpp
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JIT.cpp
@@ -1,4 +1,3 @@
-
 /*
  * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
@@ -46,19 +45,22 @@ using namespace std;
 
 namespace JSC {
 
-void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+void ctiPatchNearCallByReturnAddress(ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
 {
-    returnAddress.relinkNearCallerToTrampoline(newCalleeFunction);
+    MacroAssembler::RepatchBuffer repatchBuffer;
+    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
 }
 
-void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
 {
-    returnAddress.relinkCallerToTrampoline(newCalleeFunction);
+    MacroAssembler::RepatchBuffer repatchBuffer;
+    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
 }
 
-void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, FunctionPtr newCalleeFunction)
+void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
 {
-    returnAddress.relinkCallerToFunction(newCalleeFunction);
+    MacroAssembler::RepatchBuffer repatchBuffer;
+    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
 }
 
 JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
@@ -68,6 +70,7 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
     , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
     , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
     , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
+    , m_bytecodeIndex((unsigned)-1)
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
     , m_jumpTargetsPosition(0)
 {
@@ -403,10 +406,10 @@ void JIT::privateCompile()
         // In the case of a fast linked call, we do not set this up in the caller.
         emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
 
-        peek(regT0, FIELD_OFFSET(JITStackFrame, registerFile) / sizeof (void*));
+        peek(regT0, OBJECT_OFFSETOF(JITStackFrame, registerFile) / sizeof (void*));
         addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
-        slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, FIELD_OFFSET(RegisterFile, m_end)));
+        slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, OBJECT_OFFSETOF(RegisterFile, m_end)));
         afterRegisterFileCheck = label();
     }
 
@@ -426,7 +429,7 @@ void JIT::privateCompile()
 
     ASSERT(m_jmpTable.isEmpty());
 
-    PatchBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
 
     // Translate vPC offsets into addresses in JIT generated code, for switch tables.
     for (unsigned i = 0; i < m_switches.size(); ++i) {
@@ -474,7 +477,7 @@ void JIT::privateCompile()
 
     // Link absolute addresses for jsr
     for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
-        patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).addressForJSR());
+        patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
 
 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
     for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
@@ -489,7 +492,6 @@ void JIT::privateCompile()
         info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
         info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
         info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
-        info.coldPathOther = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].coldPathOther);
     }
 #endif
     unsigned methodCallCount = m_methodCallCompilationInfo.size();
@@ -514,8 +516,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     Jump array_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
 
     // Checks out okay! - get the length from the storage
-    loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT0);
-    load32(Address(regT0, FIELD_OFFSET(ArrayStorage, m_length)), regT0);
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
+    load32(Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT0);
 
     Jump array_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
 
@@ -532,8 +534,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
 
     // Checks out okay! - get the length from the Ustring.
-    loadPtr(Address(regT0, FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep)), regT0);
-    load32(Address(regT0, FIELD_OFFSET(UString::Rep, len)), regT0);
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT0);
+    load32(Address(regT0, OBJECT_OFFSETOF(UString::Rep, len)), regT0);
 
     Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
 
@@ -544,16 +546,14 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
 #endif
 
     // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
-
+    COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
+
     Label virtualCallPreLinkBegin = align();
 
     // Load the callee CodeBlock* into eax
-    loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3);
-    loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
+    loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
     Jump hasCodeBlock1 = branchTestPtr(NonZero, regT0);
-    // If m_code is null and m_jitCode is not, then we have a native function, so arity is irrelevant
-    loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
-    Jump isNativeFunc1 = branchTestPtr(NonZero, regT0);
     preverveReturnAddressAfterCall(regT3);
     restoreArgumentReference();
     Call callJSFunction1 = call();
@@ -562,8 +562,10 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     restoreReturnAddressBeforeReturn(regT3);
     hasCodeBlock1.link(this);
 
+    Jump isNativeFunc1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
+
     // Check argCount matches callee arity.
-    Jump arityCheckOkay1 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
+    Jump arityCheckOkay1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
     preverveReturnAddressAfterCall(regT3);
     emitPutJITStubArg(regT3, 2);
     emitPutJITStubArg(regT0, 4);
@@ -590,12 +592,9 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     Label virtualCallLinkBegin = align();
 
     // Load the callee CodeBlock* into eax
-    loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3);
-    loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
+    loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
     Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
-    // If m_code is null and m_jitCode is not, then we have a native function, so arity is irrelevant
-    loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
-    Jump isNativeFunc2 = branchTestPtr(NonZero, regT0);
     preverveReturnAddressAfterCall(regT3);
     restoreArgumentReference();
     Call callJSFunction2 = call();
@@ -604,8 +603,10 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     restoreReturnAddressBeforeReturn(regT3);
     hasCodeBlock2.link(this);
 
+    Jump isNativeFunc2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
+
     // Check argCount matches callee arity.
-    Jump arityCheckOkay2 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
+    Jump arityCheckOkay2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
     preverveReturnAddressAfterCall(regT3);
     emitPutJITStubArg(regT3, 2);
     emitPutJITStubArg(regT0, 4);
@@ -631,23 +632,22 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     Label virtualCallBegin = align();
 
     // Load the callee CodeBlock* into eax
-    loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3);
-    loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
+    loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
     Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
-    // If m_code is null and m_jitCode is not, then we have a native function, so arity is irrelevant
-    loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
-    Jump isNativeFunc3 = branchTestPtr(NonZero, regT0);
     preverveReturnAddressAfterCall(regT3);
     restoreArgumentReference();
     Call callJSFunction3 = call();
     emitGetJITStubArg(1, regT2);
     emitGetJITStubArg(3, regT1);
     restoreReturnAddressBeforeReturn(regT3);
-    loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3); // reload the function body nody, so we can reload the code pointer.
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body nody, so we can reload the code pointer.
     hasCodeBlock3.link(this);
+
+    Jump isNativeFunc3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
 
     // Check argCount matches callee arity.
-    Jump arityCheckOkay3 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
+    Jump arityCheckOkay3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
     preverveReturnAddressAfterCall(regT3);
     emitPutJITStubArg(regT3, 2);
     emitPutJITStubArg(regT0, 4);
@@ -657,12 +657,13 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     emitGetJITStubArg(1, regT2);
     emitGetJITStubArg(3, regT1);
     restoreReturnAddressBeforeReturn(regT3);
-    loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3); // reload the function body nody, so we can reload the code pointer.
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body nody, so we can reload the code pointer.
     arityCheckOkay3.link(this);
 
-    // load ctiCode from the new codeBlock.
-    loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
     isNativeFunc3.link(this);
+
+    // load ctiCode from the new codeBlock.
+    loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
+
     compileOpCallInitializeCallFrame();
     jump(regT0);
 
@@ -689,7 +690,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount
 
     // Push argcount
-    storePtr(X86::ecx, Address(stackPointerRegister, FIELD_OFFSET(ArgList, m_argCount)));
+    storePtr(X86::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
 
     // Calculate the start of the callframe header, and store in edx
     addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx);
@@ -699,7 +700,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     subPtr(X86::ecx, X86::edx);
 
     // push pointer to arguments
-    storePtr(X86::edx, Address(stackPointerRegister, FIELD_OFFSET(ArgList, m_args)));
+    storePtr(X86::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
 
     // ArgList is passed by reference so is stackPointerRegister
     move(stackPointerRegister, X86::ecx);
@@ -711,10 +712,10 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
 
     move(callFrameRegister, X86::edi);
 
-    call(Address(X86::esi, FIELD_OFFSET(JSFunction, m_data)));
+    call(Address(X86::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
 
     addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
-#else
+#elif PLATFORM(X86)
     emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
 
     /* We have two structs that we use to describe the stackframe we set up for our
@@ -760,7 +761,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
 
     // push argcount
-    storePtr(regT0, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, args) + FIELD_OFFSET(ArgList, m_argCount)));
+    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
 
     // Calculate the start of the callframe header, and store in regT1
     addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
@@ -768,28 +769,28 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
     mul32(Imm32(sizeof(Register)), regT0, regT0);
     subPtr(regT0, regT1);
-    storePtr(regT1, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, args) + FIELD_OFFSET(ArgList, m_args)));
+    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
 
     // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
-    addPtr(Imm32(FIELD_OFFSET(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
-    storePtr(regT0, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, argPointer)));
+    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
+    storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
 
     // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
     loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
-    storePtr(regT1, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, thisValue)));
+    storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
 #if COMPILER(MSVC) || PLATFORM(LINUX)
 
     // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
-    addPtr(Imm32(FIELD_OFFSET(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
+    addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
 
     // Plant callee
     emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
-    storePtr(X86::eax, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, callee)));
+    storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
 
     // Plant callframe
     move(callFrameRegister, X86::edx);
 
-    call(Address(X86::eax, FIELD_OFFSET(JSFunction, m_data)));
+    call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
 
     // JSValue is a non-POD type
     loadPtr(Address(X86::eax), X86::eax);
@@ -799,13 +800,17 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     // Plant callframe
     move(callFrameRegister, X86::ecx);
 
-    call(Address(X86::edx, FIELD_OFFSET(JSFunction, m_data)));
+    call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
 #endif
 
     // We've put a few temporaries on the stack in addition to the actual arguments
     // so pull them off now
     addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
 
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+    breakpoint();
 #endif
 
     // Check for an exception
@@ -830,7 +835,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     storePtr(regT1, regT2);
     move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
     emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-    poke(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
+    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
     restoreReturnAddressBeforeReturn(regT2);
     ret();
 
@@ -845,7 +850,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
 #endif
 
     // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
-    PatchBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
 
 #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
     patchBuffer.link(array_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
@@ -882,15 +887,15 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
 
 void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
 {
-    loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject, d)), dst);
-    loadPtr(Address(dst, FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers)), dst);
+    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst);
+    loadPtr(Address(dst, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), dst);
     loadPtr(Address(dst, index * sizeof(Register)), dst);
 }
 
 void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index)
 {
-    loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject, d)), variableObject);
-    loadPtr(Address(variableObject, FIELD_OFFSET(JSVariableObject::JSVariableObjectData, registers)), variableObject);
+    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), variableObject);
+    loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), variableObject);
     storePtr(src, Address(variableObject, index * sizeof(Register)));
 }
 
@@ -899,25 +904,29 @@ void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
     // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
     // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
     // match). Reset the check so it no longer matches.
-    callLinkInfo->hotPathBegin.repatch(JSValue::encode(JSValue()));
+    RepatchBuffer repatchBuffer;
+    repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue()));
 }
 
-void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount)
+void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
 {
+    ASSERT(calleeCodeBlock);
+    RepatchBuffer repatchBuffer;
+
     // Currently we only link calls with the exact number of arguments.
     // If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
-    if (!calleeCodeBlock || callerArgCount == calleeCodeBlock->m_numParameters) {
+    if (callerArgCount == calleeCodeBlock->m_numParameters || calleeCodeBlock->codeType() == NativeCode) {
         ASSERT(!callLinkInfo->isLinked());
 
         if (calleeCodeBlock)
             calleeCodeBlock->addCaller(callLinkInfo);
 
-        callLinkInfo->hotPathBegin.repatch(callee);
-        callLinkInfo->hotPathOther.relink(code.addressForCall());
+        repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
+        repatchBuffer.relink(callLinkInfo->hotPathOther, code.addressForCall());
     }
 
-    // patch the instruction that jumps out to the cold path, so that we only try to link once.
-    callLinkInfo->hotPathBegin.jumpAtOffset(patchOffsetOpCallCompareToJump).relink(callLinkInfo->coldPathOther);
+    // patch the call so we do not continue to try to link.
+    repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
 }
 
 } // namespace JSC
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JIT.h b/src/3rdparty/webkit/JavaScriptCore/jit/JIT.h
index c9e5355..bc006fc 100644
--- a/src/3rdparty/webkit/JavaScriptCore/jit/JIT.h
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JIT.h
@@ -28,6 +28,11 @@
 
 #include <wtf/Platform.h>
 
+// OBJECT_OFFSETOF: Like the C++ offsetof macro, but you can use it with classes.
+// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
+// NULL can cause compiler problems, especially in cases of multiple inheritance.
+#define OBJECT_OFFSETOF(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
+
 #if ENABLE(JIT)
 
 // We've run into some problems where changing the size of the class JIT leads to
@@ -153,7 +158,6 @@ namespace JSC {
         MacroAssembler::DataLabelPtr hotPathBegin;
         MacroAssembler::Call hotPathOther;
         MacroAssembler::Call callReturnLocation;
-        MacroAssembler::Label coldPathOther;
     };
 
     struct MethodCallCompilationInfo {
@@ -167,9 +171,9 @@ namespace JSC {
     };
 
     // Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions.
-    void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction);
-    void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction);
-    void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, FunctionPtr newCalleeFunction);
+    void ctiPatchNearCallByReturnAddress(ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+    void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+    void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
 
     class JIT : private MacroAssembler {
         friend class JITStubCall;
@@ -228,6 +232,22 @@ namespace JSC {
         static const FPRegisterID fpRegT0 = X86::xmm0;
        static const FPRegisterID fpRegT1 = X86::xmm1;
         static const FPRegisterID fpRegT2 = X86::xmm2;
+#elif PLATFORM_ARM_ARCH(7)
+        static const RegisterID returnValueRegister = ARM::r0;
+        static const RegisterID cachedResultRegister = ARM::r0;
+        static const RegisterID firstArgumentRegister = ARM::r0;
+
+        static const RegisterID regT0 = ARM::r0;
+        static const RegisterID regT1 = ARM::r1;
+        static const RegisterID regT2 = ARM::r2;
+        static const RegisterID regT3 = ARM::r4;
+
+        static const RegisterID callFrameRegister = ARM::r5;
+        static const RegisterID timeoutCheckRegister = ARM::r6;
+
+        static const FPRegisterID fpRegT0 = ARM::d0;
+        static const FPRegisterID fpRegT1 = ARM::d1;
+        static const FPRegisterID fpRegT2 = ARM::d2;
 #else
 #error "JIT not supported on this platform."
 #endif
@@ -260,7 +280,7 @@ namespace JSC {
         static const int patchOffsetMethodCheckProtoObj = 20;
         static const int patchOffsetMethodCheckProtoStruct = 30;
         static const int patchOffsetMethodCheckPutFunction = 50;
-#else
+#elif PLATFORM(X86)
         // These architecture specific value are used to enable patching - see comment on op_put_by_id.
         static const int patchOffsetPutByIdStructure = 7;
         static const int patchOffsetPutByIdExternalLoad = 13;
@@ -287,6 +307,29 @@ namespace JSC {
         static const int patchOffsetMethodCheckProtoObj = 11;
         static const int patchOffsetMethodCheckProtoStruct = 18;
         static const int patchOffsetMethodCheckPutFunction = 29;
+#elif PLATFORM_ARM_ARCH(7)
+        // These architecture specific value are used to enable patching - see comment on op_put_by_id.
+        static const int patchOffsetPutByIdStructure = 10;
+        static const int patchOffsetPutByIdExternalLoad = 20;
+        static const int patchLengthPutByIdExternalLoad = 12;
+        static const int patchOffsetPutByIdPropertyMapOffset = 40;
+        // These architecture specific value are used to enable patching - see comment on op_get_by_id.
+        static const int patchOffsetGetByIdStructure = 10;
+        static const int patchOffsetGetByIdBranchToSlowCase = 20;
+        static const int patchOffsetGetByIdExternalLoad = 20;
+        static const int patchLengthGetByIdExternalLoad = 12;
+        static const int patchOffsetGetByIdPropertyMapOffset = 40;
+        static const int patchOffsetGetByIdPutResult = 44;
+#if ENABLE(OPCODE_SAMPLING)
+        static const int patchOffsetGetByIdSlowCaseCall = 0; // FIMXE
+#else
+        static const int patchOffsetGetByIdSlowCaseCall = 28;
+#endif
+        static const int patchOffsetOpCallCompareToJump = 10;
+
+        static const int patchOffsetMethodCheckProtoObj = 18;
+        static const int patchOffsetMethodCheckProtoStruct = 28;
+        static const int patchOffsetMethodCheckPutFunction = 46;
 #endif
 
     public:
@@ -296,57 +339,81 @@ namespace JSC {
             jit.privateCompile();
         }
 
-        static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
+        static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress)
         {
             JIT jit(globalData, codeBlock);
+#if ENABLE(OPCODE_SAMPLING)
+            jit->m_bytecodeIndex = jit->m_codeBlock->getCallReturnOffset(returnAddress.value());
+#endif
             jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame);
         }
 
         static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
         {
             JIT jit(globalData, codeBlock);
+#if ENABLE(OPCODE_SAMPLING)
+            jit->m_bytecodeIndex = jit->m_codeBlock->getCallReturnOffset(returnAddress.value());
+#endif
             jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, cachedOffset);
         }
         static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset)
         {
             JIT jit(globalData, codeBlock);
+#if ENABLE(OPCODE_SAMPLING)
+            jit->m_bytecodeIndex = jit->m_codeBlock->getCallReturnOffset(returnAddress.value());
+#endif
             jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, cachedOffset, callFrame);
         }
         static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo,
                                            PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset)
         {
             JIT jit(globalData, codeBlock);
+#if ENABLE(OPCODE_SAMPLING)
+            jit->m_bytecodeIndex = jit->m_codeBlock->getCallReturnOffset(returnAddress.value());
+#endif
            jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, cachedOffset, callFrame);
         }
 
-        static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress)
+        static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress)
         {
             JIT jit(globalData, codeBlock);
+#if ENABLE(OPCODE_SAMPLING)
+            jit->m_bytecodeIndex = jit->m_codeBlock->getCallReturnOffset(returnAddress.value());
+#endif
             jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame);
         }
 
-        static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
+        static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
         {
             JIT jit(globalData, codeBlock);
+#if ENABLE(OPCODE_SAMPLING)
+            jit->m_bytecodeIndex = jit->m_codeBlock->getCallReturnOffset(returnAddress.value());
+#endif
             jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
         }
 
         static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
         {
             JIT jit(globalData);
+#if ENABLE(OPCODE_SAMPLING)
+            jit->m_bytecodeIndex = jit->m_codeBlock->getCallReturnOffset(returnAddress.value());
+#endif
             jit.privateCompileCTIMachineTrampolines(executablePool, globalData, ctiArrayLengthTrampoline, ctiStringLengthTrampoline, ctiVirtualCallPreLink, ctiVirtualCallLink, ctiVirtualCall, ctiNativeCallThunk);
         }
 
-        static void patchGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
-        static void patchPutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
+        static void patchGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
+        static void patchPutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
         static void patchMethodCallProto(MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*);
 
-        static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ProcessorReturnAddress returnAddress)
+        static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
         {
             JIT jit(globalData, codeBlock);
+#if ENABLE(OPCODE_SAMPLING)
+            jit->m_bytecodeIndex = jit->m_codeBlock->getCallReturnOffset(returnAddress.value());
+#endif
             return jit.privateCompilePatchGetArrayLength(returnAddress);
         }
 
-        static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount);
+        static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount, JSGlobalData*);
         static void unlinkCall(CallLinkInfo*);
 
     private:
@@ -367,15 +434,15 @@ namespace JSC {
         void privateCompileLinkPass();
         void privateCompileSlowCases();
         void privateCompile();
-        void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame);
+        void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
         void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
         void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
         void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame);
-        void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame);
-        void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ProcessorReturnAddress returnAddress);
+        void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+        void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress);
 
         void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk);
-        void privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress);
+        void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
 
         void addSlowCase(Jump);
         void addJump(Jump, int);
@@ -397,7 +464,7 @@ namespace JSC {
         void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
 
         void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
-        void compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset);
+        void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset);
         void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
 
         // Arithmetic Ops
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITArithmetic.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JITArithmetic.cpp
index 2ceb935..15808e2 100644
--- a/src/3rdparty/webkit/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITArithmetic.cpp
@@ -116,18 +116,19 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
 #endif
     } else {
         emitGetVirtualRegisters(op1, regT0, op2, regT2);
-        if (supportsFloatingPoint()) {
+        if (supportsFloatingPointTruncate()) {
             Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
 #if USE(ALTERNATE_JSIMMEDIATE)
             // supportsFloatingPoint() && USE(ALTERNATE_JSIMMEDIATE) => 3 SlowCases
             addSlowCase(emitJumpIfNotImmediateNumber(regT0));
+            addPtr(tagTypeNumberRegister, regT0);
             movePtrToDouble(regT0, fpRegT0);
             addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
 #else
             // supportsFloatingPoint() && !USE(ALTERNATE_JSIMMEDIATE) => 5 SlowCases (of which 1 IfNotJSCell)
             emitJumpSlowCaseIfNotJSCell(regT0, op1);
             addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
-            loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
             addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
             addSlowCase(branchAdd32(Overflow, regT0, regT0));
 #endif
@@ -171,7 +172,7 @@ void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEnt
         stubCall.addArgument(regT0);
         stubCall.addArgument(op2, regT2);
     } else {
-        if (supportsFloatingPoint()) {
+        if (supportsFloatingPointTruncate()) {
 #if USE(ALTERNATE_JSIMMEDIATE)
             linkSlowCase(iter);
             linkSlowCase(iter);
@@ -262,7 +263,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
         fail1 = emitJumpIfNotJSCell(regT0);
 
         Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
-        loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
 #endif
 
         int32_t op2imm = getConstantOperand(op2).getInt32Fast();;
@@ -303,7 +304,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
         fail1 = emitJumpIfNotJSCell(regT1);
 
         Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
-        loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+        loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
 #endif
 
         int32_t op1imm = getConstantOperand(op1).getInt32Fast();;
@@ -353,8 +354,8 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
 
         Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
         Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
-        loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
-        loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+        loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
 #endif
 
         emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
@@ -447,7 +448,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
         fail1 = emitJumpIfNotJSCell(regT0);
 
         Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
-        loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
 #endif
 
         int32_t op2imm = getConstantOperand(op2).getInt32Fast();;
@@ -488,7 +489,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
         fail1 = emitJumpIfNotJSCell(regT1);
 
         Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
-        loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+        loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
 #endif
 
         int32_t op1imm = getConstantOperand(op1).getInt32Fast();;
@@ -538,8 +539,8 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
 
         Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
         Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
-        loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
-        loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
+        loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
 #endif
 
         emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
@@ -1105,7 +1106,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
         }
 
         // (1a) if we get here, src1 is also a number cell
-        loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
         Jump loadedDouble = jump();
         // (1b) if we get here, src1 is an immediate
         op1imm.link(this);
@@ -1114,16 +1115,16 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
         // (1c)
         loadedDouble.link(this);
         if (opcodeID == op_add)
-            addDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+            addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
         else if (opcodeID == op_sub)
-            subDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+            subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
         else {
             ASSERT(opcodeID == op_mul);
-            mulDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+            mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
         }
 
         // Store the result to the JSNumberCell and jump.
-        storeDouble(fpRegT0, Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)));
+        storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
         move(regT1, regT0);
         emitPutVirtualRegister(dst);
         wasJSNumberCell2 = jump();
@@ -1151,7 +1152,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
         }
 
         // (1a) if we get here, src2 is also a number cell
-        loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+        loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
         Jump loadedDouble = jump();
         // (1b) if we get here, src2 is an immediate
         op2imm.link(this);
@@ -1159,7 +1160,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
         convertInt32ToDouble(regT1, fpRegT1);
         // (1c)
         loadedDouble.link(this);
-        loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
         if (opcodeID == op_add)
             addDouble(fpRegT1, fpRegT0);
         else if (opcodeID == op_sub)
@@ -1168,11 +1169,11 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
             ASSERT(opcodeID == op_mul);
             mulDouble(fpRegT1, fpRegT0);
         }
-        storeDouble(fpRegT0, Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)));
+        storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
         emitPutVirtualRegister(dst);
 
         // Store the result to the JSNumberCell and jump.
-        storeDouble(fpRegT0, Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)));
+        storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
         emitPutVirtualRegister(dst);
         wasJSNumberCell1 = jump();
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITCall.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JITCall.cpp
index cf852be..abdb5ed 100644
--- a/src/3rdparty/webkit/JavaScriptCore/jit/JITCall.cpp
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITCall.cpp
@@ -49,7 +49,7 @@ void JIT::compileOpCallInitializeCallFrame()
 {
     store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
 
-    loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_data) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
 
     storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
     storePtr(regT2, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
@@ -242,7 +242,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
         // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
         storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
         storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
-        loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_data) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain
+        loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
         store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
         storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
         storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
@@ -285,60 +285,32 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
         emitGetVirtualRegister(callee, regT2);
     }
 
-    move(Imm32(argCount), regT1);
-
     // Speculatively roll the callframe, assuming argCount will match the arity.
     storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
     addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+    move(Imm32(argCount), regT1);
 
-    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation =
-        emitNakedCall(m_globalData->jitStubs.ctiVirtualCallPreLink());
+    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallPreLink());
 
-    Jump storeResultForFirstRun = jump();
+    // Put the return value in dst.
+    emitPutVirtualRegister(dst);
+    sampleCodeBlock(m_codeBlock);
 
-    // This is the address for the cold path *after* the first run (which tries to link the call).
-    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = MacroAssembler::Label(this);
+    // If not, we need an extra case in the if below!
+    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
 
-    // The arguments have been set up on the hot path for op_call_eval
-    if (opcodeID == op_call)
-        compileOpCallSetupArgs(instruction);
-    else if (opcodeID == op_construct)
-        compileOpConstructSetupArgs(instruction);
-
-    // Check for JSFunctions.
-    Jump isNotObject = emitJumpIfNotJSCell(regT2);
-    Jump isJSFunction = branchPtr(Equal, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr));
+    // Done! - return back to the hot path.
+    if (opcodeID == op_construct)
+        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
+    else
+        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
 
     // This handles host functions
-    isNotObject.link(this);
     callLinkFailNotObject.link(this);
     callLinkFailNotJSFunction.link(this);
-    JITStubCall stubCall(this, opcodeID == op_construct ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction);
-    stubCall.call();
-    Jump wasNotJSFunction = jump();
-
-    // Next, handle JSFunctions...
-    isJSFunction.link(this);
+    JITStubCall(this, opcodeID == op_construct ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction).call();
 
-    // First, in the case of a construct, allocate the new object.
-    if (opcodeID == op_construct) {
-        JITStubCall stubCall(this, JITStubs::cti_op_construct_JSConstruct);
-        stubCall.call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
-        emitGetVirtualRegister(callee, regT2);
-    }
-
-    // Speculatively roll the callframe, assuming argCount will match the arity.
-    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
-    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
-    move(Imm32(argCount), regT1);
-
-    emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
-
-    // Put the return value in dst. In the interpreter, op_ret does this.
-    wasNotJSFunction.link(this);
-    storeResultForFirstRun.link(this);
     emitPutVirtualRegister(dst);
 
-    sampleCodeBlock(m_codeBlock);
 }
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITInlineMethods.h b/src/3rdparty/webkit/JavaScriptCore/jit/JITInlineMethods.h
index d5de291..deca0d1 100644
--- a/src/3rdparty/webkit/JavaScriptCore/jit/JITInlineMethods.h
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITInlineMethods.h
@@ -30,15 +30,6 @@
 
 #if ENABLE(JIT)
 
-#if PLATFORM(WIN)
-#undef FIELD_OFFSET // Fix conflict with winnt.h.
-#endif
-
-// FIELD_OFFSET: Like the C++ offsetof macro, but you can use it with classes.
-// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
-// NULL can cause compiler problems, especially in cases of multiple inheritance.
-#define FIELD_OFFSET(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
-
 namespace JSC {
 
 ALWAYS_INLINE void JIT::killLastResultRegister()
@@ -186,6 +177,8 @@ ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
     return nakedCall;
 }
 
+#if PLATFORM(X86) || PLATFORM(X86_64)
+
 ALWAYS_INLINE void JIT::preverveReturnAddressAfterCall(RegisterID reg)
 {
     pop(reg);
@@ -201,31 +194,52 @@ ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
     push(address);
 }
 
+#elif PLATFORM_ARM_ARCH(7)
+
+ALWAYS_INLINE void JIT::preverveReturnAddressAfterCall(RegisterID reg)
+{
+    move(linkRegister, reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+    move(reg, linkRegister);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+    loadPtr(address, linkRegister);
+}
+
+#endif
+
 #if USE(JIT_STUB_ARGUMENT_VA_LIST)
 ALWAYS_INLINE void JIT::restoreArgumentReference()
 {
-    poke(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
+    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
 }
 ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
 #else
 ALWAYS_INLINE void JIT::restoreArgumentReference()
 {
     move(stackPointerRegister, firstArgumentRegister);
-    poke(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
+    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
 }
 ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
 {
-    // In the trampoline on x86-64, the first argument register is not overwritten.
-#if !PLATFORM(X86_64)
+#if PLATFORM(X86)
+    // Within a trampoline the return address will be on the stack at this point.
+    addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
+#elif PLATFORM_ARM_ARCH(7)
     move(stackPointerRegister, firstArgumentRegister);
-    addPtr(Imm32(sizeof(void*)), firstArgumentRegister);
 #endif
+    // In the trampoline on x86-64, the first argument register is not overwritten.
 }
 #endif
 
 ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
 {
-    return branchPtr(NotEqual, Address(reg, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(structure));
+    return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
 }
 
 ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp
index 1737551..dbcb34d 100644
--- a/src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp
@@ -159,18 +159,18 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
     emitJumpSlowCaseIfNotJSCell(regT1);
 
     // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
-    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT0);
-    addSlowCase(branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
-    addSlowCase(branchTest32(Zero, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+    addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+    addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
 
     // If value is not an Object, return false.
     Jump valueIsImmediate = emitJumpIfNotJSCell(regT2);
-    loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT0);
-    Jump valueIsNotObject = branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+    Jump valueIsNotObject = branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
 
     // Check proto is object.
-    loadPtr(Address(regT1, FIELD_OFFSET(JSCell, m_structure)), regT0);
-    addSlowCase(branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+    addSlowCase(branch32(NotEqual, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
 
     // Optimistically load the result true, and start looping.
     // Initially, regT1 still contains proto and regT2 still contains value.
@@ -180,8 +180,8 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
 
     // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
     // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
-    loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
-    loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
     Jump isInstance = branchPtr(Equal, regT2, regT1);
     branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop);
@@ -251,9 +251,9 @@ void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
     emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
     while (skip--)
-        loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, next)), regT0);
+        loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
 
-    loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, object)), regT0);
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
     emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
     emitPutVirtualRegister(currentInstruction[1].u.operand);
 }
@@ -265,9 +265,9 @@ void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
     emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
     emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
     while (skip--)
-        loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, next)), regT1);
+        loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
 
-    loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, object)), regT1);
+    loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
     emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
 }
 
@@ -327,8 +327,8 @@ void JIT::emit_op_construct_verify(Instruction* currentInstruction)
     emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
 
     emitJumpSlowCaseIfNotJSCell(regT0);
-    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
-    addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+    addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
 
 }
 
@@ -407,11 +407,11 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction)
     // Check Structure of global object
     move(ImmPtr(globalObject), regT0);
     loadPtr(structureAddress, regT1);
-    Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match
+    Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); // Structures don't match
 
     // Load cached property
     // Assume that the global object always uses external storage.
-    loadPtr(Address(regT0, FIELD_OFFSET(JSGlobalObject, m_externalStorage)), regT0);
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT0);
     load32(offsetAddr, regT1);
     loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
     emitPutVirtualRegister(currentInstruction[1].u.operand);
@@ -459,8 +459,8 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
     Jump isImmediate = emitJumpIfNotJSCell(regT0);
 
     // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); - addJump(branchTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); + addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); Jump wasNotImmediate = jump(); // Now handle the immediate cases - undefined & null @@ -480,8 +480,8 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction) Jump isImmediate = emitJumpIfNotJSCell(regT0); // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. - loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); - addJump(branchTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); + addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); Jump wasNotImmediate = jump(); // Now handle the immediate cases - undefined & null @@ -670,8 +670,8 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction) Jump wasImmediate = emitJumpIfImmediateInteger(regT0); emitJumpSlowCaseIfNotJSCell(regT0, srcVReg); - loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); - addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType))); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); + addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType))); wasImmediate.link(this); @@ -689,7 +689,7 @@ void JIT::emit_op_push_new_scope(Instruction* currentInstruction) void JIT::emit_op_catch(Instruction* currentInstruction) { killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code. 
- peek(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*)); + peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*)); emitPutVirtualRegister(currentInstruction[1].u.operand); } @@ -781,8 +781,8 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction) emitGetVirtualRegister(src1, regT0); Jump isImmediate = emitJumpIfNotJSCell(regT0); - loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); - setTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); + setTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0); Jump wasNotImmediate = jump(); @@ -806,8 +806,8 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction) emitGetVirtualRegister(src1, regT0); Jump isImmediate = emitJumpIfNotJSCell(regT0); - loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); - setTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); + setTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0); Jump wasNotImmediate = jump(); @@ -866,14 +866,14 @@ void JIT::emit_op_convert_this(Instruction* currentInstruction) emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); emitJumpSlowCaseIfNotJSCell(regT0); - loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT1); - addSlowCase(branchTest32(NonZero, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion))); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1); + addSlowCase(branchTest32(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion))); } void JIT::emit_op_profile_will_call(Instruction* currentInstruction) { - peek(regT1, FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*)); + peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*)); Jump noProfiler = branchTestPtr(Zero, Address(regT1)); JITStubCall stubCall(this, JITStubs::cti_op_profile_will_call); @@ -885,7 +885,7 @@ void JIT::emit_op_profile_will_call(Instruction* currentInstruction) void JIT::emit_op_profile_did_call(Instruction* currentInstruction) { - peek(regT1, FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*)); + peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*)); Jump noProfiler = branchTestPtr(Zero, Address(regT1)); JITStubCall stubCall(this, JITStubs::cti_op_profile_did_call); @@ -943,10 +943,10 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas // This is the slow case that handles accesses to arrays above the fast cut-off. // First, check if this is an access to the vector linkSlowCase(iter); - branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow); + branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), beginGetByValSlow); // okay, missed the fast region, but it is still in the vector. Get the value. 
- loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT2); + loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT2); // Check whether the value loaded is zero; if so we need to return undefined. branchTestPtr(Zero, regT2, beginGetByValSlow); move(regT2, regT0); diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITPropertyAccess.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JITPropertyAccess.cpp index 29b9cab..ed8f48f 100644 --- a/src/3rdparty/webkit/JavaScriptCore/jit/JITPropertyAccess.cpp +++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITPropertyAccess.cpp @@ -64,11 +64,11 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction) addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr))); // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff - loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2); - addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)))); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2); + addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)))); // Get the value from the vector - loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0); + loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); } @@ -86,19 +86,19 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction) addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr))); // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff - loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2); - Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2); + Jump inFastVector = branch32(Below, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))); // No; oh well, check if the access is within the vector - if so, we may still be okay. - addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)))); + addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)))); // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location. // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff. - addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])))); + addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])))); // All good - put the value into the array. 
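[Editor's note] Condensing the put_by_val checks above into one place, the fast path has the following shape. This is a hedged C++ outline using the member names from the hunks; slowCase() is a hypothetical stand-in for addSlowCase, and the final store is the storePtr planted at inFastVector just below:

    // Outline only - not the emitted code.
    void putByValOutline(JSArray* array, unsigned index, JSValue value)
    {
        ArrayStorage* storage = array->m_storage;        // loadPtr(m_storage)
        if (index >= array->m_fastAccessCutoff) {
            if (index >= storage->m_vectorLength)
                return slowCase();   // outside the vector: needs (re)allocation
            if (!storage->m_vector[index])
                return slowCase();   // first write to this hole: length/cutoff
                                     // bookkeeping happens in the stub
        }
        storage->m_vector[index] = value;                // the inFastVector store
    }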
inFastVector.link(this); emitGetVirtualRegister(currentInstruction[3].u.operand, regT0); - storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))); + storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); } void JIT::emit_op_put_by_index(Instruction* currentInstruction) @@ -212,9 +212,9 @@ void JIT::emit_op_method_check(Instruction* currentInstruction) m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex)); MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last(); Jump notCell = emitJumpIfNotJSCell(regT0); - Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); + Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1); - Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, FIELD_OFFSET(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); + Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); // This will be relinked to load the function without doing a load. DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0); @@ -285,13 +285,12 @@ void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propert m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin; DataLabelPtr structureToCompare; - Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); + Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); addSlowCase(structureCheck); ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure); ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase); - Label externalLoad(this); - loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_externalStorage)), regT0); + Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0); Label externalLoadComplete(this); ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad); ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad); @@ -358,12 +357,11 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction) // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over. 
DataLabelPtr structureToCompare; - addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)))); + addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)))); ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure); // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used. - Label externalLoad(this); - loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_externalStorage)), regT0); + Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0); Label externalLoadComplete(this); ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad); ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad); @@ -398,9 +396,9 @@ void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* s { int offset = cachedOffset * sizeof(JSValue); if (structure->isUsingInlineStorage()) - offset += FIELD_OFFSET(JSObject, m_inlineStorage); + offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage); else - loadPtr(Address(base, FIELD_OFFSET(JSObject, m_externalStorage)), base); + loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base); storePtr(value, Address(base, offset)); } @@ -409,34 +407,37 @@ void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* { int offset = cachedOffset * sizeof(JSValue); if (structure->isUsingInlineStorage()) - offset += FIELD_OFFSET(JSObject, m_inlineStorage); + offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage); else - loadPtr(Address(base, FIELD_OFFSET(JSObject, m_externalStorage)), base); + loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base); loadPtr(Address(base, offset), result); } -void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset) +void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset) { if (base->isUsingInlineStorage()) loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result); - else - loadPtr(static_cast<void*>(&base->m_externalStorage[cachedOffset]), result); + else { + PropertyStorage* protoPropertyStorage = &base->m_externalStorage; + loadPtr(static_cast<void*>(protoPropertyStorage), temp); + loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result); + } } -void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress) +void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress) { JumpList failureCases; // Check eax is an object of the right Structure. 
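[Editor's note] The compilePutDirectOffset/compileGetDirectOffset pair above encodes the two places a cached property can live. A hedged C++ rendering of the addressing decision (member names from the hunks; one load for inline slots, two for external storage):

    // Sketch only: what the emitted loads compute for a cached offset.
    JSValue getDirectOffsetSketch(JSObject* base, Structure* structure, size_t cachedOffset)
    {
        if (structure->isUsingInlineStorage())
            return base->m_inlineStorage[cachedOffset];  // slot inside the cell itself
        // Out-of-line: fetch the current storage pointer, then index it.
        return base->m_externalStorage[cachedOffset];
    }

The new JSObject* overload grows a temp register for the same reason: instead of baking the external storage array's address into the stub as a constant, it loads the storage pointer through &base->m_externalStorage at run time - presumably so that a reallocation of the external storage does not leave the stub reading a stale array.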
failureCases.append(emitJumpIfNotJSCell(regT0)); - failureCases.append(branchPtr(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure))); + failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure))); JumpList successCases; // ecx = baseObject - loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); // proto(ecx) = baseObject->structure()->prototype() - failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType))); + failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType))); - loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2); + loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2); // ecx = baseObject->m_structure for (RefPtr<Structure>* it = chain->head(); *it; ++it) { @@ -444,11 +445,11 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull())))); // Check the structure id - failureCases.append(branchPtr(NotEqual, Address(regT2, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get()))); + failureCases.append(branchPtr(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(it->get()))); - loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2); - failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType))); - loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2); + loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2); + failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType))); + loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2); } successCases.link(this); @@ -477,7 +478,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure // codeblock should ensure oldStructure->m_refCount > 0 sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount())); add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount())); - storePtr(ImmPtr(newStructure), Address(regT0, FIELD_OFFSET(JSCell, m_structure))); + storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); // write the value compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset); @@ -489,7 +490,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure restoreArgumentReferenceForTrampoline(); Call failureCall = tailRecursiveCall(); - PatchBuffer patchBuffer(this, m_codeBlock->executablePool()); + LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); patchBuffer.link(failureCall, FunctionPtr(JITStubs::cti_op_put_by_id_fail)); @@ -500,77 +501,81 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); stubInfo->stubRoutine = entryLabel; - returnAddress.relinkCallerToTrampoline(entryLabel); + RepatchBuffer repatchBuffer; + repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel); } -void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress) 
+void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) { + RepatchBuffer repatchBuffer; + // We don't want to patch more than once - in future go to cti_op_get_by_id_generic. // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now. - returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_self_fail)); + repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_self_fail)); int offset = sizeof(JSValue) * cachedOffset; // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load // and makes the subsequent load's offset automatically correct if (structure->isUsingInlineStorage()) - stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad).repatchLoadPtrToLEA(); + repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad)); // Patch the offset into the property map to load from, then patch the Structure to look for. - stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure).repatch(structure); - stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset).repatch(offset); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset); } void JIT::patchMethodCallProto(MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto) { + RepatchBuffer repatchBuffer; + ASSERT(!methodCallLinkInfo.cachedStructure); methodCallLinkInfo.cachedStructure = structure; structure->ref(); - methodCallLinkInfo.structureLabel.repatch(structure); - methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj).repatch(proto); - methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct).repatch(proto->structure()); - methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction).repatch(callee); + repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure); + repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto); + repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), proto->structure()); + repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee); } -void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress) +void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) { + RepatchBuffer repatchBuffer; + // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. // Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now. 
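[Editor's note] A pattern worth calling out once, since it repeats through the rest of this file: patching moves from methods on ProcessorReturnAddress/CodeLocation objects to a free-standing RepatchBuffer that takes the location as its first argument. Schematically (a hedged before/after distilled from the hunks, with 'off' as a stand-in offset; not a compilable unit on its own):

    // Before: locations patched themselves.
    //   returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_self_fail));
    //   stubInfo->hotPathBegin.dataLabelPtrAtOffset(off).repatch(structure);
    //
    // After: a RepatchBuffer performs every rewrite of already-executable code.
    RepatchBuffer repatchBuffer;
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_self_fail));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(off), structure);

Funneling the rewrites through one object gives platforms a single choke point for any page re-protection or instruction-cache maintenance that modifying live code requires.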
- returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_put_by_id_generic)); + repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic)); int offset = sizeof(JSValue) * cachedOffset; // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load // and makes the subsequent load's offset automatically correct if (structure->isUsingInlineStorage()) - stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad).repatchLoadPtrToLEA(); + repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad)); // Patch the offset into the property map to load from, then patch the Structure to look for. - stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure).repatch(structure); - stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset).repatch(offset); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset); } -void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress) +void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) { - StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress.addressForLookup()); - - // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. - returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_array_fail)); + StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress); // Check eax is an array Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)); // Checks out okay! - get the length from the storage - loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2); - load32(Address(regT2, FIELD_OFFSET(ArrayStorage, m_length)), regT2); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2); + load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2); Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt)); emitFastArithIntToImmNoCheck(regT2, regT0); Jump success = jump(); - PatchBuffer patchBuffer(this, m_codeBlock->executablePool()); + LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); // Use the patch information to link the failure cases back to the original slow case routine. CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); @@ -586,14 +591,15 @@ void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress // Finally patch the jump to slow case back in the hot path to jump here instead. CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - jumpLocation.relink(entryLabel); -} + RepatchBuffer repatchBuffer; + repatchBuffer.relink(jumpLocation, entryLabel); -void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame) -{ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. 
- returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_proto_list)); + repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail)); +} +void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) +{ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is // referencing the prototype object - let's speculatively load its table nice and early!) JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame)); @@ -611,11 +617,11 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str #endif // Checks out okay! - getDirectOffset - compileGetDirectOffset(protoObject, regT0, cachedOffset); + compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset); Jump success = jump(); - PatchBuffer patchBuffer(this, m_codeBlock->executablePool()); + LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); // Use the patch information to link the failure cases back to the original slow case routine. CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); @@ -631,7 +637,11 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str // Finally patch the jump to slow case back in the hot path to jump here instead. CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - jumpLocation.relink(entryLabel); + RepatchBuffer repatchBuffer; + repatchBuffer.relink(jumpLocation, entryLabel); + + // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. + repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_proto_list)); } void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset) @@ -640,7 +650,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic compileGetDirectOffset(regT0, regT0, structure, cachedOffset); Jump success = jump(); - PatchBuffer patchBuffer(this, m_codeBlock->executablePool()); + LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); // Use the patch information to link the failure cases back to the original slow case routine. CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine; @@ -659,7 +669,8 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic // Finally patch the jump to slow case back in the hot path to jump here instead. CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - jumpLocation.relink(entryLabel); + RepatchBuffer repatchBuffer; + repatchBuffer.relink(jumpLocation, entryLabel); } void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame) @@ -681,11 +692,11 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi #endif // Checks out okay! 
- getDirectOffset - compileGetDirectOffset(protoObject, regT0, cachedOffset); + compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset); Jump success = jump(); - PatchBuffer patchBuffer(this, m_codeBlock->executablePool()); + LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); // Use the patch information to link the failure cases back to the original slow case routine. CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; @@ -703,7 +714,8 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi // Finally patch the jump to slow case back in the hot path to jump here instead. CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - jumpLocation.relink(entryLabel); + RepatchBuffer repatchBuffer; + repatchBuffer.relink(jumpLocation, entryLabel); } void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame) @@ -734,10 +746,10 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi } ASSERT(protoObject); - compileGetDirectOffset(protoObject, regT0, cachedOffset); + compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset); Jump success = jump(); - PatchBuffer patchBuffer(this, m_codeBlock->executablePool()); + LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); // Use the patch information to link the failure cases back to the original slow case routine. CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; @@ -756,14 +768,12 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi // Finally patch the jump to slow case back in the hot path to jump here instead. CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - jumpLocation.relink(entryLabel); + RepatchBuffer repatchBuffer; + repatchBuffer.relink(jumpLocation, entryLabel); } -void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame) +void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) { - // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. - returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_proto_list)); - ASSERT(count); JumpList bucketsOfFail; @@ -789,10 +799,10 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str } ASSERT(protoObject); - compileGetDirectOffset(protoObject, regT0, cachedOffset); + compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset); Jump success = jump(); - PatchBuffer patchBuffer(this, m_codeBlock->executablePool()); + LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); // Use the patch information to link the failure cases back to the original slow case routine. patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall)); @@ -806,7 +816,11 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str // Finally patch the jump to slow case back in the hot path to jump here instead. 
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - jumpLocation.relink(entryLabel); + RepatchBuffer repatchBuffer; + repatchBuffer.relink(jumpLocation, entryLabel); + + // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. + repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_proto_list)); } /* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITStubCall.h b/src/3rdparty/webkit/JavaScriptCore/jit/JITStubCall.h index 6c9ccc1..2dc69c1 100644 --- a/src/3rdparty/webkit/JavaScriptCore/jit/JITStubCall.h +++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITStubCall.h @@ -107,9 +107,8 @@ namespace JSC { JIT::Call call() { - ASSERT(m_jit->m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set. - #if ENABLE(OPCODE_SAMPLING) + ASSERT(m_jit->m_bytecodeIndex != (unsigned)-1); m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, true); #endif diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITStubs.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JITStubs.cpp index 41ebd20..02bf7c0 100644 --- a/src/3rdparty/webkit/JavaScriptCore/jit/JITStubs.cpp +++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITStubs.cpp @@ -77,7 +77,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_ COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline); COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline); -asm( +asm volatile ( ".globl " SYMBOL_STRING(ctiTrampoline) "\n" SYMBOL_STRING(ctiTrampoline) ":" "\n" "pushl %ebp" "\n" @@ -97,13 +97,22 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n" "ret" "\n" ); -asm( +asm volatile ( ".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n" SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n" #if !USE(JIT_STUB_ARGUMENT_VA_LIST) "movl %esp, %ecx" "\n" #endif "call " SYMBOL_STRING(cti_vm_throw) "\n" + "addl $0x1c, %esp" "\n" + "popl %ebx" "\n" + "popl %edi" "\n" + "popl %esi" "\n" + "popl %ebp" "\n" + "ret" "\n" +); + +asm volatile ( ".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n" SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n" "addl $0x1c, %esp" "\n" @@ -126,7 +135,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_ COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline); COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline); -asm( +asm volatile ( ".globl " SYMBOL_STRING(ctiTrampoline) "\n" SYMBOL_STRING(ctiTrampoline) ":" "\n" "pushq %rbp" "\n" @@ -152,11 +161,22 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n" "ret" "\n" ); -asm( +asm volatile ( ".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n" SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n" "movq %rsp, %rdi" "\n" "call " SYMBOL_STRING(cti_vm_throw) "\n" + "addq $0x48, %rsp" "\n" + "popq %rbx" "\n" + "popq %r15" "\n" + "popq %r14" "\n" + "popq %r13" "\n" + "popq %r12" "\n" + "popq %rbp" "\n" + "ret" "\n" +); + +asm volatile ( ".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n" SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n" "addq $0x48, %rsp" "\n" @@ -169,6 +189,70 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n" "ret" 
"\n" ); +#elif COMPILER(GCC) && PLATFORM_ARM_ARCH(7) + +#if USE(JIT_STUB_ARGUMENT_VA_LIST) +#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7." +#endif + +asm volatile ( +".text" "\n" +".align 2" "\n" +".globl " SYMBOL_STRING(ctiTrampoline) "\n" +".thumb" "\n" +".thumb_func " SYMBOL_STRING(ctiTrampoline) "\n" +SYMBOL_STRING(ctiTrampoline) ":" "\n" + "sub sp, sp, #0x3c" "\n" + "str lr, [sp, #0x20]" "\n" + "str r4, [sp, #0x24]" "\n" + "str r5, [sp, #0x28]" "\n" + "str r6, [sp, #0x2c]" "\n" + "str r1, [sp, #0x30]" "\n" + "str r2, [sp, #0x34]" "\n" + "str r3, [sp, #0x38]" "\n" + "cpy r5, r2" "\n" + "mov r6, #512" "\n" + "blx r0" "\n" + "ldr r6, [sp, #0x2c]" "\n" + "ldr r5, [sp, #0x28]" "\n" + "ldr r4, [sp, #0x24]" "\n" + "ldr lr, [sp, #0x20]" "\n" + "add sp, sp, #0x3c" "\n" + "bx lr" "\n" +); + +asm volatile ( +".text" "\n" +".align 2" "\n" +".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n" +".thumb" "\n" +".thumb_func " SYMBOL_STRING(ctiVMThrowTrampoline) "\n" +SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n" + "cpy r0, sp" "\n" + "bl " SYMBOL_STRING(cti_vm_throw) "\n" + "ldr r6, [sp, #0x2c]" "\n" + "ldr r5, [sp, #0x28]" "\n" + "ldr r4, [sp, #0x24]" "\n" + "ldr lr, [sp, #0x20]" "\n" + "add sp, sp, #0x3c" "\n" + "bx lr" "\n" +); + +asm volatile ( +".text" "\n" +".align 2" "\n" +".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n" +".thumb" "\n" +".thumb_func " SYMBOL_STRING(ctiOpThrowNotCaught) "\n" +SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n" + "ldr r6, [sp, #0x2c]" "\n" + "ldr r5, [sp, #0x28]" "\n" + "ldr r4, [sp, #0x24]" "\n" + "ldr lr, [sp, #0x20]" "\n" + "add sp, sp, #0x3c" "\n" + "bx lr" "\n" +); + #elif COMPILER(MSVC) #if USE(JIT_STUB_ARGUMENT_VA_LIST) @@ -218,18 +302,18 @@ extern "C" { ret; } } - - __declspec(naked) void ctiOpThrowNotCaught() - { - __asm { - add esp, 0x1c; - pop ebx; - pop edi; - pop esi; - pop ebp; - ret; - } - } + + __declspec(naked) void ctiOpThrowNotCaught() + { + __asm { + add esp, 0x1c; + pop ebx; + pop edi; + pop esi; + pop ebp; + ret; + } + } } #endif @@ -243,11 +327,29 @@ extern "C" { JITThunks::JITThunks(JSGlobalData* globalData) { JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiArrayLengthTrampoline, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallPreLink, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk); + +#if PLATFORM_ARM_ARCH(7) + // Unfortunate the arm compiler does not like the use of offsetof on JITStackFrame (since it contains non POD types), + // and the OBJECT_OFFSETOF macro does not appear constantish enough for it to be happy with its use in COMPILE_ASSERT + // macros. + ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == 0x20); + ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == 0x24); + ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == 0x28); + ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == 0x2c); + + ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == 0x30); + ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == 0x34); + ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, exception) == 0x38); + // The fifth argument is the first item already on the stack. 
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == 0x3c); + + ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == 0x1C); +#endif } #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) -NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValue baseValue, const PutPropertySlot& slot) +NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot) { // The interpreter checks for recursion here; I do not believe this can occur in CTI. @@ -281,6 +383,10 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co // Structure transition, cache transition info if (slot.type() == PutPropertySlot::NewProperty) { StructureChain* prototypeChain = structure->prototypeChain(callFrame); + if (!prototypeChain->isCacheable()) { + ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic)); + return; + } stubInfo->initPutByIdTransition(structure->previousID(), structure, prototypeChain); JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress); return; @@ -291,7 +397,7 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co JIT::patchPutByIdReplace(stubInfo, structure, slot.cachedOffset(), returnAddress); } -NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot) +NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot) { // FIXME: Write a test that proves we need to check for recursion here just // like the interpreter does, then add a check for recursion. 
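[Editor's note] Cross-referencing the run-time ASSERTs above with the hand-written ctiTrampoline: every str in the trampoline must land on the JITStackFrame field the C++ stubs later read through. A summary of the ARMv7 frame layout (offsets taken from the hunks; the grouping comments are editorial):

    // sp + 0x00 .. 0x1b   JITStubArg padding + args[6]          outgoing stub arguments
    // sp + 0x1c           thunkReturnAddress                    lr spilled by each stub thunk
    // sp + 0x20           preservedReturnAddress                "str lr, [sp, #0x20]"
    // sp + 0x24 .. 0x2c   preservedR4 / R5 / R6                 "str r4/r5/r6, ..."
    // sp + 0x30 .. 0x38   registerFile / callFrame / exception  incoming arguments r1..r3
    // sp + 0x3c           enabledProfilerReference              fifth argument, already on the stack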
@@ -368,6 +474,10 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co } StructureChain* prototypeChain = structure->prototypeChain(callFrame); + if (!prototypeChain->isCacheable()) { + ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic)); + return; + } stubInfo->initGetByIdChain(structure, prototypeChain); JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, slot.cachedOffset(), returnAddress); } @@ -398,7 +508,7 @@ struct StackHack { : stackFrame(stackFrame) , savedReturnAddress(*stackFrame.returnAddressSlot()) { - *stackFrame.returnAddressSlot() = reinterpret_cast<void*>(jscGeneratedNativeCode); + *stackFrame.returnAddressSlot() = ReturnAddressPtr(FunctionPtr(jscGeneratedNativeCode)); } ALWAYS_INLINE ~StackHack() @@ -407,17 +517,17 @@ struct StackHack { } JITStackFrame& stackFrame; - void* savedReturnAddress; + ReturnAddressPtr savedReturnAddress; }; #define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame) -#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = returnAddress +#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = ReturnAddressPtr(returnAddress) #define STUB_RETURN_ADDRESS stackHack.savedReturnAddress #else #define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS) -#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = returnAddress +#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = ReturnAddressPtr(returnAddress) #define STUB_RETURN_ADDRESS *stackFrame.returnAddressSlot() #endif @@ -426,14 +536,14 @@ struct StackHack { // to get the address of the ctiVMThrowTrampoline function. It's also // good to keep the code size down by leaving as much of the exception // handling code out of line as possible. 
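[Editor's note] returnToThrowTrampoline, whose signature changes just below, is the mechanism the out-of-line comment above is describing: rather than returning into the JIT code that made the stub call, a throwing stub rewrites its own saved return address so that its epilogue lands in ctiVMThrowTrampoline. A hedged sketch of the flow, with names as in the surrounding code and control flow simplified:

    // Sketch only. returnAddressSlot() is where 'return to JIT code' was saved
    // (on the machine stack on x86, in thunkReturnAddress on ARMv7).
    static NEVER_INLINE void divertToThrowSketch(JSGlobalData* globalData, JITStackFrame& frame)
    {
        ASSERT(globalData->exception);
        globalData->exceptionLocation = *frame.returnAddressSlot(); // remember the faulting site
        *frame.returnAddressSlot() = ReturnAddressPtr(FunctionPtr(ctiVMThrowTrampoline));
        // The stub now returns normally; its 'ret'/'bx lr' lands in
        // ctiVMThrowTrampoline, which calls cti_vm_throw and unwinds the frame.
    }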
-static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, void* exceptionLocation, void*& returnAddressSlot) +static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot) { ASSERT(globalData->exception); globalData->exceptionLocation = exceptionLocation; - returnAddressSlot = reinterpret_cast<void*>(ctiVMThrowTrampoline); + returnAddressSlot = ReturnAddressPtr(FunctionPtr(ctiVMThrowTrampoline)); } -static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalData* globalData, void* exceptionLocation, void*& returnAddressSlot) +static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot) { globalData->exception = createStackOverflowError(callFrame); returnToThrowTrampoline(globalData, exceptionLocation, returnAddressSlot); @@ -467,7 +577,29 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD namespace JITStubs { +#if PLATFORM_ARM_ARCH(7) + +#define DEFINE_STUB_FUNCTION(rtype, op) \ + extern "C" { \ + rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \ + }; \ + asm volatile ( \ + ".text" "\n" \ + ".align 2" "\n" \ + ".globl " SYMBOL_STRING(cti_##op) "\n" \ + ".thumb" "\n" \ + ".thumb_func " SYMBOL_STRING(cti_##op) "\n" \ + SYMBOL_STRING(cti_##op) ":" "\n" \ + "str lr, [sp, #0x1c]" "\n" \ + "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \ + "ldr lr, [sp, #0x1c]" "\n" \ + "bx lr" "\n" \ + ); \ + rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \ + +#else #define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION) +#endif DEFINE_STUB_FUNCTION(JSObject*, op_convert_this) { @@ -573,7 +705,7 @@ DEFINE_STUB_FUNCTION(void, register_file_check) // moved the call frame forward. 
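[Editor's note] The ARMv7 DEFINE_STUB_FUNCTION wrapper above exists because an ARM call leaves the return address in lr rather than on the stack, so there is no memory slot for returnAddressSlot() to hand out or for the throw path to overwrite. The thunk manufactures one: it spills lr to thunkReturnAddress (sp + 0x1c) before calling the portable stub body and reloads it - possibly rewritten - afterwards. In outline (C++ cannot name lr, so this is comment-ware describing the asm above):

    // What "str lr, [sp, #0x1c]" / "ldr lr, [sp, #0x1c]" / "bx lr" achieve:
    //   cti_op_foo:
    //       stackFrame.thunkReturnAddress = lr;          // publish the slot
    //       result = JITStubThunked_op_foo(STUB_ARGS);   // the C++ stub body
    //       lr = stackFrame.thunkReturnAddress;          // may now point at the
    //       return result;                               // throw trampoline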
CallFrame* oldCallFrame = stackFrame.callFrame->callerFrame(); stackFrame.callFrame = oldCallFrame; - throwStackOverflowError(oldCallFrame, stackFrame.globalData, oldCallFrame->returnPC(), STUB_RETURN_ADDRESS); + throwStackOverflowError(oldCallFrame, stackFrame.globalData, ReturnAddressPtr(oldCallFrame->returnPC()), STUB_RETURN_ADDRESS); } DEFINE_STUB_FUNCTION(int, op_loop_if_less) @@ -832,7 +964,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail) if (stubInfo->opcodeID == op_get_by_id_self) { ASSERT(!stubInfo->stubRoutine); - polymorphicStructureList = new PolymorphicAccessStructureList(MacroAssembler::CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure); + polymorphicStructureList = new PolymorphicAccessStructureList(CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure); stubInfo->initGetByIdSelfList(polymorphicStructureList, 2); } else { polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList; @@ -858,12 +990,12 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(Str switch (stubInfo->opcodeID) { case op_get_by_id_proto: prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure, stubInfo->u.getByIdProto.prototypeStructure); - stubInfo->stubRoutine.reset(); + stubInfo->stubRoutine = CodeLocationLabel(); stubInfo->initGetByIdProtoList(prototypeStructureList, 2); break; case op_get_by_id_chain: prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain); - stubInfo->stubRoutine.reset(); + stubInfo->stubRoutine = CodeLocationLabel(); stubInfo->initGetByIdProtoList(prototypeStructureList, 2); break; case op_get_by_id_proto_list: @@ -919,9 +1051,15 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list) if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1)) ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full)); } else if (size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot)) { + StructureChain* protoChain = structure->prototypeChain(callFrame); + if (!protoChain->isCacheable()) { + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail)); + return JSValue::encode(result); + } + int listIndex; PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex); - JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, structure->prototypeChain(callFrame), count, slot.cachedOffset()); + JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, slot.cachedOffset()); if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1)) ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full)); @@ -1072,6 +1210,7 @@ DEFINE_STUB_FUNCTION(void*, op_call_JSFunction) #endif JSFunction* function = asFunction(stackFrame.args[0].jsValue()); + ASSERT(!function->isHostFunction()); FunctionBodyNode* body = function->body(); ScopeChainNode* callDataScopeChain = function->scope().node(); body->jitCode(callDataScopeChain); @@ -1085,6 +1224,7 @@ DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck) CallFrame* callFrame = stackFrame.callFrame; CodeBlock* newCodeBlock = 
stackFrame.args[3].codeBlock(); + ASSERT(newCodeBlock->codeType() != NativeCode); int argCount = stackFrame.args[2].int32(); ASSERT(argCount != newCodeBlock->m_numParameters); @@ -1146,9 +1286,11 @@ DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall) CodeBlock* codeBlock = 0; if (!callee->isHostFunction()) codeBlock = &callee->body()->bytecode(callee->scope().node()); + else + codeBlock = &callee->body()->generatedBytecode(); CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(stackFrame.args[1].returnAddress()); - JIT::linkCall(callee, codeBlock, jitCode, callLinkInfo, stackFrame.args[2].int32()); + JIT::linkCall(callee, codeBlock, jitCode, callLinkInfo, stackFrame.args[2].int32(), stackFrame.globalData); return jitCode.addressForCall().executableAddress(); } @@ -1179,7 +1321,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction) CallFrame* previousCallFrame = stackFrame.callFrame; CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset); - callFrame->init(0, static_cast<Instruction*>(STUB_RETURN_ADDRESS), previousCallFrame->scopeChain(), previousCallFrame, 0, argCount, 0); + callFrame->init(0, static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()), previousCallFrame->scopeChain(), previousCallFrame, 0, argCount, 0); stackFrame.callFrame = callFrame; Register* argv = stackFrame.callFrame->registers() - RegisterFile::CallFrameHeaderSize - argCount; @@ -2234,7 +2376,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_throw) } stackFrame.callFrame = callFrame; - void* catchRoutine = handler->nativeCode.addressForExceptionHandler(); + void* catchRoutine = handler->nativeCode.executableAddress(); ASSERT(catchRoutine); STUB_SET_RETURN_ADDRESS(catchRoutine); return JSValue::encode(exceptionValue); @@ -2442,14 +2584,14 @@ DEFINE_STUB_FUNCTION(void*, op_switch_imm) CodeBlock* codeBlock = callFrame->codeBlock(); if (scrutinee.isInt32Fast()) - return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.getInt32Fast()).addressForSwitch(); + return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.getInt32Fast()).executableAddress(); else { double value; int32_t intValue; if (scrutinee.getNumber(value) && ((intValue = static_cast<int32_t>(value)) == value)) - return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(intValue).addressForSwitch(); + return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(intValue).executableAddress(); else - return codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch(); + return codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.executableAddress(); } } @@ -2462,12 +2604,12 @@ DEFINE_STUB_FUNCTION(void*, op_switch_char) CallFrame* callFrame = stackFrame.callFrame; CodeBlock* codeBlock = callFrame->codeBlock(); - void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch(); + void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.executableAddress(); if (scrutinee.isString()) { UString::Rep* value = asString(scrutinee)->value().rep(); if (value->size() == 1) - result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).addressForSwitch(); + result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).executableAddress(); } return result; @@ -2482,11 +2624,11 @@ DEFINE_STUB_FUNCTION(void*, op_switch_string) CallFrame* callFrame = stackFrame.callFrame; CodeBlock* codeBlock = callFrame->codeBlock(); - void* 
result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch(); + void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.executableAddress(); if (scrutinee.isString()) { UString::Rep* value = asString(scrutinee)->value().rep(); - result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).addressForSwitch(); + result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).executableAddress(); } return result; @@ -2590,7 +2732,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, vm_throw) } stackFrame.callFrame = callFrame; - void* catchRoutine = handler->nativeCode.addressForExceptionHandler(); + void* catchRoutine = handler->nativeCode.executableAddress(); ASSERT(catchRoutine); STUB_SET_RETURN_ADDRESS(catchRoutine); return JSValue::encode(exceptionValue); diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITStubs.h b/src/3rdparty/webkit/JavaScriptCore/jit/JITStubs.h index 48e5369..0493189 100644 --- a/src/3rdparty/webkit/JavaScriptCore/jit/JITStubs.h +++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITStubs.h @@ -68,7 +68,7 @@ namespace JSC { FuncExprNode* funcExprNode() { return static_cast<FuncExprNode*>(asPointer); } RegExp* regExp() { return static_cast<RegExp*>(asPointer); } JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); } - void* returnAddress() { return asPointer; } + ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); } }; #if PLATFORM(X86_64) @@ -92,7 +92,7 @@ namespace JSC { JSGlobalData* globalData; // When JIT code makes a call, it pushes its return address just below the rest of the stack. - void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; } + ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; } }; #elif PLATFORM(X86) struct JITStackFrame { @@ -113,7 +113,30 @@ namespace JSC { JSGlobalData* globalData; // When JIT code makes a call, it pushes its return address just below the rest of the stack. - void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; } + ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; } + }; +#elif PLATFORM_ARM_ARCH(7) + struct JITStackFrame { + JITStubArg padding; // Unused + JITStubArg args[6]; + + ReturnAddressPtr thunkReturnAddress; + + void* preservedReturnAddress; + void* preservedR4; + void* preservedR5; + void* preservedR6; + + // These arguments are passed in r1..r3 (r0 contained the entry code pointer, which is not preserved) + RegisterFile* registerFile; + CallFrame* callFrame; + JSValue* exception; + + // These arguments are passed on the stack. + Profiler** enabledProfilerReference; + JSGlobalData* globalData; + + ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; } }; #else #error "JITStackFrame not defined for this platform." 
@@ -172,8 +195,8 @@ namespace JSC { public: JITThunks(JSGlobalData*); - static void tryCacheGetByID(CallFrame*, CodeBlock*, void* returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&); - static void tryCachePutByID(CallFrame*, CodeBlock*, void* returnAddress, JSValue baseValue, const PutPropertySlot&); + static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&); + static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&); MacroAssemblerCodePtr ctiArrayLengthTrampoline() { return m_ctiArrayLengthTrampoline; } MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_ctiStringLengthTrampoline; } |
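[Editor's note] Finally, one behavioural change threaded through the stubs above, distinct from the mechanical ReturnAddressPtr/RepatchBuffer churn: before caching a put_by_id transition or a get_by_id prototype-chain lookup, the code now asks the chain whether it is cacheable at all, and permanently falls back when it is not. The guard, condensed (a hedged composite of the occurrences in tryCachePutByID, tryCacheGetByID and cti_op_get_by_id_proto_list; the get_by_id sites patch to their own generic/fail stubs):

    StructureChain* prototypeChain = structure->prototypeChain(callFrame);
    if (!prototypeChain->isCacheable()) {
        // Give up on this inline cache: point the call site at the generic,
        // non-caching stub so we never try to specialize it again.
        ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
        return;
    }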