author | Simon Hausmann <simon.hausmann@nokia.com> | 2009-06-15 09:06:43 (GMT)
---|---|---
committer | Simon Hausmann <simon.hausmann@nokia.com> | 2009-06-15 09:31:31 (GMT)
commit | c411f16870f112c3407c28c22b617f613a82cff4 (patch) |
tree | 29a1bcd590c8b31af2aab445bfe8a978dc5bf582 /src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp |
parent | 3d77b56b32a0c53ec0bbfaa07236fedb900ff336 (diff) |
Updated WebKit from /home/shausman/src/webkit/trunk to qtwebkit-4.6-snapshot-15062009 ( 65232bf00dc494ebfd978f998c88f58d18ecce1e )
Diffstat (limited to 'src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp')
-rw-r--r-- | src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp | 1183 |
1 file changed, 1183 insertions, 0 deletions
diff --git a/src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp b/src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp
new file mode 100644
index 0000000..1737551
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/jit/JITOpcodes.cpp
@@ -0,0 +1,1183 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSCell.h"
+
+namespace JSC {
+
+#define RECORD_JUMP_TARGET(targetOffset) \
+    do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int src = currentInstruction[2].u.operand;
+
+    if (m_codeBlock->isConstantRegisterIndex(src)) {
+        storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+        if (dst == m_lastResultBytecodeRegister)
+            killLastResultRegister();
+    } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
+        // If either the src or dst is the cached register, go through
+        // get/put registers to make sure we track this correctly.
+        emitGetVirtualRegister(src, regT0);
+        emitPutVirtualRegister(dst);
+    } else {
+        // Perform the copy via regT1; do not disturb any mapping in regT0.
+        loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
+        storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
+    }
+}
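A note on the addressing scheme used throughout this file: bytecode operands name virtual registers, which live as `Register`-sized slots in the register file and are addressed relative to `callFrameRegister`, exactly as in the `dst * sizeof(Register)` computations above. A minimal sketch of that model (the types below are illustrative stand-ins, not JSC's real declarations):

```cpp
#include <cstdint>

// Illustrative stand-in for JSC's Register: one encoded JSValue slot.
union Register { int64_t encodedValue; };

// A virtual register index becomes a byte offset from the frame pointer;
// this mirrors Address(callFrameRegister, index * sizeof(Register)).
inline int64_t& virtualRegister(Register* callFrame, int index)
{
    return callFrame[index].encodedValue;
}
```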
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+    if (m_codeBlock->needsFullScopeChain())
+        JITStubCall(this, JITStubs::cti_op_end).call();
+    ASSERT(returnValueRegister != callFrameRegister);
+    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+    restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+    ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+    unsigned target = currentInstruction[1].u.operand;
+    addJump(jump(), target + 1);
+    RECORD_JUMP_TARGET(target + 1);
+}
+
+void JIT::emit_op_loop(Instruction* currentInstruction)
+{
+    emitTimeoutCheck();
+
+    unsigned target = currentInstruction[1].u.operand;
+    addJump(jump(), target + 1);
+}
+
+void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
+{
+    emitTimeoutCheck();
+
+    unsigned op1 = currentInstruction[1].u.operand;
+    unsigned op2 = currentInstruction[2].u.operand;
+    unsigned target = currentInstruction[3].u.operand;
+    if (isOperandConstantImmediateInt(op2)) {
+        emitGetVirtualRegister(op1, regT0);
+        emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+        int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+        addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3);
+    } else if (isOperandConstantImmediateInt(op1)) {
+        emitGetVirtualRegister(op2, regT0);
+        emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+        int32_t op1imm = getConstantOperandImmediateInt(op1);
+#else
+        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
+#endif
+        addJump(branch32(GreaterThan, regT0, Imm32(op1imm)), target + 3);
+    } else {
+        emitGetVirtualRegisters(op1, regT0, op2, regT1);
+        emitJumpSlowCaseIfNotImmediateInteger(regT0);
+        emitJumpSlowCaseIfNotImmediateInteger(regT1);
+        addJump(branch32(LessThan, regT0, regT1), target + 3);
+    }
+}
+
+void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+{
+    emitTimeoutCheck();
+
+    unsigned op1 = currentInstruction[1].u.operand;
+    unsigned op2 = currentInstruction[2].u.operand;
+    unsigned target = currentInstruction[3].u.operand;
+    if (isOperandConstantImmediateInt(op2)) {
+        emitGetVirtualRegister(op1, regT0);
+        emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+        int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+        addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3);
+    } else {
+        emitGetVirtualRegisters(op1, regT0, op2, regT1);
+        emitJumpSlowCaseIfNotImmediateInteger(regT0);
+        emitJumpSlowCaseIfNotImmediateInteger(regT1);
+        addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3);
+    }
+}
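One detail worth calling out in emit_op_loop_if_less above: when the left operand is the constant, the code keeps the variable operand in the register and flips the condition from LessThan to GreaterThan, since `c < x` is the same test as `x > c`. A scalar sketch of that rewrite:

```cpp
#include <cstdint>

// Both branches compute op1 < op2; only which operand sits in the
// register changes. Constant on the right: test x < c directly.
// Constant on the left: test x > c, because (c < x) == (x > c).
bool loopIfLess(int32_t x, int32_t c, bool constantIsLeft)
{
    return constantIsLeft ? x > c : x < c;
}
```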
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+    JITStubCall(this, JITStubs::cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+    // Load the operands (baseVal, proto, and value respectively) into registers.
+    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+    emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT2);
+
+    // Check that baseVal & proto are cells.
+    emitJumpSlowCaseIfNotJSCell(regT0);
+    emitJumpSlowCaseIfNotJSCell(regT1);
+
+    // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT0);
+    addSlowCase(branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+    addSlowCase(branchTest32(Zero, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
+
+    // If value is not an Object, return false.
+    Jump valueIsImmediate = emitJumpIfNotJSCell(regT2);
+    loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT0);
+    Jump valueIsNotObject = branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
+
+    // Check proto is object.
+    loadPtr(Address(regT1, FIELD_OFFSET(JSCell, m_structure)), regT0);
+    addSlowCase(branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+
+    // Optimistically load the result true, and start looping.
+    // Initially, regT1 still contains proto and regT2 still contains value.
+    // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+    move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);
+    Label loop(this);
+
+    // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
+    // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
+    loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
+    Jump isInstance = branchPtr(Equal, regT2, regT1);
+    branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop);
+
+    // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+    valueIsImmediate.link(this);
+    valueIsNotObject.link(this);
+    move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);
+
+    // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+    isInstance.link(this);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
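The assembly above is easier to follow next to the semantics it implements. A minimal sketch of the default hasInstance walk (hypothetical `Object` type, not JSC's API):

```cpp
// Hypothetical model of the prototype-chain walk generated above.
struct Object {
    Object* prototype; // null terminates the chain
};

bool defaultHasInstance(Object* value, Object* proto)
{
    // Start from value's prototype and follow the chain; finding
    // proto anywhere along it makes "value instanceof F" true.
    for (Object* p = value->prototype; p; p = p->prototype) {
        if (p == proto)
            return true;
    }
    return false;
}
```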
+
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_new_func);
+    stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_load_varargs(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_load_varargs);
+    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+    compileOpCallVarargs(currentInstruction);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+    JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
+    move(ImmPtr(globalObject), regT0);
+    emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
+    JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
+    move(ImmPtr(globalObject), regT0);
+    emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+    int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
+
+    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+    while (skip--)
+        loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, next)), regT0);
+
+    loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, object)), regT0);
+    emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+    int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+
+    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
+    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+    while (skip--)
+        loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, next)), regT1);
+
+    loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, object)), regT1);
+    emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
+}
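The scoped-variable ops above unroll a linked-list walk at code-generation time: `skip` is a compile-time constant per bytecode op, so the `while (skip--)` loop runs in the JIT and emits that many loads rather than a runtime loop. A hedged C++ model of what the emitted code then does at run time (field names follow the FIELD_OFFSET uses above, but this is not JSC's real declaration):

```cpp
// Illustrative mirror of ScopeChainNode.
struct ScopeChainNode {
    ScopeChainNode* next;
    void* object; // the JSVariableObject holding the variables
};

// The JIT emits `skip` unconditional loads instead of this loop.
void* scopedVariableObject(ScopeChainNode* chain, int skip)
{
    while (skip--)
        chain = chain->next;
    return chain->object;
}
```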
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_tear_off_activation);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+    stubCall.call();
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction*)
+{
+    JITStubCall(this, JITStubs::cti_op_tear_off_arguments).call();
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+    if (m_codeBlock->needsFullScopeChain())
+        JITStubCall(this, JITStubs::cti_op_ret_scopeChain).call();
+
+    ASSERT(callFrameRegister != regT1);
+    ASSERT(regT1 != returnValueRegister);
+    ASSERT(returnValueRegister != callFrameRegister);
+
+    // Return the result in %eax.
+    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+
+    // Grab the return address.
+    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+    // Restore our caller's "r".
+    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+    // Return.
+    restoreReturnAddressBeforeReturn(regT1);
+    ret();
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_new_array);
+    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_resolve);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_construct_verify(Instruction* currentInstruction)
+{
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+    emitJumpSlowCaseIfNotJSCell(regT0);
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int src = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(src, regT0);
+
+    Jump isImm = emitJumpIfNotJSCell(regT0);
+    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+    isImm.link(this);
+
+    if (dst != src)
+        emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_strcat);
+    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_func(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_resolve_func);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+    stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+{
+    emitTimeoutCheck();
+
+    unsigned target = currentInstruction[2].u.operand;
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+    Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
+    addJump(emitJumpIfImmediateInteger(regT0), target + 2);
+
+    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
+    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
+
+    isZero.link(this);
+}
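emit_op_loop_if_true (like op_jtrue and op_jfalse further down) decides truthiness inline for only four encodings: the integer zero, any other immediate integer, true, and false; everything else bails to the slow case. A hedged sketch of that ladder, with the encoded constants passed in rather than hard-coded, since the real immediate encodings vary by configuration:

```cpp
#include <cstdint>

// 'Slow' marks values the JIT punts to cti_op_jtrue
// (doubles, strings, objects, null, undefined, ...).
enum class FastBool { False, True, Slow };

FastBool fastToBoolean(int64_t v, int64_t encodedZero, int64_t encodedTrue,
                       int64_t encodedFalse, bool isImmediateInt)
{
    if (v == encodedZero)  return FastBool::False; // jsNumber(0)
    if (isImmediateInt)    return FastBool::True;  // any other immediate int
    if (v == encodedTrue)  return FastBool::True;
    if (v == encodedFalse) return FastBool::False;
    return FastBool::Slow;
}
```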
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_resolve_base);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_resolve_skip);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+    stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction)
+{
+    // Fast case
+    void* globalObject = currentInstruction[2].u.jsCell;
+    Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
+
+    unsigned currentIndex = m_globalResolveInfoIndex++;
+    void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
+    void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
+
+    // Check Structure of global object
+    move(ImmPtr(globalObject), regT0);
+    loadPtr(structureAddress, regT1);
+    Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match
+
+    // Load cached property
+    // Assume that the global object always uses external storage.
+    loadPtr(Address(regT0, FIELD_OFFSET(JSGlobalObject, m_externalStorage)), regT0);
+    load32(offsetAddr, regT1);
+    loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+    Jump end = jump();
+
+    // Slow case
+    noMatch.link(this);
+    JITStubCall stubCall(this, JITStubs::cti_op_resolve_global);
+    stubCall.addArgument(ImmPtr(globalObject));
+    stubCall.addArgument(ImmPtr(ident));
+    stubCall.addArgument(Imm32(currentIndex));
+    stubCall.call(currentInstruction[1].u.operand);
+    end.link(this);
+}
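The fast path of emit_op_resolve_global is a classic structure-based inline cache: each resolve site owns a cache slot recording the global object's Structure and a property offset, and while the Structure still matches, the lookup is two memory reads. A hedged model of the hit/miss logic (illustrative field names, not JSC's exact types):

```cpp
#include <cstdint>

// Illustrative per-site cache refilled by the cti_op_resolve_global stub.
struct GlobalResolveInfoModel {
    const void* structure; // Structure* seen when the cache was filled
    unsigned offset;       // slot index into external property storage
};

struct GlobalObjectModel {
    const void* structure;
    const uint64_t* externalStorage; // encoded JSValues
};

// Returns true and writes the value on a cache hit; a miss would call
// the slow-path stub, which performs the full lookup and refills the cache.
bool resolveGlobalFast(const GlobalObjectModel& g,
                       const GlobalResolveInfoModel& info, uint64_t& out)
{
    if (g.structure != info.structure)
        return false; // shape changed: take the slow case
    out = g.externalStorage[info.offset];
    return true;
}
```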
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+    addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
+    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+    unsigned target = currentInstruction[2].u.operand;
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target + 2);
+    Jump isNonZero = emitJumpIfImmediateInteger(regT0);
+
+    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target + 2);
+    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
+
+    isNonZero.link(this);
+    RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+    unsigned src = currentInstruction[1].u.operand;
+    unsigned target = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(src, regT0);
+    Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    addJump(branchTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+    Jump wasNotImmediate = jump();
+
+    // Now handle the immediate cases - undefined & null
+    isImmediate.link(this);
+    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);
+
+    wasNotImmediate.link(this);
+    RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+    unsigned src = currentInstruction[1].u.operand;
+    unsigned target = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(src, regT0);
+    Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    addJump(branchTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+    Jump wasNotImmediate = jump();
+
+    // Now handle the immediate cases - undefined & null
+    isImmediate.link(this);
+    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+    addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);
+
+    wasNotImmediate.link(this);
+    RECORD_JUMP_TARGET(target + 2);
+}
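Two subtleties in the null-comparison ops above: undefined and null compare equal to null, which the code exploits by masking off the single tag bit distinguishing them (ExtendedTagBitUndefined); and certain host objects (such as document.all) carry MasqueradesAsUndefined in their Structure flags so they too compare equal to null. A hedged sketch with made-up tag constants (JSC's real encodings differ):

```cpp
#include <cstdint>

// Illustrative tag constants, not JSC's real JSImmediate encoding.
constexpr uint64_t kUndefinedBit = 0x8; // the bit turning null into undefined
constexpr uint64_t kEncodedNull  = 0x2;

bool equalsNull(uint64_t encoded, bool isCell, bool masqueradesAsUndefined)
{
    if (isCell)
        return masqueradesAsUndefined; // e.g. document.all
    // Clearing the undefined bit folds undefined onto null, so one
    // comparison covers both immediates.
    return (encoded & ~kUndefinedBit) == kEncodedNull;
}
```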
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+    unsigned src = currentInstruction[1].u.operand;
+    JSCell* ptr = currentInstruction[2].u.jsCell;
+    unsigned target = currentInstruction[3].u.operand;
+
+    emitGetVirtualRegister(src, regT0);
+    addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target + 3);
+
+    RECORD_JUMP_TARGET(target + 3);
+}
+
+void JIT::emit_op_unexpected_load(Instruction* currentInstruction)
+{
+    JSValue v = m_codeBlock->unexpectedConstant(currentInstruction[2].u.operand);
+    move(ImmPtr(JSValue::encode(v)), regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+    int retAddrDst = currentInstruction[1].u.operand;
+    int target = currentInstruction[2].u.operand;
+    DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+    addJump(jump(), target + 2);
+    m_jsrSites.append(JSRInfo(storeLocation, label()));
+    killLastResultRegister();
+    RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
+    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+    killLastResultRegister();
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+    set32(Equal, regT1, regT0, regT0);
+    emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+    emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+    not32(regT0);
+    emitFastArithIntToImmNoCheck(regT0, regT0);
+#else
+    xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
+#endif
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_resolve_with_base);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+    stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_new_func_exp);
+    stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+    unsigned target = currentInstruction[2].u.operand;
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+    Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
+    addJump(emitJumpIfImmediateInteger(regT0), target + 2);
+
+    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
+    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
+
+    isZero.link(this);
+    RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+    set32(NotEqual, regT1, regT0, regT0);
+    emitTagAsBoolImmediate(regT0);
+
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+    xorPtr(regT1, regT0);
+    emitFastArithReTagImmediate(regT0, regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_new_regexp);
+    stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+    orPtr(regT1, regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
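Note the asymmetry between emit_op_bitor and emit_op_bitxor above: OR-ing two tagged integers leaves the tag bits set, so the result is already a valid immediate, while XOR cancels the tags and the result must be retagged (emitFastArithReTagImmediate). A worked example with a simplified low-bit tag, in the spirit of the era's 32-bit immediate encoding (the real scheme differs across the USE(ALTERNATE_JSIMMEDIATE) configurations):

```cpp
#include <cstdint>

constexpr uint32_t kIntTag = 0x1; // illustrative low tag bit
constexpr uint32_t tag(uint32_t v) { return (v << 1) | kIntTag; }

// OR: tag | tag == tag, so the encoded result is still tagged.
static_assert((tag(5) | tag(3)) == tag(5 | 3), "bitor needs no retag");

// XOR: tag ^ tag == 0, so the tag must be reinstated afterwards,
// which is what emitFastArithReTagImmediate does in the JIT.
static_assert(((tag(5) ^ tag(3)) | kIntTag) == tag(5 ^ 3), "bitxor retags");
```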
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_throw);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+    stubCall.call();
+    ASSERT(regT0 == returnValueRegister);
+#ifndef NDEBUG
+    // cti_op_throw always changes its return address;
+    // this point in the code should never be reached.
+    breakpoint();
+#endif
+}
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_next_pname);
+    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+    stubCall.call();
+    Jump endOfIter = branchTestPtr(Zero, regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+    addJump(jump(), currentInstruction[3].u.operand + 3);
+    endOfIter.link(this);
+}
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_push_scope);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+    JITStubCall(this, JITStubs::cti_op_pop_scope).call();
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+    compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+    compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+    int srcVReg = currentInstruction[2].u.operand;
+    emitGetVirtualRegister(srcVReg, regT0);
+
+    Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
+
+    emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
+
+    wasImmediate.link(this);
+
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_push_new_scope);
+    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+    killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
+    peek(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_jmp_scopes);
+    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+    stubCall.call();
+    addJump(jump(), currentInstruction[2].u.operand + 2);
+    RECORD_JUMP_TARGET(currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+    unsigned tableIndex = currentInstruction[1].u.operand;
+    unsigned defaultOffset = currentInstruction[2].u.operand;
+    unsigned scrutinee = currentInstruction[3].u.operand;
+
+    // Create jump table for switch destinations, track this switch statement.
+    SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+    JITStubCall stubCall(this, JITStubs::cti_op_switch_imm);
+    stubCall.addArgument(scrutinee, regT2);
+    stubCall.addArgument(Imm32(tableIndex));
+    stubCall.call();
+    jump(regT0);
+}
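Each switch op materializes a table mapping scrutinee values to machine-code offsets; the stub returns the target address and the JIT finishes with an indirect `jump(regT0)`. A hedged sketch of the dense-table dispatch behind cti_op_switch_imm (names and layout illustrative):

```cpp
#include <vector>

// Illustrative dense jump table: entries cover [min, min + size).
struct SimpleJumpTableModel {
    int min;                       // smallest case value
    std::vector<void*> ctiOffsets; // machine-code target per case, or null
    void* ctiDefault;              // target of the default clause
};

void* switchImmDispatch(const SimpleJumpTableModel& t, int scrutinee)
{
    unsigned index = static_cast<unsigned>(scrutinee - t.min);
    if (index < t.ctiOffsets.size() && t.ctiOffsets[index])
        return t.ctiOffsets[index];
    return t.ctiDefault; // out of range or hole: take the default
}
```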
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+    unsigned tableIndex = currentInstruction[1].u.operand;
+    unsigned defaultOffset = currentInstruction[2].u.operand;
+    unsigned scrutinee = currentInstruction[3].u.operand;
+
+    // Create jump table for switch destinations, track this switch statement.
+    SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+    JITStubCall stubCall(this, JITStubs::cti_op_switch_char);
+    stubCall.addArgument(scrutinee, regT2);
+    stubCall.addArgument(Imm32(tableIndex));
+    stubCall.call();
+    jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+    unsigned tableIndex = currentInstruction[1].u.operand;
+    unsigned defaultOffset = currentInstruction[2].u.operand;
+    unsigned scrutinee = currentInstruction[3].u.operand;
+
+    // Create jump table for switch destinations, track this switch statement.
+    StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+    m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+
+    JITStubCall stubCall(this, JITStubs::cti_op_switch_string);
+    stubCall.addArgument(scrutinee, regT2);
+    stubCall.addArgument(Imm32(tableIndex));
+    stubCall.call();
+    jump(regT0);
+}
+
+void JIT::emit_op_new_error(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_new_error);
+    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->unexpectedConstant(currentInstruction[3].u.operand))));
+    stubCall.addArgument(Imm32(m_bytecodeIndex));
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+    JITStubCall stubCall(this, JITStubs::cti_op_debug);
+    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+    stubCall.call();
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned src1 = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(src1, regT0);
+    Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    setTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+
+    Jump wasNotImmediate = jump();
+
+    isImmediate.link(this);
+
+    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+    setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+
+    wasNotImmediate.link(this);
+
+    emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned src1 = currentInstruction[2].u.operand;
+
+    emitGetVirtualRegister(src1, regT0);
+    Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+    setTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+
+    Jump wasNotImmediate = jump();
+
+    isImmediate.link(this);
+
+    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+    setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+
+    wasNotImmediate.link(this);
+
+    emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_enter(Instruction*)
+{
+    // Even though CTI doesn't use them, we initialize our constant
+    // registers to zap stale pointers, to avoid unnecessarily prolonging
+    // object lifetime and increasing GC pressure.
+    size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
+    for (size_t j = 0; j < count; ++j)
+        emitInitRegister(j);
+}
+
+void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
+{
+    // Even though CTI doesn't use them, we initialize our constant
+    // registers to zap stale pointers, to avoid unnecessarily prolonging
+    // object lifetime and increasing GC pressure.
+    size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
+    for (size_t j = 0; j < count; ++j)
+        emitInitRegister(j);
+
+    JITStubCall(this, JITStubs::cti_op_push_activation).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_create_arguments(Instruction*)
+{
+    Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
+    if (m_codeBlock->m_numParameters == 1)
+        JITStubCall(this, JITStubs::cti_op_create_arguments_no_params).call();
+    else
+        JITStubCall(this, JITStubs::cti_op_create_arguments).call();
+    argsCreated.link(this);
+}
+
+void JIT::emit_op_init_arguments(Instruction*)
+{
+    storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+    emitJumpSlowCaseIfNotJSCell(regT0);
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT1);
+    addSlowCase(branchTest32(NonZero, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+    peek(regT1, FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+    Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+
+    JITStubCall stubCall(this, JITStubs::cti_op_profile_will_call);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+    stubCall.call();
+    noProfiler.link(this);
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+    peek(regT1, FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+    Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+
+    JITStubCall stubCall(this, JITStubs::cti_op_profile_did_call);
+    stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+    stubCall.call();
+    noProfiler.link(this);
+}
+
+
+// Slow cases
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_convert_this);
+    stubCall.addArgument(regT0);
+    stubCall.call(currentInstruction[1].u.operand);
+}
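Everything from `// Slow cases` down follows one contract: each `addSlowCase` in a fast path pushed one pending bailout jump, and the matching `emitSlow_` function must consume exactly that many, in emission order, via `linkSlowCase(iter)` (hence two calls in emitSlow_op_convert_this, one per bailout in its fast path). A schematic model of that bookkeeping (illustrative types, not JSC's):

```cpp
#include <vector>

// Illustrative: one pending bailout recorded by addSlowCase.
struct PendingSlowCase { int jumpLabel; unsigned bytecodeIndex; };

using SlowCaseIter = std::vector<PendingSlowCase>::iterator;

// linkSlowCase binds the next pending jump to the current code position.
// Consuming the wrong count would wire one opcode's bailout into the
// next opcode's slow path.
int linkSlowCaseModel(SlowCaseIter& iter)
{
    return (iter++)->jumpLabel;
}
```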
+
+void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+
+    JITStubCall stubCall(this, JITStubs::cti_op_to_primitive);
+    stubCall.addArgument(regT0);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    // The slow case that handles accesses to arrays (below) may jump back up to here.
+    Label beginGetByValSlow(this);
+
+    Jump notImm = getSlowCase(iter);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    emitFastArithIntToImmNoCheck(regT1, regT1);
+
+    notImm.link(this);
+    JITStubCall stubCall(this, JITStubs::cti_op_get_by_val);
+    stubCall.addArgument(regT0);
+    stubCall.addArgument(regT1);
+    stubCall.call(currentInstruction[1].u.operand);
+    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+
+    // This is the slow case that handles accesses to arrays above the fast cut-off.
+    // First, check if this is an access to the vector.
+    linkSlowCase(iter);
+    branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow);
+
+    // Okay, missed the fast region, but it is still in the vector. Get the value.
+    loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT2);
+    // Check whether the value loaded is zero; if so we need to return undefined.
+    branchTestPtr(Zero, regT2, beginGetByValSlow);
+    move(regT2, regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+}
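The get_by_val slow path distinguishes three regions of an array: indices below the "fast cut-off" (handled in the inline fast path), indices still inside the allocated vector (handled here, where a zero slot is a hole that must read as undefined, possibly via the prototype chain), and everything else (the generic stub). A hedged sketch of the vector-region check (simplified layout, not JSC's exact ArrayStorage):

```cpp
#include <cstdint>

// Simplified model of ArrayStorage: m_vector holds encoded JSValues,
// with 0 marking a hole.
struct ArrayStorageModel {
    uint32_t m_vectorLength;
    const uint64_t* m_vector;
};

// Returns the encoded value, or 0 when the generic path must run
// (index beyond the vector, or a hole a prototype might shadow).
uint64_t getByValVector(const ArrayStorageModel& s, uint32_t index)
{
    if (index >= s.m_vectorLength)
        return 0;             // beyond the vector: generic lookup
    return s.m_vector[index]; // 0 here likewise falls back
}
```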
+
+void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    unsigned op1 = currentInstruction[1].u.operand;
+    unsigned op2 = currentInstruction[2].u.operand;
+    unsigned target = currentInstruction[3].u.operand;
+    if (isOperandConstantImmediateInt(op2)) {
+        linkSlowCase(iter);
+        JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(op2, regT2);
+        stubCall.call();
+        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+    } else if (isOperandConstantImmediateInt(op1)) {
+        linkSlowCase(iter);
+        JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+        stubCall.addArgument(op1, regT2);
+        stubCall.addArgument(regT0);
+        stubCall.call();
+        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+    } else {
+        linkSlowCase(iter);
+        linkSlowCase(iter);
+        JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(regT1);
+        stubCall.call();
+        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+    }
+}
+
+void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    unsigned op2 = currentInstruction[2].u.operand;
+    unsigned target = currentInstruction[3].u.operand;
+    if (isOperandConstantImmediateInt(op2)) {
+        linkSlowCase(iter);
+        JITStubCall stubCall(this, JITStubs::cti_op_loop_if_lesseq);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+        stubCall.call();
+        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+    } else {
+        linkSlowCase(iter);
+        linkSlowCase(iter);
+        JITStubCall stubCall(this, JITStubs::cti_op_loop_if_lesseq);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(regT1);
+        stubCall.call();
+        emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+    }
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    // Normal slow cases - either the subscript is not an immediate int, or the base is an array.
+    Jump notImm = getSlowCase(iter);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    emitFastArithIntToImmNoCheck(regT1, regT1);
+
+    notImm.link(this); {
+        JITStubCall stubCall(this, JITStubs::cti_op_put_by_val);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(regT1);
+        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+        stubCall.call();
+        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
+    }
+
+    // Slow cases for immediate int accesses to arrays.
+    linkSlowCase(iter);
+    linkSlowCase(iter); {
+        JITStubCall stubCall(this, JITStubs::cti_op_put_by_val_array);
+        stubCall.addArgument(regT0);
+        stubCall.addArgument(regT1);
+        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+        stubCall.call();
+    }
+}
+
+void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+    stubCall.addArgument(regT0);
+    stubCall.call();
+    emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+    JITStubCall stubCall(this, JITStubs::cti_op_not);
+    stubCall.addArgument(regT0);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+    stubCall.addArgument(regT0);
+    stubCall.call();
+    emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand + 2); // inverted!
+}
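Note that emitSlow_op_jfalse has no cti_op_jfalse stub of its own: it reuses cti_op_jtrue and simply inverts the branch on the returned boolean (Zero instead of NonZero), which is what the `// inverted!` comment flags. In scalar terms:

```cpp
// One truthiness stub serves both opcodes; only the test flips.
bool jtrueTaken(bool truthy)  { return truthy; }   // branch if NonZero
bool jfalseTaken(bool truthy) { return !truthy; }  // branch if Zero: inverted
```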
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_bitnot);
+    stubCall.addArgument(regT0);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+    stubCall.addArgument(regT0);
+    stubCall.call();
+    emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_bitxor);
+    stubCall.addArgument(regT0);
+    stubCall.addArgument(regT1);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_bitor);
+    stubCall.addArgument(regT0);
+    stubCall.addArgument(regT1);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_eq);
+    stubCall.addArgument(regT0);
+    stubCall.addArgument(regT1);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_neq);
+    stubCall.addArgument(regT0);
+    stubCall.addArgument(regT1);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_stricteq);
+    stubCall.addArgument(regT0);
+    stubCall.addArgument(regT1);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_nstricteq);
+    stubCall.addArgument(regT0);
+    stubCall.addArgument(regT1);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    JITStubCall stubCall(this, JITStubs::cti_op_instanceof);
+    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+    stubCall.addArgument(currentInstruction[4].u.operand, regT2);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    compileOpCallVarargsSlowCase(currentInstruction, iter);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
+    linkSlowCase(iter);
+
+    JITStubCall stubCall(this, JITStubs::cti_op_to_jsnumber);
+    stubCall.addArgument(regT0);
+    stubCall.call(currentInstruction[1].u.operand);
+}
+
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)