Diffstat (limited to 'src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86Common.h')
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86Common.h  119
1 file changed, 93 insertions, 26 deletions
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index 5ebefa7..073f563 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -26,8 +26,6 @@
#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER)
#include "X86Assembler.h"
@@ -36,6 +34,10 @@
namespace JSC {
class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
+ static const int DoubleConditionBitInvert = 0x10;
+ static const int DoubleConditionBitSpecial = 0x20;
+ static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
public:
enum Condition {
@@ -56,13 +58,24 @@ public:
};
enum DoubleCondition {
- DoubleEqual = X86Assembler::ConditionE,
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
DoubleNotEqual = X86Assembler::ConditionNE,
DoubleGreaterThan = X86Assembler::ConditionA,
DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
- DoubleLessThan = X86Assembler::ConditionB,
- DoubleLessThanOrEqual = X86Assembler::ConditionBE,
+ DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
+ DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = X86Assembler::ConditionE,
+ DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
+ DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
+ DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
+ DoubleLessThanOrUnordered = X86Assembler::ConditionB,
+ DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
};
+ COMPILE_ASSERT(
+ !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
+ DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
static const RegisterID stackPointerRegister = X86Registers::esp;
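The added DoubleCondition values pack two extra bits on top of the raw x86 condition codes: DoubleConditionBitInvert asks branchDouble to swap the operands of the ucomisd comparison, and DoubleConditionBitSpecial marks the two conditions (DoubleEqual, DoubleNotEqualOrUnordered) that need an extra parity-flag check. The sketch below is a host-side C++ model of the flag behaviour, included for illustration only; ucomisdModel and modelDoubleLessThan are hypothetical names, not part of the patch.

    // ucomisd sets ZF/PF/CF from the comparison; PF = 1 only for an unordered
    // (NaN) result, which is what the special-cased conditions rely on.
    #include <cmath>

    struct UcomisdFlags { bool zf, pf, cf; };

    // Flags describe 'left' compared against 'right'.
    UcomisdFlags ucomisdModel(double left, double right)
    {
        if (std::isnan(left) || std::isnan(right))
            return { true, true, true };     // unordered: ZF = PF = CF = 1
        if (left > right)
            return { false, false, false };  // above
        if (left < right)
            return { false, false, true };   // below
        return { true, false, false };       // equal
    }

    // DoubleLessThan == ConditionA | DoubleConditionBitInvert: swap the operands,
    // then branch on "above" (ZF == 0 && CF == 0), which is never taken when the
    // comparison is unordered.
    bool modelDoubleLessThan(double left, double right)
    {
        UcomisdFlags f = ucomisdModel(right, left);  // inverted operands
        return !f.zf && !f.cf;                       // ConditionA
    }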
@@ -416,20 +429,35 @@ public:
void convertInt32ToDouble(Address src, FPRegisterID dest)
{
+ ASSERT(isSSE2Present());
m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
}
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
ASSERT(isSSE2Present());
- m_assembler.ucomisd_rr(right, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchDouble(DoubleCondition cond, FPRegisterID left, Address right)
- {
- m_assembler.ucomisd_mr(right.offset, right.base, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
+ if (cond & DoubleConditionBitInvert)
+ m_assembler.ucomisd_rr(left, right);
+ else
+ m_assembler.ucomisd_rr(right, left);
+
+ if (cond == DoubleEqual) {
+ Jump isUnordered(m_assembler.jp());
+ Jump result = Jump(m_assembler.je());
+ isUnordered.link(this);
+ return result;
+ } else if (cond == DoubleNotEqualOrUnordered) {
+ Jump isUnordered(m_assembler.jp());
+ Jump isEqual(m_assembler.je());
+ isUnordered.link(this);
+ Jump result = jump();
+ isEqual.link(this);
+ return result;
+ }
+
+ ASSERT(!(cond & DoubleConditionBitSpecial));
+ return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
}
// Truncates 'src' to an integer, and places the result in 'dest'.
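In the rewritten branchDouble above, the two DoubleConditionBitSpecial cases need the parity flag because ucomisd reports an unordered result with ZF = PF = CF = 1, so a bare je would treat a NaN operand as equal. Below is a host-side sketch of what the jp/je sequences compute, illustrative only (the model* function names are not from the patch):

    #include <cmath>

    // DoubleEqual: the jp guard skips over the je, so an unordered comparison
    // falls through and the branch is not taken.
    bool modelDoubleEqual(double left, double right)
    {
        if (std::isnan(left) || std::isnan(right))   // jp taken: unordered
            return false;
        return left == right;                        // je
    }

    // DoubleNotEqualOrUnordered: jp goes straight to the taken path, je to the
    // not-taken path, and everything else reaches the unconditional jump().
    bool modelDoubleNotEqualOrUnordered(double left, double right)
    {
        if (std::isnan(left) || std::isnan(right))   // jp: unordered => taken
            return true;
        return left != right;                        // taken unless je fires
    }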
@@ -443,6 +471,25 @@ public:
return branch32(Equal, dest, Imm32(0x80000000));
}
+ // Converts 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ failureCases.append(branchTest32(Zero, dest));
+
+ // Convert the integer result back to double & compare to the original value - if not equal or unordered (NaN) then jump.
+ convertInt32ToDouble(dest, fpTemp);
+ m_assembler.ucomisd_rr(fpTemp, src);
+ failureCases.append(m_assembler.jp());
+ failureCases.append(m_assembler.jne());
+ }
+
void zeroDouble(FPRegisterID srcDest)
{
ASSERT(isSSE2Present());
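branchConvertDoubleToInt32 above accepts the cvttsd2si result only when it is non-zero and converts back to exactly the original double; everything else (out-of-range values, NaN, and a possible -0.0, which truncates to 0 yet compares equal to +0.0) lands on failureCases. A host-side sketch of the same acceptance test, for illustration only; the function name and the up-front range check are not part of the patch:

    #include <cmath>
    #include <cstdint>

    // Returns true when 'src' converts exactly; any false return corresponds to
    // one of the failureCases branches taken by the JIT-generated code.
    bool modelConvertDoubleToInt32(double src, int32_t& dest)
    {
        // Keep the C++ model well defined: out-of-range or NaN inputs could never
        // pass the round-trip comparison below anyway.
        if (!(src > -2147483649.0 && src < 2147483648.0))
            return false;

        dest = static_cast<int32_t>(src);            // cvttsd2si: truncate toward zero

        // A zero result might have come from -0.0, which the double comparison
        // below cannot distinguish from +0.0, so bail out (branchTest32(Zero, dest)).
        if (dest == 0)
            return false;

        // convertInt32ToDouble + ucomisd: reject on mismatch (jne) or NaN (jp).
        return static_cast<double>(dest) == src;
    }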
@@ -493,7 +540,7 @@ public:
m_assembler.movl_i32r(imm.m_value, dest);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void move(RegisterID src, RegisterID dest)
{
// Note: on 64-bit this is a full register move; perhaps it would be
@@ -509,7 +556,8 @@ public:
void swap(RegisterID reg1, RegisterID reg2)
{
- m_assembler.xchgq_rr(reg1, reg2);
+ if (reg1 != reg2)
+ m_assembler.xchgq_rr(reg1, reg2);
}
void signExtend32ToPtr(RegisterID src, RegisterID dest)
@@ -570,6 +618,12 @@ public:
// an optional second operand of a mask under which to perform the test.
public:
+ Jump branch8(Condition cond, Address left, Imm32 right)
+ {
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
Jump branch32(Condition cond, RegisterID left, RegisterID right)
{
m_assembler.cmpl_rr(right, left);
@@ -667,6 +721,16 @@ public:
m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
+
+ Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
Jump jump()
{
@@ -786,6 +850,13 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
+ Jump branchNeg32(Condition cond, RegisterID srcDest)
+ {
+ ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
+ neg32(srcDest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
@@ -867,10 +938,11 @@ public:
void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
{
if (mask.m_value == -1)
- m_assembler.cmpl_im(0, address.offset, address.base);
+ m_assembler.cmpb_im(0, address.offset, address.base);
else
- m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest);
}
void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
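The setTest8 change above switches to byte-sized memory operands (cmpb/testb) and zero-extends the result: setcc writes only the low 8 bits of 'dest', so without the added movzbl any stale upper bits would survive in the register. A tiny illustrative model (not from the patch) of the destination register contents:

    #include <cstdint>

    // 'previousBits' stands for whatever the register held before setTest8 ran.
    uint32_t modelSetTest8(uint32_t previousBits, bool conditionHolds)
    {
        uint32_t dest = previousBits;
        dest = (dest & 0xffffff00u) | (conditionHolds ? 1u : 0u);  // setcc: low byte only
        dest &= 0xffu;                                             // movzbl: now a clean 0 or 1
        return dest;
    }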
@@ -889,18 +961,13 @@ protected:
return static_cast<X86Assembler::Condition>(cond);
}
- X86Assembler::Condition x86Condition(DoubleCondition cond)
- {
- return static_cast<X86Assembler::Condition>(cond);
- }
-
private:
// Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
// x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
friend class MacroAssemblerX86;
-#if PLATFORM(X86)
-#if PLATFORM(MAC)
+#if CPU(X86)
+#if OS(MAC_OS_X)
// All X86 Macs are guaranteed to support at least SSE2,
static bool isSSE2Present()
@@ -908,7 +975,7 @@ private:
return true;
}
-#else // PLATFORM(MAC)
+#else // OS(MAC_OS_X)
enum SSE2CheckState {
NotCheckedSSE2,
@@ -951,8 +1018,8 @@ private:
static SSE2CheckState s_sse2CheckState;
-#endif // PLATFORM(MAC)
-#elif !defined(NDEBUG) // PLATFORM(X86)
+#endif // OS(MAC_OS_X)
+#elif !defined(NDEBUG) // CPU(X86)
// On x86-64 we should never be checking for SSE2 in a non-debug build,
// but non-debug builds add this method to keep the asserts above happy.