Diffstat (limited to 'src/3rdparty/javascriptcore/JavaScriptCore/assembler')
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.cpp          |  92
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.h            |  91
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMv7Assembler.h          | 177
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/AbstractMacroAssembler.h  |   8
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssembler.h          |  23
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.cpp     |  11
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.h       | 148
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARMv7.h     |  57
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerCodeRef.h   |  12
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86.h       |   2
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86Common.h |  89
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86_64.h    |  29
-rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/assembler/X86Assembler.h            |  64
13 files changed, 551 insertions(+), 252 deletions(-)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.cpp
index 1324586..6dd2b87 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -26,7 +26,7 @@
#include "config.h"
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "ARMAssembler.h"
@@ -34,39 +34,6 @@ namespace JSC {
// Patching helpers
-ARMWord* ARMAssembler::getLdrImmAddress(ARMWord* insn, uint32_t* constPool)
-{
- // Must be an ldr ..., [pc +/- imm]
- ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
-
- if (constPool && (*insn & 0x1))
- return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
-
- ARMWord addr = reinterpret_cast<ARMWord>(insn) + 2 * sizeof(ARMWord);
- if (*insn & DT_UP)
- return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
- else
- return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
-}
-
-void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to, int useConstantPool)
-{
- ARMWord* insn = reinterpret_cast<ARMWord*>(code) + (from.m_offset / sizeof(ARMWord));
-
- if (!useConstantPool) {
- int diff = reinterpret_cast<ARMWord*>(to) - reinterpret_cast<ARMWord*>(insn + 2);
-
- if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
- *insn = B | getConditionalField(*insn) | (diff & BRANCH_MASK);
- ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
- return;
- }
- }
- ARMWord* addr = getLdrImmAddress(insn);
- *addr = reinterpret_cast<ARMWord>(to);
- ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
-}
-
void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
{
ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
@@ -118,7 +85,7 @@ ARMWord ARMAssembler::getOp2(ARMWord imm)
if ((imm & 0x00ffffff) == 0)
return OP2_IMM | (imm >> 24) | (rol << 8);
- return 0;
+ return INVALID_IMM;
}
int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
@@ -236,25 +203,18 @@ ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
// Do it by 1 instruction
tmp = getOp2(imm);
- if (tmp)
+ if (tmp != INVALID_IMM)
return tmp;
tmp = getOp2(~imm);
- if (tmp) {
+ if (tmp != INVALID_IMM) {
if (invert)
return tmp | OP2_INV_IMM;
mvn_r(tmpReg, tmp);
return tmpReg;
}
- // Do it by 2 instruction
- if (genInt(tmpReg, imm, true))
- return tmpReg;
- if (genInt(tmpReg, ~imm, false))
- return tmpReg;
-
- ldr_imm(tmpReg, imm);
- return tmpReg;
+ return encodeComplexImm(imm, tmpReg);
}
void ARMAssembler::moveImm(ARMWord imm, int dest)
@@ -263,24 +223,41 @@ void ARMAssembler::moveImm(ARMWord imm, int dest)
// Do it by 1 instruction
tmp = getOp2(imm);
- if (tmp) {
+ if (tmp != INVALID_IMM) {
mov_r(dest, tmp);
return;
}
tmp = getOp2(~imm);
- if (tmp) {
+ if (tmp != INVALID_IMM) {
mvn_r(dest, tmp);
return;
}
+ encodeComplexImm(imm, dest);
+}
+
+ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
+{
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ ARMWord tmp = getImm16Op2(imm);
+ if (tmp != INVALID_IMM) {
+ movw_r(dest, tmp);
+ return dest;
+ }
+ movw_r(dest, getImm16Op2(imm & 0xffff));
+ movt_r(dest, getImm16Op2(imm >> 16));
+ return dest;
+#else
// Do it by 2 instructions
if (genInt(dest, imm, true))
- return;
+ return dest;
if (genInt(dest, ~imm, false))
- return;
+ return dest;
ldr_imm(dest, imm);
+ return dest;
+#endif
}
// Memory load/store helpers
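For context on the INVALID_IMM sentinel introduced above: getOp2 succeeds only when the value is a valid ARM operand-2 immediate, i.e. an 8-bit value rotated right by an even amount. A minimal standalone sketch of that test (hypothetical helper, not part of this patch):

#include <cstdint>

// True if imm is expressible as an ARM data-processing immediate:
// an 8-bit value rotated right by an even amount (0, 2, ..., 30).
static bool encodesAsOp2(uint32_t imm)
{
    for (unsigned rot = 0; rot < 32; rot += 2) {
        // Rotating imm left by rot undoes a rotate-right by rot.
        uint32_t undone = rot ? ((imm << rot) | (imm >> (32 - rot))) : imm;
        if (undone <= 0xff)
            return true;
    }
    return false;
}

// encodesAsOp2(0xff000000) is true; encodesAsOp2(0x101) is false, which is
// the kind of value encodeComplexImm must synthesize with extra instructions.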
@@ -378,10 +355,17 @@ void* ARMAssembler::executableCopy(ExecutablePool* allocator)
// The last bit is set if the constant must be placed in the constant pool.
int pos = (*iter) & (~0x1);
ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
- ARMWord offset = *getLdrImmAddress(ldrAddr);
- if (offset != 0xffffffff) {
- JmpSrc jmpSrc(pos);
- linkBranch(data, jmpSrc, data + offset, ((*iter) & 1));
+ ARMWord* addr = getLdrImmAddress(ldrAddr);
+ if (*addr != 0xffffffff) {
+ if (!(*iter & 1)) {
+ int diff = reinterpret_cast<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);
+
+ if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) {
+ *ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK);
+ continue;
+ }
+ }
+ *addr = reinterpret_cast<ARMWord>(data + *addr);
}
}
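The rewritten loop above converts a pc-relative constant-pool load back into a direct B instruction whenever the target is close enough. A sketch of the range test, assuming BOFFSET_MIN/BOFFSET_MAX bound the signed 24-bit word offset of the ARM B encoding:

#include <cstdint>

// The ARM B instruction carries a signed 24-bit offset counted in words,
// measured from the branch address plus two words of prefetch.
static bool fitsInBranchOffset(int32_t wordDiff)
{
    const int32_t BOFFSET_MAX = 0x007fffff;  // assumed: bounds of a signed
    const int32_t BOFFSET_MIN = -0x00800000; // 24-bit field, in words
    return wordDiff >= BOFFSET_MIN && wordDiff <= BOFFSET_MAX;
}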
@@ -390,4 +374,4 @@ void* ARMAssembler::executableCopy(ExecutablePool* allocator)
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.h
index 9f9a450..6967b37 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMAssembler.h
@@ -29,7 +29,7 @@
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "AssemblerBufferWithConstantPool.h"
#include <wtf/Assertions.h>
@@ -121,6 +121,7 @@ namespace JSC {
MUL = 0x00000090,
MULL = 0x00c00090,
FADDD = 0x0e300b00,
+ FDIVD = 0x0e800b00,
FSUBD = 0x0e300b40,
FMULD = 0x0e200b00,
FCMPD = 0x0eb40b40,
@@ -133,12 +134,18 @@ namespace JSC {
B = 0x0a000000,
BL = 0x0b000000,
FMSR = 0x0e000a10,
+ FMRS = 0x0e100a10,
FSITOD = 0x0eb80bc0,
+ FTOSID = 0x0ebd0b40,
FMSTAT = 0x0ef1fa10,
-#if ARM_ARCH_VERSION >= 5
+#if WTF_ARM_ARCH_AT_LEAST(5)
CLZ = 0x016f0f10,
BKPT = 0xe120070,
#endif
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ MOVW = 0x03000000,
+ MOVT = 0x03400000,
+#endif
};
enum {
@@ -175,6 +182,9 @@ namespace JSC {
padForAlign32 = 0xee120070,
};
+ static const ARMWord INVALID_IMM = 0xf0000000;
+ static const int DefaultPrefetching = 2;
+
class JmpSrc {
friend class ARMAssembler;
public:
@@ -333,6 +343,20 @@ namespace JSC {
emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARMRegisters::r0, op2);
}
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ void movw_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT((op2 | 0xf0fff) == 0xf0fff);
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MOVW | RD(rd) | op2);
+ }
+
+ void movt_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT((op2 | 0xf0fff) == 0xf0fff);
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MOVT | RD(rd) | op2);
+ }
+#endif
+
void movs_r(int rd, ARMWord op2, Condition cc = AL)
{
emitInst(static_cast<ARMWord>(cc) | MOV | SET_CC, rd, ARMRegisters::r0, op2);
@@ -378,6 +402,11 @@ namespace JSC {
emitInst(static_cast<ARMWord>(cc) | FADDD, dd, dn, dm);
}
+ void fdivd_r(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FDIVD, dd, dn, dm);
+ }
+
void fsubd_r(int dd, int dn, int dm, Condition cc = AL)
{
emitInst(static_cast<ARMWord>(cc) | FSUBD, dd, dn, dm);
@@ -482,17 +511,27 @@ namespace JSC {
emitInst(static_cast<ARMWord>(cc) | FMSR, rn, dd, 0);
}
+ void fmrs_r(int rd, int dn, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FMRS, rd, dn, 0);
+ }
+
void fsitod_r(int dd, int dm, Condition cc = AL)
{
emitInst(static_cast<ARMWord>(cc) | FSITOD, dd, 0, dm);
}
+ void ftosid_r(int fd, int dm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | FTOSID, fd, 0, dm);
+ }
+
void fmstat(Condition cc = AL)
{
m_buffer.putInt(static_cast<ARMWord>(cc) | FMSTAT);
}
-#if ARM_ARCH_VERSION >= 5
+#if WTF_ARM_ARCH_AT_LEAST(5)
void clz_r(int rd, int rm, Condition cc = AL)
{
m_buffer.putInt(static_cast<ARMWord>(cc) | CLZ | RD(rd) | RM(rm));
@@ -501,7 +540,7 @@ namespace JSC {
void bkpt(ARMWord value)
{
-#if ARM_ARCH_VERSION >= 5
+#if WTF_ARM_ARCH_AT_LEAST(5)
m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
#else
// Cannot access the zero memory address
@@ -594,15 +633,32 @@ namespace JSC {
// Patching helpers
- static ARMWord* getLdrImmAddress(ARMWord* insn, uint32_t* constPool = 0);
- static void linkBranch(void* code, JmpSrc from, void* to, int useConstantPool = 0);
+ static ARMWord* getLdrImmAddress(ARMWord* insn)
+ {
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
+
+ ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetching * sizeof(ARMWord);
+ if (*insn & DT_UP)
+ return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
+ return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
+ }
+
+ static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
+ {
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
+
+ if (*insn & 0x1)
+ return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
+ return getLdrImmAddress(insn);
+ }
static void patchPointerInternal(intptr_t from, void* to)
{
ARMWord* insn = reinterpret_cast<ARMWord*>(from);
ARMWord* addr = getLdrImmAddress(insn);
*addr = reinterpret_cast<ARMWord>(to);
- ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
}
static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
@@ -647,12 +703,13 @@ namespace JSC {
void linkJump(JmpSrc from, JmpDst to)
{
ARMWord* insn = reinterpret_cast<ARMWord*>(m_buffer.data()) + (from.m_offset / sizeof(ARMWord));
- *getLdrImmAddress(insn, m_buffer.poolAddress()) = static_cast<ARMWord>(to.m_offset);
+ ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
+ *addr = static_cast<ARMWord>(to.m_offset);
}
static void linkJump(void* code, JmpSrc from, void* to)
{
- linkBranch(code, from, to);
+ patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
}
static void relinkJump(void* from, void* to)
@@ -662,12 +719,12 @@ namespace JSC {
static void linkCall(void* code, JmpSrc from, void* to)
{
- linkBranch(code, from, to, true);
+ patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
}
static void relinkCall(void* from, void* to)
{
- relinkJump(from, to);
+ patchPointerInternal(reinterpret_cast<intptr_t>(from) - sizeof(ARMWord), to);
}
// Address operations
@@ -708,8 +765,18 @@ namespace JSC {
}
static ARMWord getOp2(ARMWord imm);
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ static ARMWord getImm16Op2(ARMWord imm)
+ {
+ if (imm <= 0xffff)
+ return (imm & 0xf000) << 4 | (imm & 0xfff);
+ return INVALID_IMM;
+ }
+#endif
ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
void moveImm(ARMWord imm, int dest);
+ ARMWord encodeComplexImm(ARMWord imm, int dest);
// Memory load/store helpers
@@ -764,6 +831,6 @@ namespace JSC {
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#endif // ARMAssembler_h
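A standalone restatement of the pc-relative decode that getLdrImmAddress above performs; DefaultPrefetching = 2 encodes the fact that reading pc on ARM observes the instruction address plus 8 bytes. The field masks are assumptions matching the constants used in this file:

#include <cstdint>

static uint32_t* literalAddress(uint32_t* insn)
{
    const uint32_t SDT_OFFSET_MASK = 0xfff; // 12-bit ldr offset field
    const uint32_t DT_UP = 1u << 23;        // U bit: add vs. subtract offset
    uintptr_t base = reinterpret_cast<uintptr_t>(insn) + 2 * sizeof(uint32_t);
    uint32_t offset = *insn & SDT_OFFSET_MASK;
    return reinterpret_cast<uint32_t*>((*insn & DT_UP) ? base + offset : base - offset);
}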
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMv7Assembler.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMv7Assembler.h
index 078de44..6cde63b 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -28,7 +28,7 @@
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2)
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
#include "AssemblerBuffer.h"
#include <wtf/Assertions.h>
@@ -236,6 +236,11 @@ class ARMThumbImmediate {
ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
: m_type(TypeUInt16)
{
+ // Make sure this constructor is only reached with type TypeUInt16;
+ // this extra parameter makes the code a little clearer by making it
+ // explicit at call sites which type is being constructed
+ ASSERT_UNUSED(type, type == TypeUInt16);
+
m_value.asInt = value;
}
@@ -407,6 +412,11 @@ register writeback
class ARMv7Assembler {
public:
+ ~ARMv7Assembler()
+ {
+ ASSERT(m_jumpsToLink.isEmpty());
+ }
+
typedef ARMRegisters::RegisterID RegisterID;
typedef ARMRegisters::FPRegisterID FPRegisterID;
@@ -477,6 +487,17 @@ public:
private:
+ struct LinkRecord {
+ LinkRecord(intptr_t from, intptr_t to)
+ : from(from)
+ , to(to)
+ {
+ }
+
+ intptr_t from;
+ intptr_t to;
+ };
+
// ARMv7, Appx-A.6.3
bool BadReg(RegisterID reg)
{
@@ -574,6 +595,7 @@ private:
OP_SUB_SP_imm_T1 = 0xB080,
OP_BKPT = 0xBE00,
OP_IT = 0xBF00,
+ OP_NOP_T1 = 0xBF00,
} OpcodeID;
typedef enum {
@@ -608,6 +630,7 @@ private:
OP_MOV_imm_T3 = 0xF240,
OP_SUB_imm_T4 = 0xF2A0,
OP_MOVT = 0xF2C0,
+ OP_NOP_T2a = 0xF3AF,
OP_LDRH_reg_T2 = 0xF830,
OP_LDRH_imm_T3 = 0xF830,
OP_STR_imm_T4 = 0xF840,
@@ -626,6 +649,7 @@ private:
typedef enum {
OP_B_T4b = 0x9000,
+ OP_NOP_T2b = 0x8000,
} OpcodeID2;
struct FourFours {
@@ -1481,6 +1505,15 @@ public:
void* executableCopy(ExecutablePool* allocator)
{
void* copy = m_formatter.executableCopy(allocator);
+
+ unsigned jumpCount = m_jumpsToLink.size();
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].from);
+ uint16_t* target = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].to);
+ linkJumpAbsolute(location, target);
+ }
+ m_jumpsToLink.clear();
+
ASSERT(copy);
return copy;
}
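The pattern introduced here deserves a note: jumps between labels in the same buffer are only recorded while assembling, then resolved in executableCopy once the code's final address is known. A simplified, self-contained skeleton of that flow (all names hypothetical):

#include <cstdint>
#include <vector>

struct LinkRecord { intptr_t from, to; };

struct DeferredLinker {
    std::vector<LinkRecord> jumpsToLink;

    void linkJump(intptr_t from, intptr_t to) { jumpsToLink.push_back({ from, to }); }

    void finalize(char* copy)
    {
        // Patch every recorded jump against the copied code's base address.
        for (size_t i = 0; i < jumpsToLink.size(); ++i)
            patch(copy + jumpsToLink[i].from, copy + jumpsToLink[i].to);
        jumpsToLink.clear(); // keeps the destructor assertion above happy
    }

    static void patch(char* location, char* target)
    {
        (void)location; (void)target; // stand-in for linkJumpAbsolute
    }
};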
@@ -1503,11 +1536,7 @@ public:
{
ASSERT(to.m_offset != -1);
ASSERT(from.m_offset != -1);
-
- uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(m_formatter.data()) + from.m_offset);
- intptr_t relative = to.m_offset - from.m_offset;
-
- linkWithOffset(location, relative);
+ m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset));
}
static void linkJump(void* code, JmpSrc from, void* to)
@@ -1515,9 +1544,7 @@ public:
ASSERT(from.m_offset != -1);
uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
- intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(location);
-
- linkWithOffset(location, relative);
+ linkJumpAbsolute(location, to);
}
// bah, this method should really be static, since it is used by the LinkBuffer.
@@ -1541,10 +1568,9 @@ public:
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
- intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
- linkWithOffset(reinterpret_cast<uint16_t*>(from), relative);
+ linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
- ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 2, 2 * sizeof(uint16_t));
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
}
static void relinkCall(void* from, void* to)
@@ -1613,14 +1639,14 @@ private:
static void setInt32(void* code, uint32_t value)
{
uint16_t* location = reinterpret_cast<uint16_t*>(code);
+ ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
- uint16_t lo16 = value;
- uint16_t hi16 = value >> 16;
-
- spliceHi5(location - 4, lo16);
- spliceLo11(location - 3, lo16);
- spliceHi5(location - 2, hi16);
- spliceLo11(location - 1, hi16);
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
+ location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
+ location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t));
}
@@ -1630,41 +1656,89 @@ private:
setInt32(code, reinterpret_cast<uint32_t>(value));
}
- // Linking & patching:
- // This method assumes that the JmpSrc being linked is a T4 b instruction.
- static void linkWithOffset(uint16_t* instruction, intptr_t relative)
- {
- // Currently branches > 16m = mostly deathy.
- if (((relative << 7) >> 7) != relative) {
- // FIXME: This CRASH means we cannot turn the JIT on by default on arm-v7.
- fprintf(stderr, "Error: Cannot link T4b.\n");
- CRASH();
- }
-
- // ARM encoding for the top two bits below the sign bit is 'peculiar'.
- if (relative >= 0)
- relative ^= 0xC00000;
+ static bool isB(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
+ }
- // All branch offsets should be an even distance.
- ASSERT(!(relative & 1));
+ static bool isBX(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return (instruction[0] & 0xff87) == OP_BX;
+ }
- int word1 = ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
- int word2 = ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+ static bool isMOV_imm_T3(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
+ }
- instruction[-2] = OP_B_T4a | word1;
- instruction[-1] = OP_B_T4b | word2;
+ static bool isMOVT(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
}
- // These functions can be used to splice 16-bit immediates back into previously generated instructions.
- static void spliceHi5(uint16_t* where, uint16_t what)
+ static bool isNOP_T1(void* address)
{
- uint16_t pattern = (what >> 12) | ((what & 0x0800) >> 1);
- *where = (*where & 0xFBF0) | pattern;
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return instruction[0] == OP_NOP_T1;
}
- static void spliceLo11(uint16_t* where, uint16_t what)
+
+ static bool isNOP_T2(void* address)
{
- uint16_t pattern = ((what & 0x0700) << 4) | (what & 0x00FF);
- *where = (*where & 0x8F00) | pattern;
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
+ }
+
+ static void linkJumpAbsolute(uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ ASSERT( (isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+ || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)) );
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ if (((relative << 7) >> 7) == relative) {
+ // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+ if (relative >= 0)
+ relative ^= 0xC00000;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ // There may be a better way to fix this, but right now put the NOPs first, since in the
+ // case of a conditional branch this will be coming after an ITTT predicating *three*
+ // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
+ // variable width encoding - the previous instruction might *look* like an ITTT but
+ // actually be the second half of a 2-word op.
+ instruction[-5] = OP_NOP_T1;
+ instruction[-4] = OP_NOP_T2a;
+ instruction[-3] = OP_NOP_T2b;
+ instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+ instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+ } else {
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+ instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+ instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+ instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+ }
+ }
+
+ static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
+ {
+ return op | (imm.m_value.i << 10) | imm.m_value.imm4;
+ }
+ static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
+ {
+ return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
}
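The two helpers above scatter a 16-bit immediate across the halfwords of a MOVW/MOVT-style Thumb-2 encoding, where imm16 = imm4:i:imm3:imm8. A self-contained restatement of the bit layout:

#include <cstdint>

static uint16_t firstHalfword(uint16_t op, uint16_t v)
{
    uint16_t imm4 = (v >> 12) & 0xf; // bits 15..12 -> low nibble of halfword 1
    uint16_t i = (v >> 11) & 0x1;    // bit 11      -> bit 10 of halfword 1
    return static_cast<uint16_t>(op | (i << 10) | imm4);
}

static uint16_t secondHalfword(uint16_t rd, uint16_t v)
{
    uint16_t imm3 = (v >> 8) & 0x7;  // bits 10..8 -> bits 14..12 of halfword 2
    uint16_t imm8 = v & 0xff;        // bits 7..0  -> bits 7..0 of halfword 2
    return static_cast<uint16_t>((imm3 << 12) | (rd << 8) | imm8);
}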
class ARMInstructionFormatter {
@@ -1723,8 +1797,11 @@ private:
void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
{
- m_buffer.putShort(op | (imm.m_value.i << 10) | imm4);
- m_buffer.putShort((imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8);
+ ARMThumbImmediate newImm = imm;
+ newImm.m_value.imm4 = imm4;
+
+ m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
+ m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
}
void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
@@ -1749,10 +1826,12 @@ private:
private:
AssemblerBuffer m_buffer;
} m_formatter;
+
+ Vector<LinkRecord> m_jumpsToLink;
};
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
#endif // ARMAssembler_h
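In outline, the linkJumpAbsolute added in this file chooses between two five-halfword sequences: when the displacement fits a T4 branch it plants NOPs followed by B.T4, otherwise it materializes the target in ip via MOVW/MOVT and finishes with BX. The range test reuses the idiom from the old linkWithOffset (and, like the surrounding code, assumes arithmetic right shift on signed values):

#include <cstdint>

// A T4 branch encodes a signed 25-bit byte displacement; shifting up 7
// bits and arithmetically back recovers the value only when it fits.
static bool fitsInT4Branch(int32_t relative)
{
    return ((relative << 7) >> 7) == relative;
}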
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AbstractMacroAssembler.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AbstractMacroAssembler.h
index 525fe98..198e8d1 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -173,16 +173,16 @@ public:
struct Imm32 {
explicit Imm32(int32_t value)
: m_value(value)
-#if PLATFORM(ARM)
+#if CPU(ARM)
, m_isPointer(false)
#endif
{
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
explicit Imm32(ImmPtr ptr)
: m_value(ptr.asIntptr())
-#if PLATFORM(ARM)
+#if CPU(ARM)
, m_isPointer(true)
#endif
{
@@ -190,7 +190,7 @@ public:
#endif
int32_t m_value;
-#if PLATFORM(ARM)
+#if CPU(ARM)
// We rely on being able to regenerate code to recover exception handling
// information. Since ARMv7 supports 16-bit immediates there is a danger
// that, if pointer values change, the layout of the generated code will change.
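The m_isPointer comment above is about a layout hazard: if an immediate's value decides between a short and a long encoding, regenerating the same code with a different pointer value could change instruction sizes and invalidate recorded offsets. A hedged illustration of the value-dependent choice being suppressed (emitMovw/emitMovt are hypothetical stand-ins, not JSC API):

#include <cstdint>

static void emitMovw(uint16_t) {} // stand-ins for real instruction encoders
static void emitMovt(uint16_t) {}

static void moveImmediate(uint32_t value, bool isPointer)
{
    if (!isPointer && value <= 0xffff) {
        emitMovw(static_cast<uint16_t>(value)); // one instruction suffices
        return;
    }
    // Pointers always take the two-instruction form, so the generated code
    // keeps the same size whatever the pointer's runtime value happens to be.
    emitMovw(static_cast<uint16_t>(value));
    emitMovt(static_cast<uint16_t>(value >> 16));
}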
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssembler.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssembler.h
index 2743ab4..76bd205 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssembler.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssembler.h
@@ -30,19 +30,19 @@
#if ENABLE(ASSEMBLER)
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
-#elif PLATFORM(X86)
+#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
-#elif PLATFORM(X86_64)
+#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
@@ -60,7 +60,7 @@ public:
using MacroAssemblerBase::jump;
using MacroAssemblerBase::branch32;
using MacroAssemblerBase::branch16;
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
using MacroAssemblerBase::branchPtr;
using MacroAssemblerBase::branchTestPtr;
#endif
@@ -133,7 +133,8 @@ public:
// Ptr methods
// On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
-#if !PLATFORM(X86_64)
+ // FIXME: should this use a test for 32-bitness instead of this specific exception?
+#if !CPU(X86_64)
void addPtr(RegisterID src, RegisterID dest)
{
add32(src, dest);
@@ -179,16 +180,6 @@ public:
or32(imm, dest);
}
- void rshiftPtr(RegisterID shift_amount, RegisterID dest)
- {
- rshift32(shift_amount, dest);
- }
-
- void rshiftPtr(Imm32 imm, RegisterID dest)
- {
- rshift32(imm, dest);
- }
-
void subPtr(RegisterID src, RegisterID dest)
{
sub32(src, dest);
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.cpp
index d726ecd..b5b20fa 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.cpp
@@ -26,11 +26,11 @@
#include "config.h"
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
-#if PLATFORM(LINUX)
+#if OS(LINUX)
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -43,7 +43,7 @@ namespace JSC {
static bool isVFPPresent()
{
-#if PLATFORM(LINUX)
+#if OS(LINUX)
int fd = open("/proc/self/auxv", O_RDONLY);
if (fd > 0) {
Elf32_auxv_t aux;
@@ -62,7 +62,8 @@ static bool isVFPPresent()
const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
-#if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT
+#if CPU(ARMV5_OR_LOWER)
+/* On ARMv5 and below, natural alignment is required. */
void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
{
ARMWord op2;
@@ -91,4 +92,4 @@ void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, Register
}
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
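For reference, the isVFPPresent probe that this file guards with OS(LINUX) works by scanning the ELF auxiliary vector for the VFP hardware-capability bit. A standalone sketch, under the assumption that HWCAP_VFP is 0x40 (its usual ARM Linux value):

#include <elf.h>
#include <fcntl.h>
#include <unistd.h>

static bool hasVFP()
{
    int fd = open("/proc/self/auxv", O_RDONLY);
    if (fd < 0)
        return false;
    bool found = false;
    Elf32_auxv_t aux;
    while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
        if (aux.a_type == AT_HWCAP) {
            found = aux.a_un.a_val & 0x40; // HWCAP_VFP on ARM Linux
            break;
        }
    }
    close(fd);
    return found;
}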
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.h
index aa8cbb0..21b8de8 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -30,7 +30,7 @@
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#include "ARMAssembler.h"
#include "AbstractMacroAssembler.h"
@@ -38,6 +38,9 @@
namespace JSC {
class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+ static const int DoubleConditionMask = 0x0f;
+ static const int DoubleConditionBitSpecial = 0x10;
+ COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
public:
enum Condition {
Equal = ARMAssembler::EQ,
@@ -57,14 +60,24 @@ public:
};
enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
DoubleEqual = ARMAssembler::EQ,
+ DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
DoubleGreaterThan = ARMAssembler::GT,
DoubleGreaterThanOrEqual = ARMAssembler::GE,
- DoubleLessThan = ARMAssembler::LT,
- DoubleLessThanOrEqual = ARMAssembler::LE,
+ DoubleLessThan = ARMAssembler::CC,
+ DoubleLessThanOrEqual = ARMAssembler::LS,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
+ DoubleNotEqualOrUnordered = ARMAssembler::NE,
+ DoubleGreaterThanOrUnordered = ARMAssembler::HI,
+ DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
+ DoubleLessThanOrUnordered = ARMAssembler::LT,
+ DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
};
static const RegisterID stackPointerRegister = ARMRegisters::sp;
+ static const RegisterID linkRegister = ARMRegisters::lr;
static const Scale ScalePtr = TimesFour;
@@ -105,14 +118,18 @@ public:
m_assembler.ands_r(dest, dest, w);
}
- void lshift32(Imm32 imm, RegisterID dest)
+ void lshift32(RegisterID shift_amount, RegisterID dest)
{
- m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
+ ARMWord w = ARMAssembler::getOp2(0x1f);
+ ASSERT(w != ARMAssembler::INVALID_IMM);
+ m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
+
+ m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
}
- void lshift32(RegisterID shift_amount, RegisterID dest)
+ void lshift32(Imm32 imm, RegisterID dest)
{
- m_assembler.movs_r(dest, m_assembler.lsl_r(dest, shift_amount));
+ m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
}
void mul32(RegisterID src, RegisterID dest)
@@ -130,6 +147,11 @@ public:
m_assembler.muls_r(dest, src, ARMRegisters::S0);
}
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0));
+ }
+
void not32(RegisterID dest)
{
m_assembler.mvns_r(dest, dest);
@@ -147,7 +169,11 @@ public:
void rshift32(RegisterID shift_amount, RegisterID dest)
{
- m_assembler.movs_r(dest, m_assembler.asr_r(dest, shift_amount));
+ ARMWord w = ARMAssembler::getOp2(0x1f);
+ ASSERT(w != ARMAssembler::INVALID_IMM);
+ m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
+
+ m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
}
void rshift32(Imm32 imm, RegisterID dest)
@@ -198,7 +224,7 @@ public:
m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
-#if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT
+#if CPU(ARMV5_OR_LOWER)
void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
@@ -504,6 +530,13 @@ public:
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
+ Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+ or32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
void breakpoint()
{
m_assembler.bkpt(0);
@@ -530,7 +563,7 @@ public:
void ret()
{
- pop(ARMRegisters::pc);
+ m_assembler.mov_r(ARMRegisters::pc, linkRegister);
}
void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
@@ -547,6 +580,25 @@ public:
m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
}
+ void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ // ARM doesn't have byte registers
+ set32(cond, left, right, dest);
+ }
+
+ void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
+ {
+ // ARM doesn't have byte registers
+ load32(left, ARMRegisters::S1);
+ set32(cond, ARMRegisters::S1, right, dest);
+ }
+
+ void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ // ARM doesn't have byte registers
+ set32(cond, left, right, dest);
+ }
+
void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
{
load32(address, ARMRegisters::S1);
@@ -558,6 +610,12 @@ public:
m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
}
+ void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ // ARM doesn't have byte registers
+ setTest32(cond, address, mask, dest);
+ }
+
void add32(Imm32 imm, RegisterID src, RegisterID dest)
{
m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
@@ -665,6 +723,12 @@ public:
m_assembler.doubleTransfer(true, dest, address.base, address.offset);
}
+ void loadDouble(void* address, FPRegisterID dest)
+ {
+ m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address);
+ m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0);
+ }
+
void storeDouble(FPRegisterID src, ImplicitAddress address)
{
m_assembler.doubleTransfer(false, src, address.base, address.offset);
@@ -681,6 +745,18 @@ public:
addDouble(ARMRegisters::SD0, dest);
}
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fdivd_r(dest, dest, src);
+ }
+
+ void divDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT_NOT_REACHED(); // Untested
+ loadDouble(src, ARMRegisters::SD0);
+ divDouble(ARMRegisters::SD0, dest);
+ }
+
void subDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.fsubd_r(dest, dest, src);
@@ -709,11 +785,30 @@ public:
m_assembler.fsitod_r(dest, dest);
}
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT_NOT_REACHED(); // Untested
+ // flds is not worth the effort here
+ load32(src, ARMRegisters::S1);
+ convertInt32ToDouble(ARMRegisters::S1, dest);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ ASSERT_NOT_REACHED(); // Untested
+ // flds is not worth the effort here
+ m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr);
+ m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0);
+ convertInt32ToDouble(ARMRegisters::S1, dest);
+ }
+
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
m_assembler.fcmpd_r(left, right);
m_assembler.fmstat();
- return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond)));
+ if (cond & DoubleConditionBitSpecial)
+ m_assembler.cmp_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
+ return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
}
// Truncates 'src' to an integer, and places the result in 'dest'.
@@ -728,6 +823,29 @@ public:
return jump();
}
+ // Converts 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+ {
+ m_assembler.ftosid_r(ARMRegisters::SD0, src);
+ m_assembler.fmrs_r(dest, ARMRegisters::SD0);
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ m_assembler.fsitod_r(ARMRegisters::SD0, ARMRegisters::SD0);
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));
+
+ // If the result is zero, it might have been -0.0, and 0.0 equals -0.0
+ failureCases.append(branchTest32(Zero, dest));
+ }
+
+ void zeroDouble(FPRegisterID srcDest)
+ {
+ m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0));
+ convertInt32ToDouble(ARMRegisters::S0, srcDest);
+ }
+
protected:
ARMAssembler::Condition ARMCondition(Condition cond)
{
@@ -746,11 +864,9 @@ protected:
void prepareCall()
{
- ensureSpace(3 * sizeof(ARMWord), sizeof(ARMWord));
+ ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
- // S0 might be used for parameter passing
- m_assembler.add_r(ARMRegisters::S1, ARMRegisters::pc, ARMAssembler::OP2_IMM | 0x4);
- m_assembler.push_r(ARMRegisters::S1);
+ m_assembler.mov_r(linkRegister, ARMRegisters::pc);
}
void call32(RegisterID base, int32_t offset)
@@ -812,6 +928,6 @@ private:
}
-#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#endif // MacroAssemblerARM_h
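The extra branchTest32(Zero, dest) in branchConvertDoubleToInt32 above exists because the round-trip compare alone cannot catch -0.0. A tiny standalone demonstration of why:

#include <cstdio>

int main()
{
    double d = -0.0;
    int i = static_cast<int>(d);          // i == 0
    double back = static_cast<double>(i); // back == +0.0
    // +0.0 compares equal to -0.0, so the round-trip check passes and only
    // the explicit test for a zero integer result catches this case.
    std::printf("%d\n", back == d);       // prints 1
    return 0;
}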
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index a549604..532a9cf 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -93,13 +93,21 @@ public:
Zero = ARMv7Assembler::ConditionEQ,
NonZero = ARMv7Assembler::ConditionNE
};
-
enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
DoubleEqual = ARMv7Assembler::ConditionEQ,
+ DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
DoubleGreaterThan = ARMv7Assembler::ConditionGT,
DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
DoubleLessThan = ARMv7Assembler::ConditionLO,
DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
+ DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
+ DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
+ DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
+ DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
+ DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
};
static const RegisterID stackPointerRegister = ARMRegisters::sp;
@@ -189,14 +197,19 @@ public:
}
}
- void lshift32(Imm32 imm, RegisterID dest)
+ void lshift32(RegisterID shift_amount, RegisterID dest)
{
- m_assembler.lsl(dest, dest, imm.m_value);
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
+
+ m_assembler.lsl(dest, dest, dataTempRegister);
}
- void lshift32(RegisterID shift_amount, RegisterID dest)
+ void lshift32(Imm32 imm, RegisterID dest)
{
- m_assembler.lsl(dest, dest, shift_amount);
+ m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
}
void mul32(RegisterID src, RegisterID dest)
@@ -233,12 +246,17 @@ public:
void rshift32(RegisterID shift_amount, RegisterID dest)
{
- m_assembler.asr(dest, dest, shift_amount);
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
+
+ m_assembler.asr(dest, dest, dataTempRegister);
}
void rshift32(Imm32 imm, RegisterID dest)
{
- m_assembler.asr(dest, dest, imm.m_value);
+ m_assembler.asr(dest, dest, imm.m_value & 0x1f);
}
void sub32(RegisterID src, RegisterID dest)
@@ -531,6 +549,23 @@ public:
{
m_assembler.vcmp_F64(left, right);
m_assembler.vmrs_APSR_nzcv_FPSCR();
+
+ if (cond == DoubleNotEqual) {
+ // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ return result;
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ // We get here if either unordered, or equal.
+ Jump result = makeJump();
+ notEqual.link(this);
+ return result;
+ }
return makeBranch(cond);
}
@@ -990,13 +1025,15 @@ public:
protected:
ARMv7Assembler::JmpSrc makeJump()
{
- return m_assembler.b();
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return m_assembler.bx(dataTempRegister);
}
ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
{
- m_assembler.it(cond);
- return m_assembler.b();
+ m_assembler.it(cond, true, true);
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return m_assembler.bx(dataTempRegister);
}
ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
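The DoubleNotEqual special case in branchDouble above implements ordered inequality: after a VFP compare, the bare NE condition is also true for unordered (NaN) operands, so a VS-guarded jump filters the unordered case out first. The intended semantics, restated in plain C++:

#include <cmath>
#include <cstdio>

static bool doubleNotEqual(double a, double b)
{
    if (std::isnan(a) || std::isnan(b))
        return false; // unordered: the branch must not be taken
    return a != b;
}

int main()
{
    std::printf("%d %d\n", doubleNotEqual(1.0, 2.0), doubleNotEqual(NAN, 1.0));
    return 0; // prints "1 0"
}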
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index 568260a..cae8bf6 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -37,7 +37,7 @@
// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
// instruction address on the platform (for example, check any alignment requirements).
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
// into the processor are decorated with the bottom bit set, indicating that this is
// thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
@@ -69,7 +69,13 @@ public:
template<typename FunctionType>
explicit FunctionPtr(FunctionType* value)
+#if COMPILER(RVCT)
+ // RVCT compiler needs C-style cast as it fails with the following error
+ // Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
+ : m_value((void*)(value))
+#else
: m_value(reinterpret_cast<void*>(value))
+#endif
{
ASSERT_VALID_CODE_POINTER(m_value);
}
@@ -124,7 +130,7 @@ public:
}
explicit MacroAssemblerCodePtr(void* value)
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
// Decorate the pointer as a thumb code pointer.
: m_value(reinterpret_cast<char*>(value) + 1)
#else
@@ -141,7 +147,7 @@ public:
}
void* executableAddress() const { return m_value; }
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
// To use this pointer as a data address remove the decoration.
void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
#else
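A compact restatement of the decoration round trip MacroAssemblerCodePtr maintains on Thumb-2: executable addresses carry bit 0 set to select Thumb state, and dataLocation() strips it again:

#include <cassert>
#include <cstdint>

int main()
{
    alignas(2) static char code[2];
    void* executable = reinterpret_cast<char*>(code) + 1; // decorated: bit 0 set
    void* data = reinterpret_cast<char*>(executable) - 1; // decoration removed
    assert(reinterpret_cast<uintptr_t>(executable) & 1);
    assert(data == code);
    return 0;
}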
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86.h
index 6e96240..ca7c31a 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -28,7 +28,7 @@
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(X86)
+#if ENABLE(ASSEMBLER) && CPU(X86)
#include "MacroAssemblerX86Common.h"
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86Common.h
index 5ebefa7..449df86 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86Common.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86Common.h
@@ -36,6 +36,10 @@
namespace JSC {
class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
+ static const int DoubleConditionBitInvert = 0x10;
+ static const int DoubleConditionBitSpecial = 0x20;
+ static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
public:
enum Condition {
@@ -56,13 +60,24 @@ public:
};
enum DoubleCondition {
- DoubleEqual = X86Assembler::ConditionE,
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
DoubleNotEqual = X86Assembler::ConditionNE,
DoubleGreaterThan = X86Assembler::ConditionA,
DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
- DoubleLessThan = X86Assembler::ConditionB,
- DoubleLessThanOrEqual = X86Assembler::ConditionBE,
+ DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
+ DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = X86Assembler::ConditionE,
+ DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
+ DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
+ DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
+ DoubleLessThanOrUnordered = X86Assembler::ConditionB,
+ DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
};
+ COMPILE_ASSERT(
+ !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
+ DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
static const RegisterID stackPointerRegister = X86Registers::esp;
@@ -416,20 +431,35 @@ public:
void convertInt32ToDouble(Address src, FPRegisterID dest)
{
+ ASSERT(isSSE2Present());
m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
}
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
ASSERT(isSSE2Present());
- m_assembler.ucomisd_rr(right, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
- Jump branchDouble(DoubleCondition cond, FPRegisterID left, Address right)
- {
- m_assembler.ucomisd_mr(right.offset, right.base, left);
- return Jump(m_assembler.jCC(x86Condition(cond)));
+ if (cond & DoubleConditionBitInvert)
+ m_assembler.ucomisd_rr(left, right);
+ else
+ m_assembler.ucomisd_rr(right, left);
+
+ if (cond == DoubleEqual) {
+ Jump isUnordered(m_assembler.jp());
+ Jump result = Jump(m_assembler.je());
+ isUnordered.link(this);
+ return result;
+ } else if (cond == DoubleNotEqualOrUnordered) {
+ Jump isUnordered(m_assembler.jp());
+ Jump isEqual(m_assembler.je());
+ isUnordered.link(this);
+ Jump result = jump();
+ isEqual.link(this);
+ return result;
+ }
+
+ ASSERT(!(cond & DoubleConditionBitSpecial));
+ return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
}
// Truncates 'src' to an integer, and places the result in 'dest'.
@@ -443,6 +473,25 @@ public:
return branch32(Equal, dest, Imm32(0x80000000));
}
+ // Converts 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ failureCases.append(branchTest32(Zero, dest));
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ convertInt32ToDouble(dest, fpTemp);
+ m_assembler.ucomisd_rr(fpTemp, src);
+ failureCases.append(m_assembler.jp());
+ failureCases.append(m_assembler.jne());
+ }
+
void zeroDouble(FPRegisterID srcDest)
{
ASSERT(isSSE2Present());
@@ -493,7 +542,7 @@ public:
m_assembler.movl_i32r(imm.m_value, dest);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void move(RegisterID src, RegisterID dest)
{
// Note: on 64-bit this is a full register move; perhaps it would be
@@ -509,7 +558,8 @@ public:
void swap(RegisterID reg1, RegisterID reg2)
{
- m_assembler.xchgq_rr(reg1, reg2);
+ if (reg1 != reg2)
+ m_assembler.xchgq_rr(reg1, reg2);
}
void signExtend32ToPtr(RegisterID src, RegisterID dest)
@@ -889,18 +939,13 @@ protected:
return static_cast<X86Assembler::Condition>(cond);
}
- X86Assembler::Condition x86Condition(DoubleCondition cond)
- {
- return static_cast<X86Assembler::Condition>(cond);
- }
-
private:
// Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
// x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
friend class MacroAssemblerX86;
-#if PLATFORM(X86)
-#if PLATFORM(MAC)
+#if CPU(X86)
+#if OS(MAC_OS_X)
// All X86 Macs are guaranteed to support at least SSE2,
static bool isSSE2Present()
@@ -908,7 +953,7 @@ private:
return true;
}
-#else // PLATFORM(MAC)
+#else // OS(MAC_OS_X)
enum SSE2CheckState {
NotCheckedSSE2,
@@ -951,8 +996,8 @@ private:
static SSE2CheckState s_sse2CheckState;
-#endif // PLATFORM(MAC)
-#elif !defined(NDEBUG) // PLATFORM(X86)
+#endif // OS(MAC_OS_X)
+#elif !defined(NDEBUG) // CPU(X86)
// On x86-64 we should never be checking for SSE2 in a non-debug build,
// but in non-debug builds we add this method to keep the asserts above happy.
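The DoubleEqual handling in branchDouble earlier in this file exists because ucomisd reports an unordered result by setting ZF, PF and CF all at once, so a bare JE would treat NaN operands as equal; the emitted JP/JE pair yields ordered equality. (The DoubleConditionBitInvert cases similarly swap the ucomisd operands so only the NaN-safe "above"-style conditions are needed.) The intended semantics in plain C++:

#include <cmath>
#include <cstdio>

static bool doubleEqual(double a, double b)
{
    if (std::isnan(a) || std::isnan(b))
        return false; // PF would be set: unordered operands are never equal
    return a == b;
}

int main()
{
    std::printf("%d %d\n", doubleEqual(2.0, 2.0), doubleEqual(NAN, NAN));
    return 0; // prints "1 0"
}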
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index 0f95fe6..ec93f8c 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -28,7 +28,7 @@
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && PLATFORM(X86_64)
+#if ENABLE(ASSEMBLER) && CPU(X86_64)
#include "MacroAssemblerX86Common.h"
@@ -192,33 +192,6 @@ public:
m_assembler.orq_ir(imm.m_value, dest);
}
- void rshiftPtr(RegisterID shift_amount, RegisterID dest)
- {
- // On x86 we can only shift by ecx; if asked to shift by another register we'll
- // need rejig the shift amount into ecx first, and restore the registers afterwards.
- if (shift_amount != X86Registers::ecx) {
- swap(shift_amount, X86Registers::ecx);
-
- // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
- if (dest == shift_amount)
- m_assembler.sarq_CLr(X86Registers::ecx);
- // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
- else if (dest == X86Registers::ecx)
- m_assembler.sarq_CLr(shift_amount);
- // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
- else
- m_assembler.sarq_CLr(dest);
-
- swap(shift_amount, X86Registers::ecx);
- } else
- m_assembler.sarq_CLr(dest);
- }
-
- void rshiftPtr(Imm32 imm, RegisterID dest)
- {
- m_assembler.sarq_i8r(imm.m_value, dest);
- }
-
void subPtr(RegisterID src, RegisterID dest)
{
m_assembler.subq_rr(src, dest);
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/X86Assembler.h b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/X86Assembler.h
index cbbaaa5..ab3d05f 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/assembler/X86Assembler.h
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/assembler/X86Assembler.h
@@ -28,7 +28,7 @@
#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
#include "AssemblerBuffer.h"
#include <stdint.h>
@@ -50,7 +50,7 @@ namespace X86Registers {
esi,
edi,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
r8,
r9,
r10,
@@ -118,12 +118,12 @@ private:
OP_XOR_GvEv = 0x33,
OP_CMP_EvGv = 0x39,
OP_CMP_GvEv = 0x3B,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
PRE_REX = 0x40,
#endif
OP_PUSH_EAX = 0x50,
OP_POP_EAX = 0x58,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
OP_MOVSXD_GvEv = 0x63,
#endif
PRE_OPERAND_SIZE = 0x66,
@@ -296,7 +296,7 @@ public:
// Arithmetic operations:
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void adcl_im(int imm, void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -346,7 +346,7 @@ public:
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void addq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
@@ -423,7 +423,7 @@ public:
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void andq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
@@ -509,7 +509,7 @@ public:
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void orq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
@@ -575,7 +575,7 @@ public:
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void subq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
@@ -641,7 +641,7 @@ public:
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void xorq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
@@ -689,7 +689,7 @@ public:
m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void sarq_CLr(RegisterID dst)
{
m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
@@ -789,7 +789,7 @@ public:
m_formatter.immediate32(imm);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void cmpq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
@@ -897,7 +897,7 @@ public:
m_formatter.immediate32(imm);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void testq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
@@ -971,7 +971,7 @@ public:
m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void xchgq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
@@ -1001,7 +1001,7 @@ public:
void movl_mEAX(void* addr)
{
m_formatter.oneByteOp(OP_MOV_EAXOv);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
m_formatter.immediate32(reinterpret_cast<int>(addr));
@@ -1038,14 +1038,14 @@ public:
void movl_EAXm(void* addr)
{
m_formatter.oneByteOp(OP_MOV_OvEAX);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void movq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
@@ -1157,7 +1157,7 @@ public:
{
m_formatter.oneByteOp(OP_LEA, dst, base, offset);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void leaq_mr(int offset, RegisterID base, RegisterID dst)
{
m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
@@ -1323,7 +1323,7 @@ public:
m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void cvtsi2sd_mr(void* address, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
@@ -1343,7 +1343,7 @@ public:
m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void movq_rr(XMMRegisterID src, RegisterID dst)
{
m_formatter.prefix(PRE_SSE_66);
@@ -1369,7 +1369,7 @@ public:
m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void movsd_mr(void* address, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
@@ -1535,7 +1535,7 @@ public:
static void repatchLoadPtrToLEA(void* where)
{
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
// On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
// Skip over the prefix byte.
where = reinterpret_cast<char*>(where) + 1;
@@ -1679,7 +1679,7 @@ private:
memoryModRM(reg, base, index, scale, offset);
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
{
m_buffer.ensureSpace(maxInstructionSize);
@@ -1722,7 +1722,7 @@ private:
memoryModRM(reg, base, index, scale, offset);
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
{
m_buffer.ensureSpace(maxInstructionSize);
@@ -1732,7 +1732,7 @@ private:
}
#endif
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
// Quad-word-sized operands:
//
// Used to format 64-bit operations, planting a REX.w prefix.
@@ -1891,7 +1891,7 @@ private:
static const RegisterID noBase = X86Registers::ebp;
static const RegisterID hasSib = X86Registers::esp;
static const RegisterID noIndex = X86Registers::esp;
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
static const RegisterID noBase2 = X86Registers::r13;
static const RegisterID hasSib2 = X86Registers::r12;
@@ -1967,7 +1967,7 @@ private:
void memoryModRM(int reg, RegisterID base, int offset)
{
// A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if ((base == hasSib) || (base == hasSib2)) {
#else
if (base == hasSib) {
@@ -1982,7 +1982,7 @@ private:
m_buffer.putIntUnchecked(offset);
}
} else {
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if (!offset && (base != noBase) && (base != noBase2))
#else
if (!offset && (base != noBase))
@@ -2001,7 +2001,7 @@ private:
void memoryModRM_disp32(int reg, RegisterID base, int offset)
{
// A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if ((base == hasSib) || (base == hasSib2)) {
#else
if (base == hasSib) {
@@ -2018,7 +2018,7 @@ private:
{
ASSERT(index != noIndex);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if (!offset && (base != noBase) && (base != noBase2))
#else
if (!offset && (base != noBase))
@@ -2033,7 +2033,7 @@ private:
}
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void memoryModRM(int reg, void* address)
{
// noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
@@ -2048,6 +2048,6 @@ private:
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(X86)
+#endif // ENABLE(ASSEMBLER) && CPU(X86)
#endif // X86Assembler_h
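The esp/r12 special case that memoryModRM handles above comes from the ModRM format itself: an r/m field of 100 selects SIB addressing, so using those registers as a base requires a SIB byte with the "no index" encoding. A minimal illustration for a [base + disp8] operand (hypothetical encoder; register numbers are the usual x86 ids):

#include <cstdint>
#include <vector>

static void emitModRMDisp8(std::vector<uint8_t>& out, int reg, int base, int8_t disp)
{
    const int hasSib = 4; // esp; r12 shares the same low three bits
    out.push_back(static_cast<uint8_t>(0x40 | (reg << 3) | (base & 7))); // mod=01: disp8 follows
    if ((base & 7) == hasSib)
        out.push_back(0x24); // SIB: scale=1, index=none (100), base=esp/r12
    out.push_back(static_cast<uint8_t>(disp));
}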