Diffstat (limited to 'src/3rdparty/webkit/JavaScriptCore/assembler')
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/ARMAssembler.cpp                  353
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/ARMAssembler.h                    706
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/ARMv7Assembler.h                   41
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/AbstractMacroAssembler.h          322
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBuffer.h                  17
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h 305
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/LinkBuffer.h                      195
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h                    4
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARM.h               797
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARMv7.h              19
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerCodeRef.h            10
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86.h                18
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86_64.h             25
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/RepatchBuffer.h                   136
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/X86Assembler.h                     44
15 files changed, 2684 insertions(+), 308 deletions(-)
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/ARMAssembler.cpp b/src/3rdparty/webkit/JavaScriptCore/assembler/ARMAssembler.cpp
new file mode 100644
index 0000000..dafc482
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM)
+
+#include "ARMAssembler.h"
+
+namespace JSC {
+
+// Patching helpers
+
+ARMWord* ARMAssembler::getLdrImmAddress(ARMWord* insn, uint32_t* constPool)
+{
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
+
+ if (constPool && (*insn & 0x1))
+ return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
+
+ ARMWord addr = reinterpret_cast<ARMWord>(insn) + 2 * sizeof(ARMWord);
+ if (*insn & DT_UP)
+ return reinterpret_cast<ARMWord*>(addr + (*insn & SDT_OFFSET_MASK));
+ else
+ return reinterpret_cast<ARMWord*>(addr - (*insn & SDT_OFFSET_MASK));
+}
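
As an aside (editorial, not part of the patch): the 'insn + 2 * sizeof(ARMWord)' term reflects the classic ARM pipeline, where a load via [pc +/- imm] sees PC as the instruction's own address plus 8 bytes. The literal's address is therefore the instruction pointer advanced by two words before the 12-bit offset is applied. A minimal standalone check of that arithmetic:

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    uintptr_t insn = 0x8000;             // address of the ldr instruction
    uintptr_t pcAsSeenByLoad = insn + 8; // ARM pipeline: PC reads as insn + 8
    uintptr_t literal = pcAsSeenByLoad + 0x10; // DT_UP set, 12-bit offset 0x10
    assert(literal == insn + 2 * sizeof(uint32_t) + 0x10);
    return 0;
}
```
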
+
+void ARMAssembler::linkBranch(void* code, JmpSrc from, void* to)
+{
+ ARMWord* insn = reinterpret_cast<ARMWord*>(code) + (from.m_offset / sizeof(ARMWord));
+
+ if (!from.m_latePatch) {
+ int diff = reinterpret_cast<ARMWord*>(to) - reinterpret_cast<ARMWord*>(insn + 2);
+
+ if (diff <= BOFFSET_MAX && diff >= BOFFSET_MIN) {
+ *insn = B | getConditionalField(*insn) | (diff & BRANCH_MASK);
+ ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
+ return;
+ }
+ }
+ ARMWord* addr = getLdrImmAddress(insn);
+ *addr = reinterpret_cast<ARMWord>(to);
+ ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
+}
+
+void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+{
+ ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
+ ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
+ ARMWord index = (*ldr & 0xfff) >> 1;
+
+ ASSERT(diff >= 1);
+ if (diff >= 2 || index > 0) {
+ diff = (diff + index - 2) * sizeof(ARMWord);
+ ASSERT(diff <= 0xfff);
+ *ldr = (*ldr & ~0xfff) | diff;
+ } else
+ *ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
+}
+
+// Handle immediates
+
+ARMWord ARMAssembler::getOp2(ARMWord imm)
+{
+ int rol;
+
+ if (imm <= 0xff)
+ return OP2_IMM | imm;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol = 8;
+ }
+ else {
+ imm = (imm << 24) | (imm >> 8);
+ rol = 0;
+ }
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ return OP2_IMM | (imm >> 24) | (rol << 8);
+
+ return 0;
+}
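
For readers unfamiliar with the encoding getOp2 targets (editorial sketch, not part of the patch): an ARM data-processing immediate is an 8-bit value rotated right by twice the 4-bit rotate field, which is exactly what the rotate search above inverts. A standalone decoder for that form, ignoring the OP2_IMM flag bit:

```cpp
#include <cassert>
#include <cstdint>

// Decode an ARM operand-2 immediate: bits 0-7 hold the 8-bit value,
// bits 8-11 hold half of the right-rotation amount.
static uint32_t decodeOp2(uint32_t op2)
{
    uint32_t imm = op2 & 0xff;
    uint32_t ror = ((op2 >> 8) & 0xf) * 2;
    return (imm >> ror) | (imm << ((32 - ror) & 31));
}

int main()
{
    assert(decodeOp2(0xff) == 0xff);                  // rotate 0
    assert(decodeOp2((4 << 8) | 0xff) == 0xff000000); // 0xff ror 8, as getOp2 emits
    assert(decodeOp2((15 << 8) | 0xff) == 0x3fc);     // 0xff ror 30
    return 0;
}
```
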
+
+int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
+{
+ // Step 1: Search for a non-immediate part
+ ARMWord mask;
+ ARMWord imm1;
+ ARMWord imm2;
+ int rol;
+
+ mask = 0xff000000;
+ rol = 8;
+ while(1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = 4 + (rol >> 1);
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3) {
+ // rol 8
+ imm = (imm << 8) | (imm >> 24);
+ mask = 0xff00;
+ rol = 24;
+ while (1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = (rol >> 1) - 8;
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3)
+ return 0;
+ }
+ break;
+ }
+ }
+
+ ASSERT((imm & 0xff) == 0);
+
+ if ((imm & 0xff000000) == 0) {
+ imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+ imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+ } else if (imm & 0xc0000000) {
+ imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ } else {
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ }
+
+ if (positive) {
+ mov_r(reg, imm1);
+ orr_r(reg, reg, imm2);
+ } else {
+ mvn_r(reg, imm1);
+ bic_r(reg, reg, imm2);
+ }
+
+ return 1;
+}
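
A concrete illustration of what genInt searches for (editorial, not part of the patch): a value such as 0xfff is not a valid single operand-2 immediate, but it splits into two chunks, 0xf00 and 0xff, each of which is. genInt emits MOV/ORR (or MVN/BIC for the inverted form) with such a pair, and returns 0 when no two-chunk split exists so the caller falls back to a literal load. A brute-force checker for the single-chunk property:

```cpp
#include <cstdint>
#include <cstdio>

// True if 'imm' fits ARM's operand-2 form: an 8-bit value rotated
// right by an even amount (checked here by rotating left to undo it).
static bool encodableAsOp2(uint32_t imm)
{
    for (unsigned ror = 0; ror < 32; ror += 2) {
        uint32_t unrotated = (imm << ror) | (imm >> ((32 - ror) & 31));
        if (unrotated <= 0xff)
            return true;
    }
    return false;
}

int main()
{
    printf("0xfff: %d\n", encodableAsOp2(0xfff)); // 0 -> needs two instructions
    printf("0xf00: %d\n", encodableAsOp2(0xf00)); // 1
    printf("0xff:  %d\n", encodableAsOp2(0xff));  // 1
    return 0;
}
```
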
+
+ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
+{
+ ARMWord tmp;
+
+ // Do it with one instruction
+ tmp = getOp2(imm);
+ if (tmp)
+ return tmp;
+
+ tmp = getOp2(~imm);
+ if (tmp) {
+ if (invert)
+ return tmp | OP2_INV_IMM;
+ mvn_r(tmpReg, tmp);
+ return tmpReg;
+ }
+
+ // Do it with two instructions
+ if (genInt(tmpReg, imm, true))
+ return tmpReg;
+ if (genInt(tmpReg, ~imm, false))
+ return tmpReg;
+
+ ldr_imm(tmpReg, imm);
+ return tmpReg;
+}
+
+void ARMAssembler::moveImm(ARMWord imm, int dest)
+{
+ ARMWord tmp;
+
+ // Do it with one instruction
+ tmp = getOp2(imm);
+ if (tmp) {
+ mov_r(dest, tmp);
+ return;
+ }
+
+ tmp = getOp2(~imm);
+ if (tmp) {
+ mvn_r(dest, tmp);
+ return;
+ }
+
+ // Do it with two instructions
+ if (genInt(dest, imm, true))
+ return;
+ if (genInt(dest, ~imm, false))
+ return;
+
+ ldr_imm(dest, imm);
+}
+
+// Memory load/store helpers
+
+void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xfff)
+ dtr_u(isLoad, srcDst, base, offset);
+ else if (offset <= 0xfffff) {
+ add_r(ARM::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
+ dtr_u(isLoad, srcDst, ARM::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = getImm(offset, ARM::S0);
+ dtr_ur(isLoad, srcDst, base, reg);
+ }
+ } else {
+ offset = -offset;
+ if (offset <= 0xfff)
+ dtr_d(isLoad, srcDst, base, offset);
+ else if (offset <= 0xfffff) {
+ sub_r(ARM::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
+ dtr_d(isLoad, srcDst, ARM::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = getImm(offset, ARM::S0);
+ dtr_dr(isLoad, srcDst, base, reg);
+ }
+ }
+}
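
Why the middle branch above works (editorial, not part of the patch): the operand 'OP2_IMM | (offset >> 12) | (10 << 8)' encodes the 8-bit chunk offset>>12 with rotate field 10, that is, rotated right by 20, which for an 8-bit value is the same as shifting it left by 12. The ADD therefore materializes bits 12-19 of the offset into S0, and the 12-bit LDR/STR displacement supplies bits 0-11:

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    uint32_t offset = 0x54321;        // any offset <= 0xfffff
    uint32_t chunk = offset >> 12;    // 8-bit value, encoded with rotate field 10
    uint32_t materialized = (chunk >> 20) | (chunk << 12); // ror 20 == shl 12 here
    assert(materialized + (offset & 0xfff) == offset);
    return 0;
}
```
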
+
+void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ ARMWord op2;
+
+ ASSERT(scale >= 0 && scale <= 3);
+ op2 = lsl(index, scale);
+
+ if (offset >= 0 && offset <= 0xfff) {
+ add_r(ARM::S0, base, op2);
+ dtr_u(isLoad, srcDst, ARM::S0, offset);
+ return;
+ }
+ if (offset <= 0 && offset >= -0xfff) {
+ add_r(ARM::S0, base, op2);
+ dtr_d(isLoad, srcDst, ARM::S0, -offset);
+ return;
+ }
+
+ moveImm(offset, ARM::S0);
+ add_r(ARM::S0, ARM::S0, op2);
+ dtr_ur(isLoad, srcDst, base, ARM::S0);
+}
+
+void* ARMAssembler::executableCopy(ExecutablePool* allocator)
+{
+ char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));
+
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+ ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + *iter);
+ ARMWord* offset = getLdrImmAddress(ldrAddr);
+ if (*offset != 0xffffffff)
+ linkBranch(data, JmpSrc(*iter), data + *offset);
+ }
+
+ return data;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM)
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/ARMAssembler.h b/src/3rdparty/webkit/JavaScriptCore/assembler/ARMAssembler.h
new file mode 100644
index 0000000..d6bb43e
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/ARMAssembler.h
@@ -0,0 +1,706 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM)
+
+#include "AssemblerBufferWithConstantPool.h"
+#include <wtf/Assertions.h>
+namespace JSC {
+
+typedef uint32_t ARMWord;
+
+namespace ARM {
+ typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ S0 = r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ S1 = r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ sp = r13,
+ r14,
+ lr = r14,
+ r15,
+ pc = r15
+ } RegisterID;
+
+ typedef enum {
+ fp0 //FIXME
+ } FPRegisterID;
+} // namespace ARM
+
+ class ARMAssembler {
+ public:
+ typedef ARM::RegisterID RegisterID;
+ typedef ARM::FPRegisterID FPRegisterID;
+ typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
+ typedef WTF::SegmentedVector<int, 64> Jumps;
+
+ ARMAssembler() { }
+
+ // ARM conditional constants
+ typedef enum {
+ EQ = 0x00000000, // Zero
+ NE = 0x10000000, // Non-zero
+ CS = 0x20000000,
+ CC = 0x30000000,
+ MI = 0x40000000,
+ PL = 0x50000000,
+ VS = 0x60000000,
+ VC = 0x70000000,
+ HI = 0x80000000,
+ LS = 0x90000000,
+ GE = 0xa0000000,
+ LT = 0xb0000000,
+ GT = 0xc0000000,
+ LE = 0xd0000000,
+ AL = 0xe0000000
+ } Condition;
+
+ // ARM instruction constants
+ enum {
+ AND = (0x0 << 21),
+ EOR = (0x1 << 21),
+ SUB = (0x2 << 21),
+ RSB = (0x3 << 21),
+ ADD = (0x4 << 21),
+ ADC = (0x5 << 21),
+ SBC = (0x6 << 21),
+ RSC = (0x7 << 21),
+ TST = (0x8 << 21),
+ TEQ = (0x9 << 21),
+ CMP = (0xa << 21),
+ CMN = (0xb << 21),
+ ORR = (0xc << 21),
+ MOV = (0xd << 21),
+ BIC = (0xe << 21),
+ MVN = (0xf << 21),
+ MUL = 0x00000090,
+ MULL = 0x00c00090,
+ DTR = 0x05000000,
+ LDRH = 0x00100090,
+ STRH = 0x00000090,
+ STMDB = 0x09200000,
+ LDMIA = 0x08b00000,
+ B = 0x0a000000,
+ BL = 0x0b000000,
+#if ARM_ARCH_VERSION >= 5
+ CLZ = 0x016f0f10,
+ BKPT = 0xe120070,
+#endif
+ };
+
+ enum {
+ OP2_IMM = (1 << 25),
+ OP2_IMMh = (1 << 22),
+ OP2_INV_IMM = (1 << 26),
+ SET_CC = (1 << 20),
+ OP2_OFSREG = (1 << 25),
+ DT_UP = (1 << 23),
+ DT_WB = (1 << 21),
+ // This flag is included in LDR and STR
+ DT_PRE = (1 << 24),
+ HDT_UH = (1 << 5),
+ DT_LOAD = (1 << 20),
+ };
+
+ // Masks of ARM instructions
+ enum {
+ BRANCH_MASK = 0x00ffffff,
+ NONARM = 0xf0000000,
+ SDT_MASK = 0x0c000000,
+ SDT_OFFSET_MASK = 0xfff,
+ };
+
+ enum {
+ BOFFSET_MIN = -0x00800000,
+ BOFFSET_MAX = 0x007fffff,
+ SDT = 0x04000000,
+ };
+
+ enum {
+ padForAlign8 = 0x00,
+ padForAlign16 = 0x0000,
+ padForAlign32 = 0xee120070,
+ };
+
+ class JmpSrc {
+ friend class ARMAssembler;
+ public:
+ JmpSrc()
+ : m_offset(-1)
+ , m_latePatch(false)
+ {
+ }
+
+ void enableLatePatch() { m_latePatch = true; }
+ private:
+ JmpSrc(int offset)
+ : m_offset(offset)
+ , m_latePatch(false)
+ {
+ }
+
+ int m_offset : 31;
+ int m_latePatch : 1;
+ };
+
+ class JmpDst {
+ friend class ARMAssembler;
+ public:
+ JmpDst()
+ : m_offset(-1)
+ , m_used(false)
+ {
+ }
+
+ bool isUsed() const { return m_used; }
+ void used() { m_used = true; }
+ private:
+ JmpDst(int offset)
+ : m_offset(offset)
+ , m_used(false)
+ {
+ ASSERT(m_offset == offset);
+ }
+
+ int m_offset : 31;
+ int m_used : 1;
+ };
+
+ // Instruction formatting
+
+ void emitInst(ARMWord op, int rd, int rn, ARMWord op2)
+ {
+ ASSERT((op2 & ~OP2_IMM) <= 0xfff || (op2 & ~OP2_IMMh) <= 0xfff);
+ m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
+ }
+
+ void and_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | AND, rd, rn, op2);
+ }
+
+ void ands_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | AND | SET_CC, rd, rn, op2);
+ }
+
+ void eor_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | EOR, rd, rn, op2);
+ }
+
+ void eors_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | EOR | SET_CC, rd, rn, op2);
+ }
+
+ void sub_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SUB, rd, rn, op2);
+ }
+
+ void subs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SUB | SET_CC, rd, rn, op2);
+ }
+
+ void rsb_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSB, rd, rn, op2);
+ }
+
+ void rsbs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSB | SET_CC, rd, rn, op2);
+ }
+
+ void add_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADD, rd, rn, op2);
+ }
+
+ void adds_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADD | SET_CC, rd, rn, op2);
+ }
+
+ void adc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADC, rd, rn, op2);
+ }
+
+ void adcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ADC | SET_CC, rd, rn, op2);
+ }
+
+ void sbc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SBC, rd, rn, op2);
+ }
+
+ void sbcs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | SBC | SET_CC, rd, rn, op2);
+ }
+
+ void rsc_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSC, rd, rn, op2);
+ }
+
+ void rscs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | RSC | SET_CC, rd, rn, op2);
+ }
+
+ void tst_r(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | TST | SET_CC, 0, rn, op2);
+ }
+
+ void teq_r(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | TEQ | SET_CC, 0, rn, op2);
+ }
+
+ void cmp_r(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | CMP | SET_CC, 0, rn, op2);
+ }
+
+ void orr_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ORR, rd, rn, op2);
+ }
+
+ void orrs_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | ORR | SET_CC, rd, rn, op2);
+ }
+
+ void mov_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MOV, rd, ARM::r0, op2);
+ }
+
+ void movs_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MOV | SET_CC, rd, ARM::r0, op2);
+ }
+
+ void bic_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | BIC, rd, rn, op2);
+ }
+
+ void bics_r(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | BIC | SET_CC, rd, rn, op2);
+ }
+
+ void mvn_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MVN, rd, ARM::r0, op2);
+ }
+
+ void mvns_r(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | MVN | SET_CC, rd, ARM::r0, op2);
+ }
+
+ void mul_r(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void muls_r(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MUL | SET_CC | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void mull_r(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
+ }
+
+ void ldr_imm(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARM::pc) | RD(rd), imm, true);
+ }
+
+ void ldr_un_imm(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARM::pc) | RD(rd), imm);
+ }
+
+ void dtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP, rd, rb, op2);
+ }
+
+ void dtr_ur(bool isLoad, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP | OP2_OFSREG, rd, rb, rm);
+ }
+
+ void dtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0), rd, rb, op2);
+ }
+
+ void dtr_dr(bool isLoad, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | OP2_OFSREG, rd, rb, rm);
+ }
+
+ void ldrh_r(int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rn, rm);
+ }
+
+ void ldrh_d(int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_PRE, rd, rb, op2);
+ }
+
+ void ldrh_u(int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rb, op2);
+ }
+
+ void strh_r(int rn, int rm, int rd, Condition cc = AL)
+ {
+ emitInst(static_cast<ARMWord>(cc) | STRH | HDT_UH | DT_UP | DT_PRE, rd, rn, rm);
+ }
+
+ void push_r(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(cc | DTR | DT_WB | RN(ARM::sp) | RD(reg) | 0x4);
+ }
+
+ void pop_r(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(cc | (DTR ^ DT_PRE) | DT_LOAD | DT_UP | RN(ARM::sp) | RD(reg) | 0x4);
+ }
+
+ inline void poke_r(int reg, Condition cc = AL)
+ {
+ dtr_d(false, ARM::sp, 0, reg, cc);
+ }
+
+ inline void peek_r(int reg, Condition cc = AL)
+ {
+ dtr_u(true, reg, ARM::sp, 0, cc);
+ }
+
+#if ARM_ARCH_VERSION >= 5
+ void clz_r(int rd, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(static_cast<ARMWord>(cc) | CLZ | RD(rd) | RM(rm));
+ }
+#endif
+
+ void bkpt(ARMWord value)
+ {
+#if ARM_ARCH_VERSION >= 5
+ m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
+#else
+ // Cannot access memory at address zero
+ dtr_dr(true, ARM::S0, ARM::S0, ARM::S0);
+#endif
+ }
+
+ static ARMWord lsl(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x00;
+ }
+
+ static ARMWord lsr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x20;
+ }
+
+ static ARMWord asr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x40;
+ }
+
+ static ARMWord lsl_r(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(shiftReg <= ARM::pc);
+ return reg | (shiftReg << 8) | 0x10;
+ }
+
+ static ARMWord lsr_r(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(shiftReg <= ARM::pc);
+ return reg | (shiftReg << 8) | 0x30;
+ }
+
+ static ARMWord asr_r(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARM::pc);
+ ASSERT(shiftReg <= ARM::pc);
+ return reg | (shiftReg << 8) | 0x50;
+ }
+
+ // General helpers
+
+ int size()
+ {
+ return m_buffer.size();
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_buffer.ensureSpace(insnSpace, constSpace);
+ }
+
+ JmpDst label()
+ {
+ return JmpDst(m_buffer.size());
+ }
+
+ JmpDst align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ mov_r(ARM::r0, ARM::r0);
+
+ return label();
+ }
+
+ JmpSrc jmp(Condition cc = AL)
+ {
+ int s = size();
+ ldr_un_imm(ARM::pc, 0xffffffff, cc);
+ m_jumps.append(s);
+ return JmpSrc(s);
+ }
+
+ void* executableCopy(ExecutablePool* allocator);
+
+ // Patching helpers
+
+ static ARMWord* getLdrImmAddress(ARMWord* insn, uint32_t* constPool = 0);
+ static void linkBranch(void* code, JmpSrc from, void* to);
+
+ static void patchPointerInternal(intptr_t from, void* to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+ ARMWord* addr = getLdrImmAddress(insn);
+ *addr = reinterpret_cast<ARMWord>(to);
+ ExecutableAllocator::cacheFlush(addr, sizeof(ARMWord));
+ }
+
+ static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
+ {
+ value = (value << 1) + 1;
+ ASSERT(!(value & ~0xfff));
+ return (load & ~0xfff) | value;
+ }
+
+ static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+ // Patch pointers
+
+ static void linkPointer(void* code, JmpDst from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
+ }
+
+ static void repatchInt32(void* from, int32_t to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from), reinterpret_cast<void*>(to));
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
+ }
+
+ static void repatchLoadPtrToLEA(void* from)
+ {
+ // On ARM this is a patch from LDR to ADD. It is a restricted conversion,
+ // from one special case to another, although sufficient for its purpose
+ ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+ ASSERT((*insn & 0x0ff00f00) == 0x05900000);
+
+ *insn = (*insn & 0xf00ff0ff) | 0x02800000;
+ ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
+ }
+
+ // Linkers
+
+ void linkJump(JmpSrc from, JmpDst to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(m_buffer.data()) + (from.m_offset / sizeof(ARMWord));
+ *getLdrImmAddress(insn, m_buffer.poolAddress()) = static_cast<ARMWord>(to.m_offset);
+ }
+
+ static void linkJump(void* code, JmpSrc from, void* to)
+ {
+ linkBranch(code, from, to);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from) - sizeof(ARMWord), to);
+ }
+
+ static void linkCall(void* code, JmpSrc from, void* to)
+ {
+ linkBranch(code, from, to);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ relinkJump(from, to);
+ }
+
+ // Address operations
+
+ static void* getRelocatedAddress(void* code, JmpSrc jump)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<ARMWord*>(code) + jump.m_offset / sizeof(ARMWord) + 1);
+ }
+
+ static void* getRelocatedAddress(void* code, JmpDst label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<ARMWord*>(code) + label.m_offset / sizeof(ARMWord));
+ }
+
+ // Address differences
+
+ static int getDifferenceBetweenLabels(JmpDst from, JmpSrc to)
+ {
+ return (to.m_offset + sizeof(ARMWord)) - from.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst from, JmpDst to)
+ {
+ return to.m_offset - from.m_offset;
+ }
+
+ static unsigned getCallReturnOffset(JmpSrc call)
+ {
+ return call.m_offset + sizeof(ARMWord);
+ }
+
+ // Handle immediates
+
+ static ARMWord getOp2Byte(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+ return OP2_IMMh | (imm & 0x0f) | ((imm & 0xf0) << 4);
+ }
+
+ static ARMWord getOp2(ARMWord imm);
+ ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
+ void moveImm(ARMWord imm, int dest);
+
+ // Memory load/store helpers
+
+ void dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+
+ // Constant pool handlers
+
+ static ARMWord placeConstantPoolBarrier(int offset)
+ {
+ offset = (offset - sizeof(ARMWord)) >> 2;
+ ASSERT((offset <= BOFFSET_MAX && offset >= BOFFSET_MIN));
+ return AL | B | (offset & BRANCH_MASK);
+ }
+
+ private:
+ ARMWord RM(int reg)
+ {
+ ASSERT(reg <= ARM::pc);
+ return reg;
+ }
+
+ ARMWord RS(int reg)
+ {
+ ASSERT(reg <= ARM::pc);
+ return reg << 8;
+ }
+
+ ARMWord RD(int reg)
+ {
+ ASSERT(reg <= ARM::pc);
+ return reg << 12;
+ }
+
+ ARMWord RN(int reg)
+ {
+ ASSERT(reg <= ARM::pc);
+ return reg << 16;
+ }
+
+ static ARMWord getConditionalField(ARMWord i)
+ {
+ return i & 0xf0000000;
+ }
+
+ int genInt(int reg, ARMWord imm, bool positive);
+
+ ARMBuffer m_buffer;
+ Jumps m_jumps;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM)
+
+#endif // ARMAssembler_h
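
One detail of the header above worth spelling out (editorial, not part of the patch): the static patchConstantPoolLoad(load, value) keeps the pool index in the ldr's 12-bit offset field as (index << 1) | 1, and getLdrImmAddress in the .cpp tests that low bit to distinguish a not-yet-flushed pool reference from a real PC-relative offset. A standalone round-trip of that tagging:

```cpp
#include <cassert>
#include <cstdint>

// Mirrors ARMAssembler::patchConstantPoolLoad(ARMWord, ARMWord): the pool
// index is stored as (index << 1) | 1 in the instruction's offset field.
static uint32_t tagPoolIndex(uint32_t load, uint32_t index)
{
    uint32_t tagged = (index << 1) + 1;
    assert(!(tagged & ~0xfffu));       // must still fit the 12-bit field
    return (load & ~0xfffu) | tagged;
}

int main()
{
    uint32_t insn = tagPoolIndex(0x05900000, 7); // ldr-like opcode bits
    assert(insn & 0x1);                 // low bit marks a pool-relative load
    assert(((insn & 0xfff) >> 1) == 7); // index recovered, as getLdrImmAddress does
    return 0;
}
```
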
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/ARMv7Assembler.h b/src/3rdparty/webkit/JavaScriptCore/assembler/ARMv7Assembler.h
index 9745d6d..f7e2fb4 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -442,6 +442,7 @@ public:
{
}
+ void enableLatePatch() { }
private:
JmpSrc(int offset)
: m_offset(offset)
@@ -1528,51 +1529,51 @@ public:
ASSERT(from.m_offset != -1);
ASSERT(reinterpret_cast<intptr_t>(to) & 1);
- patchPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
+ setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
}
- static void patchPointer(void* code, JmpDst where, void* value)
+ static void linkPointer(void* code, JmpDst where, void* value)
{
- patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
}
static void relinkJump(void* from, void* to)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(from) - 2, 2 * sizeof(uint16_t));
-
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
linkWithOffset(reinterpret_cast<uint16_t*>(from), relative);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 2, 2 * sizeof(uint16_t));
}
static void relinkCall(void* from, void* to)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
-
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
ASSERT(reinterpret_cast<intptr_t>(to) & 1);
- patchPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
+ setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
}
static void repatchInt32(void* where, int32_t value)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
-
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
- patchInt32(where, value);
+ setInt32(where, value);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
}
static void repatchPointer(void* where, void* value)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
-
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
- patchPointer(where, value);
+ setPointer(where, value);
+
+ ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
}
static void repatchLoadPtrToLEA(void* where)
@@ -1582,8 +1583,8 @@ public:
uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;
ASSERT((*loadOp & 0xfff0) == OP_LDR_reg_T2);
- ExecutableAllocator::MakeWritable unprotect(loadOp, sizeof(uint16_t));
*loadOp = OP_ADD_reg_T3 | (*loadOp & 0xf);
+ ExecutableAllocator::cacheFlush(loadOp, sizeof(uint16_t));
}
private:
@@ -1610,12 +1611,10 @@ private:
m_formatter.vfpOp(0x0b00ed00 | offset | (up << 7) | (isLoad << 4) | doubleRegisterMask(rd, 6, 28) | rn);
}
- static void patchInt32(void* code, uint32_t value)
+ static void setInt32(void* code, uint32_t value)
{
uint16_t* location = reinterpret_cast<uint16_t*>(code);
- ExecutableAllocator::MakeWritable unprotect(location - 4, 4 * sizeof(uint16_t));
-
uint16_t lo16 = value;
uint16_t hi16 = value >> 16;
@@ -1623,11 +1622,13 @@ private:
spliceLo11(location - 3, lo16);
spliceHi5(location - 2, hi16);
spliceLo11(location - 1, hi16);
+
+ ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t));
}
- static void patchPointer(void* code, void* value)
+ static void setPointer(void* code, void* value)
{
- patchInt32(code, reinterpret_cast<uint32_t>(value));
+ setInt32(code, reinterpret_cast<uint32_t>(value));
}
// Linking & patching:
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/AbstractMacroAssembler.h b/src/3rdparty/webkit/JavaScriptCore/assembler/AbstractMacroAssembler.h
index cf94677..95b5afc 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -35,22 +35,20 @@
#if ENABLE(ASSEMBLER)
-// FIXME: keep transitioning this out into MacroAssemblerX86_64.
-#if PLATFORM(X86_64)
-#define REPTACH_OFFSET_CALL_R11 3
-#endif
-
namespace JSC {
+class LinkBuffer;
+class RepatchBuffer;
+
template <class AssemblerType>
class AbstractMacroAssembler {
public:
+ typedef AssemblerType AssemblerType_T;
+
typedef MacroAssemblerCodePtr CodePtr;
typedef MacroAssemblerCodeRef CodeRef;
class Jump;
- class LinkBuffer;
- class RepatchBuffer;
typedef typename AssemblerType::RegisterID RegisterID;
typedef typename AssemblerType::FPRegisterID FPRegisterID;
@@ -292,7 +290,7 @@ public:
class Call {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class LinkBuffer;
+
public:
enum Flags {
None = 0x0,
@@ -322,8 +320,13 @@ public:
return Call(jump.m_jmp, Linkable);
}
- private:
+ void enableLatePatch()
+ {
+ m_jmp.enableLatePatch();
+ }
+
JmpSrc m_jmp;
+ private:
Flags m_flags;
};
@@ -358,6 +361,11 @@ public:
masm->m_assembler.linkJump(m_jmp, label.m_label);
}
+ void enableLatePatch()
+ {
+ m_jmp.enableLatePatch();
+ }
+
private:
JmpSrc m_jmp;
};
@@ -406,254 +414,13 @@ public:
};
- // Section 3: LinkBuffer - utility to finalize code generation.
+ // Section 3: Misc admin methods
static CodePtr trampolineAt(CodeRef ref, Label label)
{
return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
}
- // LinkBuffer:
- //
- // This class assists in linking code generated by the macro assembler, once code generation
- // has been completed, and the code has been copied to is final location in memory. At this
- // time pointers to labels within the code may be resolved, and relative offsets to external
- // addresses may be fixed.
- //
- // Specifically:
- // * Jump objects may be linked to external targets,
- // * The address of Jump objects may taken, such that it can later be relinked.
- // * The return address of a Jump object representing a call may be acquired.
- // * The address of a Label pointing into the code may be resolved.
- // * The value referenced by a DataLabel may be fixed.
- //
- // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
- // address of calls, as opposed to a point that can be used to later relink a Jump -
- // possibly wrap the later up in an object that can do just that).
- class LinkBuffer : public Noncopyable {
- public:
- // Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
- // First, executablePool is copied into m_executablePool, then the initialization of
- // m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
- LinkBuffer(AbstractMacroAssembler<AssemblerType>* masm, PassRefPtr<ExecutablePool> executablePool)
- : m_executablePool(executablePool)
- , m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
- , m_size(masm->m_assembler.size())
-#ifndef NDEBUG
- , m_completed(false)
-#endif
- {
- }
-
- ~LinkBuffer()
- {
- ASSERT(m_completed);
- }
-
- // These methods are used to link or set values at code generation time.
-
- void link(Call call, FunctionPtr function)
- {
- ASSERT(call.isFlagSet(Call::Linkable));
-#if PLATFORM(X86_64)
- if (!call.isFlagSet(Call::Near)) {
- char* callLocation = reinterpret_cast<char*>(AssemblerType::getRelocatedAddress(code(), call.m_jmp)) - REPTACH_OFFSET_CALL_R11;
- AssemblerType::patchPointerForCall(callLocation, function.value());
- } else
-#endif
- AssemblerType::linkCall(code(), call.m_jmp, function.value());
- }
-
- void link(Jump jump, CodeLocationLabel label)
- {
- AssemblerType::linkJump(code(), jump.m_jmp, label.dataLocation());
- }
-
- void link(JumpList list, CodeLocationLabel label)
- {
- for (unsigned i = 0; i < list.m_jumps.size(); ++i)
- AssemblerType::linkJump(code(), list.m_jumps[i].m_jmp, label.dataLocation());
- }
-
- void patch(DataLabelPtr label, void* value)
- {
- AssemblerType::patchPointer(code(), label.m_label, value);
- }
-
- void patch(DataLabelPtr label, CodeLocationLabel value)
- {
- AssemblerType::patchPointer(code(), label.m_label, value.executableAddress());
- }
-
- // These methods are used to obtain handles to allow the code to be relinked / repatched later.
-
- CodeLocationCall locationOf(Call call)
- {
- ASSERT(call.isFlagSet(Call::Linkable));
- ASSERT(!call.isFlagSet(Call::Near));
- return CodeLocationCall(AssemblerType::getRelocatedAddress(code(), call.m_jmp));
- }
-
- CodeLocationNearCall locationOfNearCall(Call call)
- {
- ASSERT(call.isFlagSet(Call::Linkable));
- ASSERT(call.isFlagSet(Call::Near));
- return CodeLocationNearCall(AssemblerType::getRelocatedAddress(code(), call.m_jmp));
- }
-
- CodeLocationLabel locationOf(Label label)
- {
- return CodeLocationLabel(AssemblerType::getRelocatedAddress(code(), label.m_label));
- }
-
- CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
- {
- return CodeLocationDataLabelPtr(AssemblerType::getRelocatedAddress(code(), label.m_label));
- }
-
- CodeLocationDataLabel32 locationOf(DataLabel32 label)
- {
- return CodeLocationDataLabel32(AssemblerType::getRelocatedAddress(code(), label.m_label));
- }
-
- // This method obtains the return address of the call, given as an offset from
- // the start of the code.
- unsigned returnAddressOffset(Call call)
- {
- return AssemblerType::getCallReturnOffset(call.m_jmp);
- }
-
- // Upon completion of all patching either 'finalizeCode()' or 'finalizeCodeAddendum()' should be called
- // once to complete generation of the code. 'finalizeCode()' is suited to situations
- // where the executable pool must also be retained, the lighter-weight 'finalizeCodeAddendum()' is
- // suited to adding to an existing allocation.
- CodeRef finalizeCode()
- {
- performFinalization();
-
- return CodeRef(m_code, m_executablePool, m_size);
- }
- CodeLocationLabel finalizeCodeAddendum()
- {
- performFinalization();
-
- return CodeLocationLabel(code());
- }
-
- private:
- // Keep this private! - the underlying code should only be obtained externally via
- // finalizeCode() or finalizeCodeAddendum().
- void* code()
- {
- return m_code;
- }
-
- void performFinalization()
- {
-#ifndef NDEBUG
- ASSERT(!m_completed);
- m_completed = true;
-#endif
-
- ExecutableAllocator::makeExecutable(code(), m_size);
- }
-
- RefPtr<ExecutablePool> m_executablePool;
- void* m_code;
- size_t m_size;
-#ifndef NDEBUG
- bool m_completed;
-#endif
- };
-
- class RepatchBuffer {
- public:
- RepatchBuffer()
- {
- }
-
- void relink(CodeLocationJump jump, CodeLocationLabel destination)
- {
- AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
- }
-
- void relink(CodeLocationCall call, CodeLocationLabel destination)
- {
-#if PLATFORM(X86_64)
- repatch(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11), destination.executableAddress());
-#else
- AssemblerType::relinkCall(call.dataLocation(), destination.executableAddress());
-#endif
- }
-
- void relink(CodeLocationCall call, FunctionPtr destination)
- {
-#if PLATFORM(X86_64)
- repatch(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11), destination.executableAddress());
-#else
- AssemblerType::relinkCall(call.dataLocation(), destination.executableAddress());
-#endif
- }
-
- void relink(CodeLocationNearCall nearCall, CodePtr destination)
- {
- AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
- }
-
- void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination)
- {
- AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
- }
-
- void relink(CodeLocationNearCall nearCall, FunctionPtr destination)
- {
- AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
- }
-
- void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value)
- {
- AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
- }
-
- void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value)
- {
- AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
- }
-
- void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
- {
- relink(CodeLocationCall(CodePtr(returnAddress)), label);
- }
-
- void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
- {
- relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
- }
-
- void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function)
- {
- relink(CodeLocationCall(CodePtr(returnAddress)), function);
- }
-
- void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
- {
- relink(CodeLocationNearCall(CodePtr(returnAddress)), label);
- }
-
- void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
- {
- relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
- }
-
- void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
- {
- AssemblerType::repatchLoadPtrToLEA(instruction.dataLocation());
- }
- };
-
-
- // Section 4: Misc admin methods
-
size_t size()
{
return m_assembler.size();
@@ -712,6 +479,59 @@ public:
protected:
AssemblerType m_assembler;
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkJump(void* code, Jump jump, CodeLocationLabel target)
+ {
+ AssemblerType::linkJump(code, jump.m_jmp, target.dataLocation());
+ }
+
+ static void linkPointer(void* code, typename AssemblerType::JmpDst label, void* value)
+ {
+ AssemblerType::linkPointer(code, label, value);
+ }
+
+ static void* getLinkerAddress(void* code, typename AssemblerType::JmpSrc label)
+ {
+ return AssemblerType::getRelocatedAddress(code, label);
+ }
+
+ static void* getLinkerAddress(void* code, typename AssemblerType::JmpDst label)
+ {
+ return AssemblerType::getRelocatedAddress(code, label);
+ }
+
+ static unsigned getLinkerCallReturnOffset(Call call)
+ {
+ return AssemblerType::getCallReturnOffset(call.m_jmp);
+ }
+
+ static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+ }
+
+ static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+ }
+
+ static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+ }
+
+ static void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
+ {
+ AssemblerType::repatchLoadPtrToLEA(instruction.dataLocation());
+ }
};
} // namespace JSC
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBuffer.h b/src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBuffer.h
index 7a5a8d3..073906a 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBuffer.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBuffer.h
@@ -95,12 +95,14 @@ namespace JSC {
void putIntUnchecked(int value)
{
+ ASSERT(!(m_size > m_capacity - 4));
*reinterpret_cast<int*>(&m_buffer[m_size]) = value;
m_size += 4;
}
void putInt64Unchecked(int64_t value)
{
+ ASSERT(!(m_size > m_capacity - 8));
*reinterpret_cast<int64_t*>(&m_buffer[m_size]) = value;
m_size += 8;
}
@@ -137,10 +139,19 @@ namespace JSC {
return memcpy(result, m_buffer, m_size);
}
- private:
- void grow()
+ protected:
+ void append(const char* data, int size)
+ {
+ if (m_size > m_capacity - size)
+ grow(size);
+
+ memcpy(m_buffer + m_size, data, size);
+ m_size += size;
+ }
+
+ void grow(int extraCapacity = 0)
{
- m_capacity += m_capacity / 2;
+ m_capacity += m_capacity / 2 + extraCapacity;
if (m_buffer == m_inlineBuffer) {
char* newBuffer = static_cast<char*>(fastMalloc(m_capacity));
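
The point of the grow(extraCapacity) change above (editorial, not part of the patch) is that append() can pass the required size straight to grow(), so even an append larger than half the current capacity, for example a whole constant pool, fits after a single growth step:

```cpp
#include <cassert>

int main()
{
    int capacity = 256, size = 250;
    int appendSize = 300;                      // larger than capacity / 2
    if (size > capacity - appendSize)          // same test as append()
        capacity += capacity / 2 + appendSize; // grow(extraCapacity)
    assert(size + appendSize <= capacity);     // the append now fits
    return 0;
}
```
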
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
new file mode 100644
index 0000000..f15b7f3
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBufferWithConstantPool_h
+#define AssemblerBufferWithConstantPool_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "AssemblerBuffer.h"
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+/*
+ A constant pool can store 4- or 8-byte values, either constants or
+ addresses. The addresses are 32 or 64 bits wide; the constants are
+ double-precision floats or integers that are hard to encode in a few
+ machine instructions.
+
+ TODO: The pool is designed to handle both 32- and 64-bit values, but
+ currently only 4-byte constants are implemented and tested.
+
+ The AssemblerBuffer can contain multiple constant pools. Each pool is
+ inserted into the instruction stream, protected from the execution flow
+ by a jump instruction.
+
+ The flush mechanism is invoked when there is no space left to insert the
+ next instruction into the pool. Three values determine when the constant
+ pool itself has to be inserted into the instruction stream (Assembler
+ Buffer):
+
+ - maxPoolSize: size of the constant pool in bytes; this value cannot be
+ larger than the maximum offset of a PC relative memory load
+
+ - barrierSize: size in bytes of the jump instruction which protects the
+ constant pool from execution
+
+ - maxInstructionSize: maximum length of a machine instruction in bytes
+
+ Several callbacks implement the target-architecture-specific address
+ handling:
+
+ - TYPE patchConstantPoolLoad(TYPE load, int value):
+ patch the 'load' instruction with the index of the constant in the
+ constant pool and return the patched instruction.
+
+ - void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr):
+ patch the PC relative load instruction at address 'loadAddr' with the
+ final relative offset. The offset can be computed from 'constPoolAddr'
+ (the address of the constant pool) and the index of the constant
+ (stored earlier in the load instruction itself).
+
+ - TYPE placeConstantPoolBarrier(int size):
+ return a constant pool barrier instruction which jumps over the
+ constant pool.
+
+ The 'put*WithConstant*' functions should be used to place data into the
+ constant pool.
+*/
+
+template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType>
+class AssemblerBufferWithConstantPool: public AssemblerBuffer {
+ typedef WTF::SegmentedVector<uint32_t, 512> LoadOffsets;
+public:
+ enum {
+ UniqueConst,
+ ReusableConst,
+ UnusedEntry,
+ };
+
+ AssemblerBufferWithConstantPool()
+ : AssemblerBuffer()
+ , m_numConsts(0)
+ , m_maxDistance(maxPoolSize)
+ , m_lastConstDelta(0)
+ {
+ m_pool = static_cast<uint32_t*>(fastMalloc(maxPoolSize));
+ m_mask = static_cast<char*>(fastMalloc(maxPoolSize / sizeof(uint32_t)));
+ }
+
+ ~AssemblerBufferWithConstantPool()
+ {
+ fastFree(m_mask);
+ fastFree(m_pool);
+ }
+
+ void ensureSpace(int space)
+ {
+ flushIfNoSpaceFor(space);
+ AssemblerBuffer::ensureSpace(space);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ flushIfNoSpaceFor(insnSpace, constSpace);
+ AssemblerBuffer::ensureSpace(insnSpace);
+ }
+
+ bool isAligned(int alignment)
+ {
+ flushIfNoSpaceFor(alignment);
+ return AssemblerBuffer::isAligned(alignment);
+ }
+
+ void putByteUnchecked(int value)
+ {
+ AssemblerBuffer::putByteUnchecked(value);
+ correctDeltas(1);
+ }
+
+ void putByte(int value)
+ {
+ flushIfNoSpaceFor(1);
+ AssemblerBuffer::putByte(value);
+ correctDeltas(1);
+ }
+
+ void putShortUnchecked(int value)
+ {
+ AssemblerBuffer::putShortUnchecked(value);
+ correctDeltas(2);
+ }
+
+ void putShort(int value)
+ {
+ flushIfNoSpaceFor(2);
+ AssemblerBuffer::putShort(value);
+ correctDeltas(2);
+ }
+
+ void putIntUnchecked(int value)
+ {
+ AssemblerBuffer::putIntUnchecked(value);
+ correctDeltas(4);
+ }
+
+ void putInt(int value)
+ {
+ flushIfNoSpaceFor(4);
+ AssemblerBuffer::putInt(value);
+ correctDeltas(4);
+ }
+
+ void putInt64Unchecked(int64_t value)
+ {
+ AssemblerBuffer::putInt64Unchecked(value);
+ correctDeltas(8);
+ }
+
+ int size()
+ {
+ flushIfNoSpaceFor(maxInstructionSize, sizeof(uint64_t));
+ return AssemblerBuffer::size();
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ flushConstantPool(false);
+ return AssemblerBuffer::executableCopy(allocator);
+ }
+
+ void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
+ {
+ flushIfNoSpaceFor(4, 4);
+
+ m_loadOffsets.append(AssemblerBuffer::size());
+ if (isReusable)
+ for (int i = 0; i < m_numConsts; ++i) {
+ if (m_mask[i] == ReusableConst && m_pool[i] == constant) {
+ AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, i));
+ correctDeltas(4);
+ return;
+ }
+ }
+
+ m_pool[m_numConsts] = constant;
+ m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
+
+ AssemblerBuffer::putInt(AssemblerType::patchConstantPoolLoad(insn, m_numConsts));
+ ++m_numConsts;
+
+ correctDeltas(4, 4);
+ }
+
+ // This flushing mechanism can be called after any unconditional jumps.
+ void flushWithoutBarrier()
+ {
+ // Flush if constant pool is more than 60% full to avoid overuse of this function.
+ if (5 * m_numConsts > 3 * maxPoolSize / sizeof(uint32_t))
+ flushConstantPool(false);
+ }
+
+ uint32_t* poolAddress()
+ {
+ return m_pool;
+ }
+
+private:
+ void correctDeltas(int insnSize)
+ {
+ m_maxDistance -= insnSize;
+ m_lastConstDelta -= insnSize;
+ if (m_lastConstDelta < 0)
+ m_lastConstDelta = 0;
+ }
+
+ void correctDeltas(int insnSize, int constSize)
+ {
+ correctDeltas(insnSize);
+
+ m_maxDistance -= m_lastConstDelta;
+ m_lastConstDelta = constSize;
+ }
+
+ void flushConstantPool(bool useBarrier = true)
+ {
+ if (m_numConsts == 0)
+ return;
+ int alignPool = (AssemblerBuffer::size() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
+
+ if (alignPool)
+ alignPool = sizeof(uint64_t) - alignPool;
+
+ // Callback to protect the constant pool from execution
+ if (useBarrier)
+ AssemblerBuffer::putInt(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
+
+ if (alignPool) {
+ if (alignPool & 1)
+ AssemblerBuffer::putByte(AssemblerType::padForAlign8);
+ if (alignPool & 2)
+ AssemblerBuffer::putShort(AssemblerType::padForAlign16);
+ if (alignPool & 4)
+ AssemblerBuffer::putInt(AssemblerType::padForAlign32);
+ }
+
+ int constPoolOffset = AssemblerBuffer::size();
+ append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t));
+
+ // Patch each PC relative load
+ for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
+ void* loadAddr = reinterpret_cast<void*>(m_buffer + *iter);
+ AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<void*>(m_buffer + constPoolOffset));
+ }
+
+ m_loadOffsets.clear();
+ m_numConsts = 0;
+ m_maxDistance = maxPoolSize;
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ if ((m_maxDistance < nextInsnSize + m_lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
+ flushConstantPool();
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ if ((m_maxDistance < nextInsnSize + m_lastConstDelta + barrierSize + (int)sizeof(uint32_t)) ||
+ (m_numConsts + nextConstSize / sizeof(uint32_t) >= maxPoolSize))
+ flushConstantPool();
+ }
+
+ uint32_t* m_pool;
+ char* m_mask;
+ LoadOffsets m_loadOffsets;
+
+ int m_numConsts;
+ int m_maxDistance;
+ int m_lastConstDelta;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBufferWithConstantPool_h
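
To make the thresholds concrete (editorial, not part of the patch): ARMAssembler instantiates this template as AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler>, so the pool holds 512 32-bit constants and the barrier is a single 4-byte branch. flushWithoutBarrier's "60% full" test, 5 * numConsts > 3 * slots, then fires once 308 or more constants are pending:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    const int maxPoolSize = 2048;                           // bytes, per ARMAssembler
    const int slots = maxPoolSize / (int)sizeof(uint32_t);  // 512 constant slots
    // flushWithoutBarrier: flush when 5 * numConsts > 3 * slots (over 60% full).
    int numConsts = 0;
    while (!(5 * numConsts > 3 * slots))
        ++numConsts;
    printf("slots=%d, flush triggers at numConsts=%d\n", slots, numConsts); // 512, 308
    return 0;
}
```
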
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/LinkBuffer.h b/src/3rdparty/webkit/JavaScriptCore/assembler/LinkBuffer.h
new file mode 100644
index 0000000..6d08117
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/LinkBuffer.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LinkBuffer_h
+#define LinkBuffer_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include <MacroAssembler.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+// LinkBuffer:
+//
+// This class assists in linking code generated by the macro assembler, once code generation
+// has been completed and the code has been copied to its final location in memory. At this
+// time pointers to labels within the code may be resolved, and relative offsets to external
+// addresses may be fixed.
+//
+// Specifically:
+// * Jump objects may be linked to external targets,
+// * The address of Jump objects may be taken, such that they can later be relinked.
+// * The return address of a Call may be acquired.
+// * The address of a Label pointing into the code may be resolved.
+// * The value referenced by a DataLabel may be set.
+//
+class LinkBuffer : public Noncopyable {
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssembler::Label Label;
+ typedef MacroAssembler::Jump Jump;
+ typedef MacroAssembler::JumpList JumpList;
+ typedef MacroAssembler::Call Call;
+ typedef MacroAssembler::DataLabel32 DataLabel32;
+ typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+
+public:
+ // Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
+ // First, executablePool is copied into m_executablePool, then the initialization of
+ // m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
+ LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool)
+ : m_executablePool(executablePool)
+ , m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
+ , m_size(masm->m_assembler.size())
+#ifndef NDEBUG
+ , m_completed(false)
+#endif
+ {
+ }
+
+ ~LinkBuffer()
+ {
+ ASSERT(m_completed);
+ }
+
+ // These methods are used to link or set values at code generation time.
+
+ void link(Call call, FunctionPtr function)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ MacroAssembler::linkCall(code(), call, function);
+ }
+
+ void link(Jump jump, CodeLocationLabel label)
+ {
+ MacroAssembler::linkJump(code(), jump, label);
+ }
+
+ void link(JumpList list, CodeLocationLabel label)
+ {
+ for (unsigned i = 0; i < list.m_jumps.size(); ++i)
+ MacroAssembler::linkJump(code(), list.m_jumps[i], label);
+ }
+
+ void patch(DataLabelPtr label, void* value)
+ {
+ MacroAssembler::linkPointer(code(), label.m_label, value);
+ }
+
+ void patch(DataLabelPtr label, CodeLocationLabel value)
+ {
+ MacroAssembler::linkPointer(code(), label.m_label, value.executableAddress());
+ }
+
+ // These methods are used to obtain handles to allow the code to be relinked / repatched later.
+
+ CodeLocationCall locationOf(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(!call.isFlagSet(Call::Near));
+ return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+ }
+
+ CodeLocationNearCall locationOfNearCall(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(call.isFlagSet(Call::Near));
+ return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+ }
+
+ CodeLocationLabel locationOf(Label label)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ }
+
+ CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
+ {
+ return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ }
+
+ CodeLocationDataLabel32 locationOf(DataLabel32 label)
+ {
+ return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ }
+
+ // This method obtains the return address of the call, given as an offset from
+ // the start of the code.
+ unsigned returnAddressOffset(Call call)
+ {
+ return MacroAssembler::getLinkerCallReturnOffset(call);
+ }
+
+    // Upon completion of all patching, either 'finalizeCode()' or 'finalizeCodeAddendum()' should be
+    // called once to complete generation of the code. 'finalizeCode()' is suited to situations
+    // where the executable pool must also be retained; the lighter-weight 'finalizeCodeAddendum()' is
+    // suited to adding to an existing allocation.
+ CodeRef finalizeCode()
+ {
+ performFinalization();
+
+ return CodeRef(m_code, m_executablePool, m_size);
+ }
+ CodeLocationLabel finalizeCodeAddendum()
+ {
+ performFinalization();
+
+ return CodeLocationLabel(code());
+ }
+
+private:
+ // Keep this private! - the underlying code should only be obtained externally via
+ // finalizeCode() or finalizeCodeAddendum().
+ void* code()
+ {
+ return m_code;
+ }
+
+ void performFinalization()
+ {
+#ifndef NDEBUG
+ ASSERT(!m_completed);
+ m_completed = true;
+#endif
+
+ ExecutableAllocator::makeExecutable(code(), m_size);
+ ExecutableAllocator::cacheFlush(code(), m_size);
+ }
+
+ RefPtr<ExecutablePool> m_executablePool;
+ void* m_code;
+ size_t m_size;
+#ifndef NDEBUG
+ bool m_completed;
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // LinkBuffer_h
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h
index 43d27e7..9e1c5d3 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h
@@ -34,6 +34,10 @@
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+#elif PLATFORM(ARM)
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
+
#elif PLATFORM(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARM.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARM.h
new file mode 100644
index 0000000..27879a9
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -0,0 +1,797 @@
+/*
+ * Copyright (C) 2008 Apple Inc.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARM_h
+#define MacroAssemblerARM_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(ARM)
+
+#include "ARMAssembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+public:
+ enum Condition {
+ Equal = ARMAssembler::EQ,
+ NotEqual = ARMAssembler::NE,
+ Above = ARMAssembler::HI,
+ AboveOrEqual = ARMAssembler::CS,
+ Below = ARMAssembler::CC,
+ BelowOrEqual = ARMAssembler::LS,
+ GreaterThan = ARMAssembler::GT,
+ GreaterThanOrEqual = ARMAssembler::GE,
+ LessThan = ARMAssembler::LT,
+ LessThanOrEqual = ARMAssembler::LE,
+ Overflow = ARMAssembler::VS,
+ Signed = ARMAssembler::MI,
+ Zero = ARMAssembler::EQ,
+ NonZero = ARMAssembler::NE
+ };
+
+ enum DoubleCondition {
+ DoubleEqual, //FIXME
+ DoubleNotEqual, //FIXME
+ DoubleGreaterThan, //FIXME
+ DoubleGreaterThanOrEqual, //FIXME
+ DoubleLessThan, //FIXME
+        DoubleLessThanOrEqual //FIXME
+ };
+
+ static const RegisterID stackPointerRegister = ARM::sp;
+
+ static const Scale ScalePtr = TimesFour;
+
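+    // ARM::S0 and ARM::S1 are scratch registers reserved by the assembler
+    // layer; the operations below use them to synthesize forms the ARM
+    // instruction set cannot encode directly.
+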
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.adds_r(dest, dest, src);
+ }
+
+ void add32(Imm32 imm, Address address)
+ {
+ load32(address, ARM::S1);
+ add32(imm, ARM::S1);
+ store32(ARM::S1, address);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0));
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, ARM::S1);
+ add32(ARM::S1, dest);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.ands_r(dest, dest, src);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ ARMWord w = m_assembler.getImm(imm.m_value, ARM::S0, true);
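+        // getImm() may have encoded ~imm instead, flagging it with OP2_INV_IMM;
+        // BIC with the inverted operand then computes the same result as AND.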
+ if (w & ARMAssembler::OP2_INV_IMM)
+ m_assembler.bics_r(dest, dest, w & ~ARMAssembler::OP2_INV_IMM);
+ else
+ m_assembler.ands_r(dest, dest, w);
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.lsl_r(dest, shift_amount));
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
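+        // MUL does not permit Rd and Rm to be the same register, so multiply
+        // from a scratch copy when src aliases dest.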
+ if (src == dest) {
+ move(src, ARM::S0);
+ src = ARM::S0;
+ }
+ m_assembler.muls_r(dest, dest, src);
+ }
+
+ void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, ARM::S0);
+ m_assembler.muls_r(dest, src, ARM::S0);
+ }
+
+ void not32(RegisterID dest)
+ {
+ m_assembler.mvns_r(dest, dest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orrs_r(dest, dest, src);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.orrs_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0));
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.asr_r(dest, shift_amount));
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.movs_r(dest, m_assembler.asr(dest, imm.m_value & 0x1f));
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subs_r(dest, dest, src);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.subs_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0));
+ }
+
+ void sub32(Imm32 imm, Address address)
+ {
+ load32(address, ARM::S1);
+ sub32(imm, ARM::S1);
+ store32(ARM::S1, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, ARM::S1);
+ sub32(ARM::S1, dest);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.eors_r(dest, dest, src);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.eors_r(dest, dest, m_assembler.getImm(imm.m_value, ARM::S0));
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.dataTransfer32(true, dest, address.base, address.offset);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldr_un_imm(ARM::S0, 0);
+ m_assembler.dtr_ur(true, dest, address.base, ARM::S0);
+ return dataLabel;
+ }
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ load32(address, dest);
+ return label;
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.add_r(ARM::S0, address.base, m_assembler.lsl(address.index, address.scale));
+        if (address.offset >= 0)
+ m_assembler.ldrh_u(dest, ARM::S0, ARMAssembler::getOp2Byte(address.offset));
+ else
+ m_assembler.ldrh_d(dest, ARM::S0, ARMAssembler::getOp2Byte(-address.offset));
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 dataLabel(this);
+ m_assembler.ldr_un_imm(ARM::S0, 0);
+ m_assembler.dtr_ur(false, src, address.base, ARM::S0);
+ return dataLabel;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.dataTransfer32(false, src, address.base, address.offset);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
+ }
+
+ void store32(Imm32 imm, ImplicitAddress address)
+ {
+ move(imm, ARM::S1);
+ store32(ARM::S1, address);
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ m_assembler.moveImm(reinterpret_cast<ARMWord>(address), ARM::S0);
+ m_assembler.dtr_u(false, src, ARM::S0, 0);
+ }
+
+ void store32(Imm32 imm, void* address)
+ {
+ m_assembler.moveImm(reinterpret_cast<ARMWord>(address), ARM::S0);
+ m_assembler.moveImm(imm.m_value, ARM::S1);
+ m_assembler.dtr_u(false, ARM::S1, ARM::S0, 0);
+ }
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop_r(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push_r(src);
+ }
+
+ void push(Address address)
+ {
+ load32(address, ARM::S1);
+ push(ARM::S1);
+ }
+
+ void push(Imm32 imm)
+ {
+ move(imm, ARM::S0);
+ push(ARM::S0);
+ }
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.moveImm(imm.m_value, dest);
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mov_r(dest, src);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ m_assembler.mov_r(dest, m_assembler.getImm(reinterpret_cast<ARMWord>(imm.m_value), ARM::S0));
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ m_assembler.mov_r(ARM::S0, reg1);
+ m_assembler.mov_r(reg1, reg2);
+ m_assembler.mov_r(reg2, ARM::S0);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ Jump branch32(Condition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmp_r(left, right);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Imm32 right)
+ {
+ m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARM::S0));
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Address right)
+ {
+ load32(right, ARM::S1);
+ return branch32(cond, left, ARM::S1);
+ }
+
+ Jump branch32(Condition cond, Address left, RegisterID right)
+ {
+ load32(left, ARM::S1);
+ return branch32(cond, ARM::S1, right);
+ }
+
+ Jump branch32(Condition cond, Address left, Imm32 right)
+ {
+ load32(left, ARM::S1);
+ return branch32(cond, ARM::S1, right);
+ }
+
+ Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+ {
+ load32(left, ARM::S1);
+ return branch32(cond, ARM::S1, right);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+ {
+ UNUSED_PARAM(cond);
+ UNUSED_PARAM(left);
+ UNUSED_PARAM(right);
+ ASSERT_NOT_REACHED();
+ return jump();
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+ {
+ load16(left, ARM::S0);
+ move(right, ARM::S1);
+ m_assembler.cmp_r(ARM::S0, ARM::S1);
+        return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.tst_r(reg, mask);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ ARMWord w = m_assembler.getImm(mask.m_value, ARM::S0, true);
+ if (w & ARMAssembler::OP2_INV_IMM)
+ m_assembler.bics_r(ARM::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
+ else
+ m_assembler.tst_r(reg, w);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ load32(address, ARM::S1);
+ return branchTest32(cond, ARM::S1, mask);
+ }
+
+ Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ load32(address, ARM::S1);
+ return branchTest32(cond, ARM::S1, mask);
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ move(target, ARM::pc);
+ }
+
+ void jump(Address address)
+ {
+ load32(address, ARM::pc);
+ }
+
+ Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ add32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest) {
+ move(src1, ARM::S0);
+ src1 = ARM::S0;
+ }
+ m_assembler.mull_r(ARM::S1, dest, src2, src1);
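+        // The multiply overflowed 32 bits iff the high word (S1) differs from
+        // the sign extension of the low word (dest); callers branch on NonZero.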
+ m_assembler.cmp_r(ARM::S1, m_assembler.asr(dest, 31));
+ }
+
+ Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ mull32(src, dest, dest);
+ cond = NonZero;
+        } else
+ mul32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ move(imm, ARM::S0);
+ mull32(ARM::S0, src, dest);
+ cond = NonZero;
+        } else
+ mul32(imm, src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(src, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ sub32(imm, dest);
+ return Jump(m_assembler.jmp(ARMCondition(cond)));
+ }
+
+ void breakpoint()
+ {
+ m_assembler.bkpt(0);
+ }
+
+ Call nearCall()
+ {
+ prepareCall();
+ return Call(m_assembler.jmp(), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ prepareCall();
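+        // prepareCall() has pushed the return address; transfer control by
+        // loading the target into pc. No linkable JmpSrc is produced here.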
+        move(target, ARM::pc);
+ JmpSrc jmpSrc;
+ return Call(jmpSrc, Call::None);
+ }
+
+ void call(Address address)
+ {
+ call32(address.base, address.offset);
+ }
+
+ void ret()
+ {
+ pop(ARM::pc);
+ }
+
+ void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp_r(left, right);
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+ }
+
+ void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARM::S0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+ }
+
+ void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ load32(address, ARM::S1);
+ if (mask.m_value == -1)
+            m_assembler.cmp_r(ARM::S1, ARMAssembler::getOp2(0));
+ else
+ m_assembler.tst_r(ARM::S1, m_assembler.getImm(mask.m_value, ARM::S0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(0));
+ m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond));
+ }
+
+ void add32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.add_r(dest, src, m_assembler.getImm(imm.m_value, ARM::S0));
+ }
+
+ void add32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.moveImm(reinterpret_cast<ARMWord>(address.m_ptr), ARM::S1);
+ m_assembler.dtr_u(true, ARM::S1, ARM::S1, 0);
+ add32(imm, ARM::S1);
+ m_assembler.moveImm(reinterpret_cast<ARMWord>(address.m_ptr), ARM::S0);
+ m_assembler.dtr_u(false, ARM::S1, ARM::S0, 0);
+ }
+
+ void sub32(Imm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.moveImm(reinterpret_cast<ARMWord>(address.m_ptr), ARM::S1);
+ m_assembler.dtr_u(true, ARM::S1, ARM::S1, 0);
+ sub32(imm, ARM::S1);
+ m_assembler.moveImm(reinterpret_cast<ARMWord>(address.m_ptr), ARM::S0);
+ m_assembler.dtr_u(false, ARM::S1, ARM::S0, 0);
+ }
+
+ void load32(void* address, RegisterID dest)
+ {
+ m_assembler.moveImm(reinterpret_cast<ARMWord>(address), ARM::S0);
+ m_assembler.dtr_u(true, dest, ARM::S0, 0);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, ARM::S1);
+ return branch32(cond, ARM::S1, right);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+ {
+ load32(left.m_ptr, ARM::S1);
+ return branch32(cond, ARM::S1, right);
+ }
+
+ Call call()
+ {
+ prepareCall();
+ return Call(m_assembler.jmp(), Call::Linkable);
+ }
+
+ Call tailRecursiveCall()
+ {
+ return Call::fromTailJump(jump());
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ return Call::fromTailJump(oldJump);
+ }
+
+ DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+ {
+ DataLabelPtr dataLabel(this);
+ m_assembler.ldr_un_imm(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
+ return dataLabel;
+ }
+
+ Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, ARM::S1);
+ Jump jump = branch32(cond, left, ARM::S1);
+ jump.enableLatePatch();
+ return jump;
+ }
+
+ Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ load32(left, ARM::S1);
+ dataLabel = moveWithPatch(initialRightValue, ARM::S0);
+ Jump jump = branch32(cond, ARM::S0, ARM::S1);
+ jump.enableLatePatch();
+ return jump;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr dataLabel = moveWithPatch(initialValue, ARM::S1);
+ store32(ARM::S1, address);
+ return dataLabel;
+ }
+
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+ {
+ return storePtrWithPatch(ImmPtr(0), address);
+ }
+
+ // Floating point operators
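+    //
+    // This port does not implement floating point yet: the capability queries
+    // below return false, and the remaining methods are unreachable stubs that
+    // only satisfy the shared MacroAssembler interface.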
+ bool supportsFloatingPoint() const
+ {
+ return false;
+ }
+
+ bool supportsFloatingPointTruncate() const
+ {
+ return false;
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ UNUSED_PARAM(address);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(address);
+ ASSERT_NOT_REACHED();
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ UNUSED_PARAM(cond);
+ UNUSED_PARAM(left);
+ UNUSED_PARAM(right);
+ ASSERT_NOT_REACHED();
+ return jump();
+ }
+
+    // Truncates 'src' to an integer, and places the resulting value in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MIN).
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ UNUSED_PARAM(src);
+ UNUSED_PARAM(dest);
+ ASSERT_NOT_REACHED();
+ return jump();
+ }
+
+protected:
+ ARMAssembler::Condition ARMCondition(Condition cond)
+ {
+ return static_cast<ARMAssembler::Condition>(cond);
+ }
+
+ void prepareCall()
+ {
+ m_assembler.ensureSpace(3 * sizeof(ARMWord), sizeof(ARMWord));
+
+ // S0 might be used for parameter passing
+ m_assembler.add_r(ARM::S1, ARM::pc, ARMAssembler::OP2_IMM | 0x4);
+ m_assembler.push_r(ARM::S1);
+ }
+
+ void call32(RegisterID base, int32_t offset)
+ {
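+        // prepareCall() pushes the return address, moving sp down one word, so
+        // sp-relative offsets must be biased to keep addressing the same slot.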
+ if (base == ARM::sp)
+ offset += 4;
+
+ if (offset >= 0) {
+ if (offset <= 0xfff) {
+ prepareCall();
+ m_assembler.dtr_u(true, ARM::pc, base, offset);
+ } else if (offset <= 0xfffff) {
+            m_assembler.add_r(ARM::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8)); // rot = 10: imm8 rotated right 20 bits, i.e. (offset >> 12) << 12
+ prepareCall();
+ m_assembler.dtr_u(true, ARM::pc, ARM::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = m_assembler.getImm(offset, ARM::S0);
+ prepareCall();
+ m_assembler.dtr_ur(true, ARM::pc, base, reg);
+ }
+ } else {
+ offset = -offset;
+ if (offset <= 0xfff) {
+ prepareCall();
+ m_assembler.dtr_d(true, ARM::pc, base, offset);
+ } else if (offset <= 0xfffff) {
+            m_assembler.sub_r(ARM::S0, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8)); // rot = 10: imm8 rotated right 20 bits, i.e. (offset >> 12) << 12
+ prepareCall();
+ m_assembler.dtr_d(true, ARM::pc, ARM::S0, offset & 0xfff);
+ } else {
+ ARMWord reg = m_assembler.getImm(offset, ARM::S0);
+ prepareCall();
+ m_assembler.dtr_dr(true, ARM::pc, base, reg);
+ }
+ }
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMAssembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(ARM)
+
+#endif // MacroAssemblerARM_h
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index bd83c60..f7a8402 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -1054,6 +1054,25 @@ protected:
{
return static_cast<ARMv7Assembler::Condition>(cond);
}
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
};
} // namespace JSC
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index 50fca5b..341a7ff 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -165,28 +165,20 @@ private:
class MacroAssemblerCodeRef {
public:
MacroAssemblerCodeRef()
-#ifndef NDEBUG
: m_size(0)
-#endif
{
}
MacroAssemblerCodeRef(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
: m_code(code)
, m_executablePool(executablePool)
+ , m_size(size)
{
-#ifndef NDEBUG
- m_size = size;
-#else
- UNUSED_PARAM(size);
-#endif
}
MacroAssemblerCodePtr m_code;
RefPtr<ExecutablePool> m_executablePool;
-#ifndef NDEBUG
size_t m_size;
-#endif
};
} // namespace JSC
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86.h
index aaf98fd..0b9ff35 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -164,6 +164,24 @@ public:
private:
const bool m_isSSE2Present;
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ X86Assembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
};
} // namespace JSC
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index ffdca7c..df0090a 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -32,6 +32,8 @@
#include "MacroAssemblerX86Common.h"
+#define REPATCH_OFFSET_CALL_R11 3
+
namespace JSC {
class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
@@ -446,6 +448,29 @@ public:
bool supportsFloatingPoint() const { return true; }
// See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
bool supportsFloatingPointTruncate() const { return true; }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ if (!call.isFlagSet(Call::Near))
+            X86Assembler::linkPointer(code, X86Assembler::labelFor(call.m_jmp, -REPATCH_OFFSET_CALL_R11), function.value());
+ else
+ X86Assembler::linkCall(code, call.m_jmp, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
};
} // namespace JSC
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/RepatchBuffer.h b/src/3rdparty/webkit/JavaScriptCore/assembler/RepatchBuffer.h
new file mode 100644
index 0000000..89cbf06
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/RepatchBuffer.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RepatchBuffer_h
+#define RepatchBuffer_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include <MacroAssembler.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+// RepatchBuffer:
+//
+// This class is used to modify code after code generation has been completed,
+// and after the code has potentially already been executed. This mechanism is
+// used to apply optimizations to the code.
+//
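+// A minimal usage sketch (illustrative only; 'codeBlock', 'callSite' and
+// 'newTarget' are hypothetical):
+//
+//     RepatchBuffer repatchBuffer(codeBlock);    // makes the code writable
+//     repatchBuffer.relink(callSite, newTarget); // patch a call destination
+//     // ~RepatchBuffer() restores execute permissions
+//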
+class RepatchBuffer {
+ typedef MacroAssemblerCodePtr CodePtr;
+
+public:
+ RepatchBuffer(CodeBlock* codeBlock)
+ {
+ JITCode& code = codeBlock->getJITCode();
+ m_start = code.start();
+ m_size = code.size();
+
+ ExecutableAllocator::makeWritable(m_start, m_size);
+ }
+
+ ~RepatchBuffer()
+ {
+ ExecutableAllocator::makeExecutable(m_start, m_size);
+ }
+
+ void relink(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchJump(jump, destination);
+ }
+
+ void relink(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationCall call, FunctionPtr destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodePtr destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, CodeLocationLabel(destination));
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, destination);
+ }
+
+ void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ MacroAssembler::repatchInt32(dataLabel32, value);
+ }
+
+ void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ MacroAssembler::repatchPointer(dataLabelPtr, value);
+ }
+
+ void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
+ {
+ MacroAssembler::repatchLoadPtrToLEA(instruction);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), function);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationNearCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+private:
+ void* m_start;
+ size_t m_size;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // RepatchBuffer_h
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/X86Assembler.h b/src/3rdparty/webkit/JavaScriptCore/assembler/X86Assembler.h
index 7a8b58d..745bc60 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/X86Assembler.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/X86Assembler.h
@@ -226,6 +226,7 @@ public:
{
}
+ void enableLatePatch() { }
private:
JmpSrc(int offset)
: m_offset(offset)
@@ -1344,6 +1345,11 @@ public:
return JmpDst(m_formatter.size());
}
+ static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
+ {
+ return JmpDst(jump.m_offset + offset);
+ }
+
JmpDst align(int alignment)
{
while (!m_formatter.isAligned(alignment))
@@ -1366,59 +1372,48 @@ public:
ASSERT(to.m_offset != -1);
char* code = reinterpret_cast<char*>(m_formatter.data());
- patchRel32(code + from.m_offset, code + to.m_offset);
+ setRel32(code + from.m_offset, code + to.m_offset);
}
static void linkJump(void* code, JmpSrc from, void* to)
{
ASSERT(from.m_offset != -1);
- patchRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
static void linkCall(void* code, JmpSrc from, void* to)
{
ASSERT(from.m_offset != -1);
- patchRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
-#if PLATFORM(X86_64)
- static void patchPointerForCall(void* where, void* value)
- {
- reinterpret_cast<void**>(where)[-1] = value;
- }
-#endif
-
- static void patchPointer(void* code, JmpDst where, void* value)
+ static void linkPointer(void* code, JmpDst where, void* value)
{
ASSERT(where.m_offset != -1);
- patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
}
static void relinkJump(void* from, void* to)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(from) - sizeof(int32_t), sizeof(int32_t));
- patchRel32(from, to);
+ setRel32(from, to);
}
static void relinkCall(void* from, void* to)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(from) - sizeof(int32_t), sizeof(int32_t));
- patchRel32(from, to);
+ setRel32(from, to);
}
static void repatchInt32(void* where, int32_t value)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(where) - sizeof(int32_t), sizeof(int32_t));
- patchInt32(where, value);
+ setInt32(where, value);
}
static void repatchPointer(void* where, void* value)
{
- ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(where) - sizeof(void*), sizeof(void*));
- patchPointer(where, value);
+ setPointer(where, value);
}
static void repatchLoadPtrToLEA(void* where)
@@ -1428,7 +1423,6 @@ public:
// Skip over the prefix byte.
where = reinterpret_cast<char*>(where) + 1;
#endif
- ExecutableAllocator::MakeWritable unprotect(where, 1);
*reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
}
@@ -1476,22 +1470,22 @@ public:
private:
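+    // These helpers write the operand that ends at 'where'; the store
+    // therefore lands at where - sizeof(value).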
- static void patchPointer(void* where, void* value)
+ static void setPointer(void* where, void* value)
{
reinterpret_cast<void**>(where)[-1] = value;
}
- static void patchInt32(void* where, int32_t value)
+ static void setInt32(void* where, int32_t value)
{
reinterpret_cast<int32_t*>(where)[-1] = value;
}
- static void patchRel32(void* from, void* to)
+ static void setRel32(void* from, void* to)
{
intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
ASSERT(offset == static_cast<int32_t>(offset));
- patchInt32(from, offset);
+ setInt32(from, offset);
}
class X86InstructionFormatter {