path: root/src/3rdparty/webkit/JavaScriptCore/assembler
authorSimon Hausmann <simon.hausmann@nokia.com>2009-06-24 11:42:15 (GMT)
committerSimon Hausmann <simon.hausmann@nokia.com>2009-06-24 11:42:15 (GMT)
commit259c32cd921fcbb85f79f21923d0efa0d6743d8a (patch)
treef9629c9be4b8488eb3221b48c0629a42a6ffce4c /src/3rdparty/webkit/JavaScriptCore/assembler
parentff2b98cf81daf585bb657bee7f5f131769b38eca (diff)
Updated WebKit from /home/shausman/src/webkit/trunk to qtwebkit-4.6-snapshot-24062009 ( 6d5a2a0472a6af0b7f781da018e76bb8522d57a5 )
++ b/WebKit/qt/ChangeLog

2009-06-19  Daniel <qt-info@nokia.com>

        Reviewed by Simon Hausmann.

        Remove warnings for QString() constructions from const char *
        by explicitly wrapping it with QLatin1String() / QLatin1Char().

        * Api/qwebelement.cpp:
        (QWebElement::classes): Use QLatin1String.
        (QWebElement::addClass): Ditto.
        (QWebElement::removeClass): Ditto.
        (QWebElement::toggleClass): Ditto.

2009-06-18  Friedemann Kleint <Friedemann.Kleint@nokia.com>

        Reviewed by Simon Hausmann.

        Fixed MinGW compilation.

        * Api/qwebelement.cpp:
        (QWebElement::evaluateScript):

2009-06-18  Markus Goetz <Markus.Goetz@nokia.com>

        Reviewed by Simon Hausmann.

        Clarify in docs how to compile with debug information.

        * docs/qtwebkit.qdoc:

2009-06-17  Markus Goetz <Markus.Goetz@nokia.com>

        Reviewed by Simon Hausmann.

        QWebPage: Don't call supportsSsl()

        This stops QWebPage from loading the OpenSSL libs, certificates etc.
        when they are not needed for the non-HTTPS case. Loading the SSL
        libraries can be a very slow operation.

        * Api/qwebpage.cpp:
        (QWebPage::userAgentForUrl):

2009-06-15  Benjamin C Meyer <benjamin.meyer@torchmobile.com>

        Reviewed by Adam Treat.

        Support the back/forward/stop/refresh multimedia keys and accept the
        event when handling backspace and shift backspace as we should.

        * Api/qwebpage.cpp:
        (QWebPagePrivate::keyPressEvent):

2009-06-15  Andre Pedralho <andre.pedralho@openbossa.org>

        Reviewed by Adam Treat.

        https://bugs.webkit.org/show_bug.cgi?id=26351
        Remove bool QWebHitTestResult::isScrollBar() const and make sure a
        null QWebHitTestResult is returned instead.

        * Api/qwebframe.cpp:
        (QWebFrame::hitTestContent):
        * Api/qwebframe.h:
        * Api/qwebpage.cpp:
        (QWebPage::updatePositionDependentActions):

2009-06-15  Simon Hausmann <simon.hausmann@nokia.com>

        Reviewed by Adam Treat.

        Fix the logic for disabling the fixed layout feature, when an invalid
        QSize is set.

        * Api/qwebpage.cpp:
        (QWebPage::setFixedContentsSize):

2009-06-13  Adam Barth <abarth@webkit.org>

        Reviewed by Darin Fisher.

        https://bugs.webkit.org/show_bug.cgi?id=24492
        Move registerURLSchemeAsLocal from FrameLoader to SecurityOrigin.

        * Api/qwebpage.cpp:
        (QWebPage::acceptNavigationRequest):

        Rubber-stamped by Simon Hausmann.

2009-06-09  Simon Hausmann <simon.hausmann@nokia.com>

        Reviewed by Ariya Hidayat.

        Renamed QWebSettings::AllowUniversalAccessFromFileUrls to
        LocalContentCanAccessRemoteUrls, as discussed in the API review.

        * Api/qwebsettings.cpp:
        (QWebSettingsPrivate::apply):
        (QWebSettings::QWebSettings):
        * Api/qwebsettings.h:

2009-06-09  Simon Hausmann <simon.hausmann@nokia.com>

        Reviewed by Ariya Hidayat.

        Merged useFixedLayout property with fixedLayoutSize and renamed the
        latter to fixedContentsSize.

        * Api/qwebpage.cpp:
        (QWebPage::fixedContentsSize):
        (QWebPage::setFixedContentsSize):
        * Api/qwebpage.h:
        * WebCoreSupport/FrameLoaderClientQt.cpp:
        (WebCore::FrameLoaderClientQt::transitionToCommittedForNewPage):

2009-06-09  Simon Hausmann <simon.hausmann@nokia.com>

        Reviewed by Ariya Hidayat.

        Renamed QWebHitTestResult::linkTarget to linkElement() and made it
        return a QWebElement. The link target itself is always the target DOM
        attribute.

        * Api/qwebframe.cpp:
        (QWebHitTestResultPrivate::QWebHitTestResultPrivate):
        (QWebHitTestResult::linkElement):
        * Api/qwebframe.h:
        * Api/qwebframe_p.h:
        * tests/qwebframe/tst_qwebframe.cpp:
Diffstat (limited to 'src/3rdparty/webkit/JavaScriptCore/assembler')
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/ARMv7Assembler.h          1758
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/AbstractMacroAssembler.h   514
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/CodeLocation.h             186
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h             6
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARMv7.h     1063
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerCodeRef.h     50
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86.h          9
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86_64.h       9
8 files changed, 3193 insertions, 402 deletions
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/ARMv7Assembler.h b/src/3rdparty/webkit/JavaScriptCore/assembler/ARMv7Assembler.h
new file mode 100644
index 0000000..9745d6d
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -0,0 +1,1758 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM_ARM_ARCH(7)
+
+#include "AssemblerBuffer.h"
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <stdint.h>
+
+namespace JSC {
+
+namespace ARM {
+ typedef enum {
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7, wr = r7, // thumb work register
+ r8,
+ r9, sb = r9, // static base
+ r10, sl = r10, // stack limit
+ r11, fp = r11, // frame pointer
+ r12, ip = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15,
+ } RegisterID;
+
+ // s0 == d0 == q0
+ // s4 == d2 == q1
+ // etc
+ typedef enum {
+ s0 = 0,
+ s1 = 1,
+ s2 = 2,
+ s3 = 3,
+ s4 = 4,
+ s5 = 5,
+ s6 = 6,
+ s7 = 7,
+ s8 = 8,
+ s9 = 9,
+ s10 = 10,
+ s11 = 11,
+ s12 = 12,
+ s13 = 13,
+ s14 = 14,
+ s15 = 15,
+ s16 = 16,
+ s17 = 17,
+ s18 = 18,
+ s19 = 19,
+ s20 = 20,
+ s21 = 21,
+ s22 = 22,
+ s23 = 23,
+ s24 = 24,
+ s25 = 25,
+ s26 = 26,
+ s27 = 27,
+ s28 = 28,
+ s29 = 29,
+ s30 = 30,
+ s31 = 31,
+ d0 = 0 << 1,
+ d1 = 1 << 1,
+ d2 = 2 << 1,
+ d3 = 3 << 1,
+ d4 = 4 << 1,
+ d5 = 5 << 1,
+ d6 = 6 << 1,
+ d7 = 7 << 1,
+ d8 = 8 << 1,
+ d9 = 9 << 1,
+ d10 = 10 << 1,
+ d11 = 11 << 1,
+ d12 = 12 << 1,
+ d13 = 13 << 1,
+ d14 = 14 << 1,
+ d15 = 15 << 1,
+ d16 = 16 << 1,
+ d17 = 17 << 1,
+ d18 = 18 << 1,
+ d19 = 19 << 1,
+ d20 = 20 << 1,
+ d21 = 21 << 1,
+ d22 = 22 << 1,
+ d23 = 23 << 1,
+ d24 = 24 << 1,
+ d25 = 25 << 1,
+ d26 = 26 << 1,
+ d27 = 27 << 1,
+ d28 = 28 << 1,
+ d29 = 29 << 1,
+ d30 = 30 << 1,
+ d31 = 31 << 1,
+ q0 = 0 << 2,
+ q1 = 1 << 2,
+ q2 = 2 << 2,
+ q3 = 3 << 2,
+ q4 = 4 << 2,
+ q5 = 5 << 2,
+ q6 = 6 << 2,
+ q7 = 7 << 2,
+ q8 = 8 << 2,
+ q9 = 9 << 2,
+ q10 = 10 << 2,
+ q11 = 11 << 2,
+ q12 = 12 << 2,
+ q13 = 13 << 2,
+ q14 = 14 << 2,
+ q15 = 15 << 2,
+ q16 = 16 << 2,
+ q17 = 17 << 2,
+ q18 = 18 << 2,
+ q19 = 19 << 2,
+ q20 = 20 << 2,
+ q21 = 21 << 2,
+ q22 = 22 << 2,
+ q23 = 23 << 2,
+ q24 = 24 << 2,
+ q25 = 25 << 2,
+ q26 = 26 << 2,
+ q27 = 27 << 2,
+ q28 = 28 << 2,
+ q29 = 29 << 2,
+ q30 = 30 << 2,
+ q31 = 31 << 2,
+ } FPRegisterID;
+}
+
+class ARMv7Assembler;
+class ARMThumbImmediate {
+ friend class ARMv7Assembler;
+
+ typedef uint8_t ThumbImmediateType;
+ static const ThumbImmediateType TypeInvalid = 0;
+ static const ThumbImmediateType TypeEncoded = 1;
+ static const ThumbImmediateType TypeUInt16 = 2;
+
+ typedef union {
+ int16_t asInt;
+ struct {
+ unsigned imm8 : 8;
+ unsigned imm3 : 3;
+ unsigned i : 1;
+ unsigned imm4 : 4;
+ };
+ // If this is an encoded immediate, then it may describe a shift, or a pattern.
+ struct {
+ unsigned shiftValue7 : 7;
+ unsigned shiftAmount : 5;
+ };
+ struct {
+ unsigned immediate : 8;
+ unsigned pattern : 4;
+ };
+ } ThumbImmediateValue;
+
+ // byte0 contains the least significant byte; not using an array to make client code endian agnostic.
+ typedef union {
+ int32_t asInt;
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ };
+ } PatternBytes;
+
+ ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
+ {
+ if (value & ~((1<<N)-1)) /* if any of the top N bits (of 2N bits) are set, */
+ value >>= N; /* lose the bottom N; */
+ else /* if none of the top N bits are set, */
+ zeros += N; /* then we have identified N leading zeros. */
+ }
+
+ static int32_t countLeadingZeros(uint32_t value)
+ {
+ if (!value)
+ return 32;
+
+ int32_t zeros = 0;
+ countLeadingZerosPartial(value, zeros, 16);
+ countLeadingZerosPartial(value, zeros, 8);
+ countLeadingZerosPartial(value, zeros, 4);
+ countLeadingZerosPartial(value, zeros, 2);
+ countLeadingZerosPartial(value, zeros, 1);
+ return zeros;
+ }
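+
+ // Worked example (illustrative): countLeadingZeros(0x00001234).
+ //   N=16: no bits above bit 15 are set, so zeros = 16;
+ //   N=8:  bits above bit 7 are set, so value >>= 8 (now 0x12);
+ //   N=4:  bits above bit 3 are set, so value >>= 4 (now 0x1);
+ //   N=2:  zeros = 18;  N=1: zeros = 19.
+ // The highest set bit of 0x1234 is bit 12, so 31 - 12 = 19 leading zeros,
+ // i.e. ASSERT(countLeadingZeros(0x00001234) == 19) would hold.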
+
+ ARMThumbImmediate()
+ : m_type(TypeInvalid)
+ {
+ m_value.asInt = 0;
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
+ : m_type(type)
+ , m_value(value)
+ {
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
+ : m_type(TypeUInt16)
+ {
+ m_value.asInt = value;
+ }
+
+public:
+ static ARMThumbImmediate makeEncodedImm(uint32_t value)
+ {
+ ThumbImmediateValue encoding;
+ encoding.asInt = 0;
+
+ // okay, these are easy.
+ if (value < 256) {
+ encoding.immediate = value;
+ encoding.pattern = 0;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ int32_t leadingZeros = countLeadingZeros(value);
+ // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
+ ASSERT(leadingZeros < 24);
+
+ // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
+ // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
+ // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
+ int32_t rightShiftAmount = 24 - leadingZeros;
+ if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
+ // Shift the value down to the low byte position. The assign to
+ // shiftValue7 drops the implicit top bit.
+ encoding.shiftValue7 = value >> rightShiftAmount;
+ // The encoded shift amount is the magnitude of a right rotate.
+ encoding.shiftAmount = 8 + leadingZeros;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ PatternBytes bytes;
+ bytes.asInt = value;
+
+ if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 3;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 1;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
+ encoding.immediate = bytes.byte1;
+ encoding.pattern = 2;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ return ARMThumbImmediate();
+ }
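+
+ // Encoding examples (illustrative), one per case above:
+ //   makeEncodedImm(0x000000c8): immediate = 0xc8, pattern = 0 (plain 8-bit value).
+ //   makeEncodedImm(0x00ab0000): shifted 8-bit value; leadingZeros = 8, so
+ //                               shiftValue7 = 0x2b (top bit implicit), shiftAmount = 16.
+ //   makeEncodedImm(0x00ff00ff): immediate = 0xff, pattern = 1 (0x00XY00XY).
+ //   makeEncodedImm(0xff00ff00): immediate = 0xff, pattern = 2 (0xXY00XY00).
+ //   makeEncodedImm(0x42424242): immediate = 0x42, pattern = 3 (0xXYXYXYXY).
+ //   makeEncodedImm(0x00123456): !isValid() - callers must fall back to a
+ //                               movT3/movt pair to materialize the value.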
+
+ static ARMThumbImmediate makeUInt12(int32_t value)
+ {
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : ARMThumbImmediate();
+ }
+
+ static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
+ {
+ // If this is not a 12-bit unsigned int, try making an encoded immediate.
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : makeEncodedImm(value);
+ }
+
+ // The 'make' methods, above, return a !isValid() value if the argument
+ // cannot be represented as the requested type. This method cannot fail,
+ // since any uint16_t argument can always be represented.
+ static ARMThumbImmediate makeUInt16(uint16_t value)
+ {
+ return ARMThumbImmediate(TypeUInt16, value);
+ }
+
+ bool isValid()
+ {
+ return m_type != TypeInvalid;
+ }
+
+ // These methods rely on the format of encoded byte values.
+ bool isUInt3() { return !(m_value.asInt & 0xfff8); }
+ bool isUInt4() { return !(m_value.asInt & 0xfff0); }
+ bool isUInt5() { return !(m_value.asInt & 0xffe0); }
+ bool isUInt6() { return !(m_value.asInt & 0xffc0); }
+ bool isUInt7() { return !(m_value.asInt & 0xff80); }
+ bool isUInt8() { return !(m_value.asInt & 0xff00); }
+ bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
+ bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
+ bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
+ bool isUInt16() { return m_type == TypeUInt16; }
+ uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
+ uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
+ uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
+ uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
+ uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
+ uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
+ uint8_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
+ uint8_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
+ uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
+ uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
+
+ bool isEncodedImm() { return m_type == TypeEncoded; }
+
+private:
+ ThumbImmediateType m_type;
+ ThumbImmediateValue m_value;
+};
+
+
+typedef enum {
+ SRType_LSL,
+ SRType_LSR,
+ SRType_ASR,
+ SRType_ROR,
+
+ SRType_RRX = SRType_ROR
+} ARMShiftType;
+
+class ARMv7Assembler;
+class ShiftTypeAndAmount {
+ friend class ARMv7Assembler;
+
+public:
+ ShiftTypeAndAmount()
+ {
+ m_u.type = (ARMShiftType)0;
+ m_u.amount = 0;
+ }
+
+ ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
+ {
+ m_u.type = type;
+ m_u.amount = amount & 31;
+ }
+
+ unsigned lo4() { return m_u.lo4; }
+ unsigned hi4() { return m_u.hi4; }
+
+private:
+ union {
+ struct {
+ unsigned lo4 : 4;
+ unsigned hi4 : 4;
+ };
+ struct {
+ unsigned type : 2;
+ unsigned amount : 5;
+ };
+ } m_u;
+};
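+
+// Example (illustrative): ShiftTypeAndAmount(SRType_ASR, 5). On the little-endian
+// bitfield layout this code assumes, type occupies bits 0-1 and amount bits 2-6,
+// so the packed value is 0b0010110: lo4() == 0x6 (imm2:type) and hi4() == 0x1
+// (imm3), ready for the imm3/imm2/type fields of a 32-bit shifted-register op.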
+
+
+/*
+Some features of the Thumb instruction set are deprecated in ARMv7. Deprecated features affecting
+instructions supported by ARMv7-M are as follows:
+• use of the PC as <Rd> or <Rm> in a 16-bit ADD (SP plus register) instruction
+• use of the SP as <Rm> in a 16-bit ADD (SP plus register) instruction
+• use of the SP as <Rm> in a 16-bit CMP (register) instruction
+• use of MOV (register) instructions in which <Rd> is the SP or PC and <Rm> is also the SP or PC.
+• use of <Rn> as the lowest-numbered register in the register list of a 16-bit STM instruction with base
+register writeback
+*/
+
+class ARMv7Assembler {
+public:
+ typedef ARM::RegisterID RegisterID;
+ typedef ARM::FPRegisterID FPRegisterID;
+
+ // (HS, LO, HI, LS) -> (AE, B, A, BE)
+ // (VS, VC) -> (O, NO)
+ typedef enum {
+ ConditionEQ,
+ ConditionNE,
+ ConditionHS,
+ ConditionLO,
+ ConditionMI,
+ ConditionPL,
+ ConditionVS,
+ ConditionVC,
+ ConditionHI,
+ ConditionLS,
+ ConditionGE,
+ ConditionLT,
+ ConditionGT,
+ ConditionLE,
+ ConditionAL,
+
+ ConditionCS = ConditionHS,
+ ConditionCC = ConditionLO,
+ } Condition;
+
+ class JmpSrc {
+ friend class ARMv7Assembler;
+ friend class ARMInstructionFormatter;
+ public:
+ JmpSrc()
+ : m_offset(-1)
+ {
+ }
+
+ private:
+ JmpSrc(int offset)
+ : m_offset(offset)
+ {
+ }
+
+ int m_offset;
+ };
+
+ class JmpDst {
+ friend class ARMv7Assembler;
+ friend class ARMInstructionFormatter;
+ public:
+ JmpDst()
+ : m_offset(-1)
+ , m_used(false)
+ {
+ }
+
+ bool isUsed() const { return m_used; }
+ void used() { m_used = true; }
+ private:
+ JmpDst(int offset)
+ : m_offset(offset)
+ , m_used(false)
+ {
+ ASSERT(m_offset == offset);
+ }
+
+ int m_offset : 31;
+ int m_used : 1;
+ };
+
+private:
+
+ // ARMv7, Appx-A.6.3
+ bool BadReg(RegisterID reg)
+ {
+ return (reg == ARM::sp) || (reg == ARM::pc);
+ }
+
+ bool isSingleRegister(FPRegisterID reg)
+ {
+ // Single-precision registers are numbered s0..s31 (0..31), so check that no higher bits are set.
+ return !(reg & ~31);
+ }
+
+ bool isDoubleRegister(FPRegisterID reg)
+ {
+ // Double-precision registers are the even IDs d0..d31 (0..62), so the low bit must be clear (not s1, s3, etc).
+ return !(reg & ~(31 << 1));
+ }
+
+ bool isQuadRegister(FPRegisterID reg)
+ {
+ return !(reg & ~(31 << 2));
+ }
+
+ uint32_t singleRegisterNum(FPRegisterID reg)
+ {
+ ASSERT(isSingleRegister(reg));
+ return reg;
+ }
+
+ uint32_t doubleRegisterNum(FPRegisterID reg)
+ {
+ ASSERT(isDoubleRegister(reg));
+ return reg >> 1;
+ }
+
+ uint32_t quadRegisterNum(FPRegisterID reg)
+ {
+ ASSERT(isQuadRegister(reg));
+ return reg >> 2;
+ }
+
+ uint32_t singleRegisterMask(FPRegisterID rd, int highBitsShift, int lowBitShift)
+ {
+ uint32_t rdNum = singleRegisterNum(rd);
+ uint32_t rdMask = (rdNum >> 1) << highBitsShift;
+ if (rdNum & 1)
+ rdMask |= 1 << lowBitShift;
+ return rdMask;
+ }
+
+ uint32_t doubleRegisterMask(FPRegisterID rd, int highBitShift, int lowBitsShift)
+ {
+ uint32_t rdNum = doubleRegisterNum(rd);
+ uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
+ if (rdNum & 16)
+ rdMask |= 1 << highBitShift;
+ return rdMask;
+ }
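+
+ // Example (illustrative): vadd_F64 below calls doubleRegisterMask(rd, 6, 28).
+ // For rd == d17 (register number 17, 0b10001) the low four bits give
+ // (0x1 << 28) and the fifth bit becomes the D bit (1 << 6), so
+ // rdMask == 0x10000040 - the split Vd field of the halfword-swapped
+ // VFP opcode constants used further down.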
+
+ typedef enum {
+ OP_ADD_reg_T1 = 0x1800,
+ OP_ADD_S_reg_T1 = 0x1800,
+ OP_SUB_reg_T1 = 0x1A00,
+ OP_SUB_S_reg_T1 = 0x1A00,
+ OP_ADD_imm_T1 = 0x1C00,
+ OP_ADD_S_imm_T1 = 0x1C00,
+ OP_SUB_imm_T1 = 0x1E00,
+ OP_SUB_S_imm_T1 = 0x1E00,
+ OP_MOV_imm_T1 = 0x2000,
+ OP_CMP_imm_T1 = 0x2800,
+ OP_ADD_imm_T2 = 0x3000,
+ OP_ADD_S_imm_T2 = 0x3000,
+ OP_SUB_imm_T2 = 0x3800,
+ OP_SUB_S_imm_T2 = 0x3800,
+ OP_AND_reg_T1 = 0x4000,
+ OP_EOR_reg_T1 = 0x4040,
+ OP_TST_reg_T1 = 0x4200,
+ OP_CMP_reg_T1 = 0x4280,
+ OP_ORR_reg_T1 = 0x4300,
+ OP_MVN_reg_T1 = 0x43C0,
+ OP_ADD_reg_T2 = 0x4400,
+ OP_MOV_reg_T1 = 0x4600,
+ OP_BLX = 0x4700,
+ OP_BX = 0x4700,
+ OP_LDRH_reg_T1 = 0x5A00,
+ OP_STR_reg_T1 = 0x5000,
+ OP_LDR_reg_T1 = 0x5800,
+ OP_STR_imm_T1 = 0x6000,
+ OP_LDR_imm_T1 = 0x6800,
+ OP_LDRH_imm_T1 = 0x8800,
+ OP_STR_imm_T2 = 0x9000,
+ OP_LDR_imm_T2 = 0x9800,
+ OP_ADD_SP_imm_T1 = 0xA800,
+ OP_ADD_SP_imm_T2 = 0xB000,
+ OP_SUB_SP_imm_T1 = 0xB080,
+ OP_BKPT = 0xBE00,
+ OP_IT = 0xBF00,
+ } OpcodeID;
+
+ typedef enum {
+ OP_AND_reg_T2 = 0xEA00,
+ OP_TST_reg_T2 = 0xEA10,
+ OP_ORR_reg_T2 = 0xEA40,
+ OP_ASR_imm_T1 = 0xEA4F,
+ OP_LSL_imm_T1 = 0xEA4F,
+ OP_LSR_imm_T1 = 0xEA4F,
+ OP_ROR_imm_T1 = 0xEA4F,
+ OP_MVN_reg_T2 = 0xEA6F,
+ OP_EOR_reg_T2 = 0xEA80,
+ OP_ADD_reg_T3 = 0xEB00,
+ OP_ADD_S_reg_T3 = 0xEB10,
+ OP_SUB_reg_T2 = 0xEBA0,
+ OP_SUB_S_reg_T2 = 0xEBB0,
+ OP_CMP_reg_T2 = 0xEBB0,
+ OP_B_T4a = 0xF000,
+ OP_AND_imm_T1 = 0xF000,
+ OP_TST_imm = 0xF010,
+ OP_ORR_imm_T1 = 0xF040,
+ OP_MOV_imm_T2 = 0xF040,
+ OP_MVN_imm = 0xF060,
+ OP_EOR_imm_T1 = 0xF080,
+ OP_ADD_imm_T3 = 0xF100,
+ OP_ADD_S_imm_T3 = 0xF110,
+ OP_CMN_imm = 0xF110,
+ OP_SUB_imm_T3 = 0xF1A0,
+ OP_SUB_S_imm_T3 = 0xF1B0,
+ OP_CMP_imm_T2 = 0xF1B0,
+ OP_ADD_imm_T4 = 0xF200,
+ OP_MOV_imm_T3 = 0xF240,
+ OP_SUB_imm_T4 = 0xF2A0,
+ OP_MOVT = 0xF2C0,
+ OP_LDRH_reg_T2 = 0xF830,
+ OP_LDRH_imm_T3 = 0xF830,
+ OP_STR_imm_T4 = 0xF840,
+ OP_STR_reg_T2 = 0xF840,
+ OP_LDR_imm_T4 = 0xF850,
+ OP_LDR_reg_T2 = 0xF850,
+ OP_LDRH_imm_T2 = 0xF8B0,
+ OP_STR_imm_T3 = 0xF8C0,
+ OP_LDR_imm_T3 = 0xF8D0,
+ OP_LSL_reg_T2 = 0xFA00,
+ OP_LSR_reg_T2 = 0xFA20,
+ OP_ASR_reg_T2 = 0xFA40,
+ OP_ROR_reg_T2 = 0xFA60,
+ OP_SMULL_T1 = 0xFB80,
+ } OpcodeID1;
+
+ typedef enum {
+ OP_B_T4b = 0x9000,
+ } OpcodeID2;
+
+ struct FourFours {
+ FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
+ {
+ m_u.f0 = f0;
+ m_u.f1 = f1;
+ m_u.f2 = f2;
+ m_u.f3 = f3;
+ }
+
+ union {
+ unsigned value;
+ struct {
+ unsigned f0 : 4;
+ unsigned f1 : 4;
+ unsigned f2 : 4;
+ unsigned f3 : 4;
+ };
+ } m_u;
+ };
+
+ class ARMInstructionFormatter;
+
+ // isIf == false means 'else': the returned condition bit is then inverted.
+ bool ifThenElseConditionBit(Condition condition, bool isIf)
+ {
+ return isIf ? (condition & 1) : !(condition & 1);
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | (ifThenElseConditionBit(condition, inst4if) << 1)
+ | 1;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | 2;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | 4;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+
+ uint8_t ifThenElse(Condition condition)
+ {
+ int mask = 8;
+ ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
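+
+ // Example (illustrative): an ITTE EQ block (two instructions execute if EQ,
+ // a third if NE). ConditionEQ == 0, so its low condition bit is 0, giving
+ //     ifThenElse(ConditionEQ, true, false) == (0 << 4) | (0 << 3) | (1 << 2) | 2 == 0x06
+ // and it(ConditionEQ, true, false) emits OP_IT | 0x06 == 0xbf06.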
+
+public:
+
+ void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isValid());
+
+ if (rn == ARM::sp) {
+ if (!(rd & 8) && imm.isUInt10()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, imm.getUInt10() >> 2);
+ return;
+ } else if ((rd == ARM::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, imm.getUInt9() >> 2);
+ return;
+ }
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
+ }
+ }
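+
+ // Instruction-selection examples (illustrative; imm(x) is shorthand for
+ // ARMThumbImmediate::makeUInt12OrEncodedImm(x)):
+ //     add(r0, r1, imm(7))   -> 16-bit ADD (imm) T1
+ //     add(r0, r0, imm(200)) -> 16-bit ADD (imm) T2 (rd == rn, 8-bit immediate)
+ //     add(r8, r1, imm(7))   -> 32-bit ADD (imm) T4, since r8 is a high register
+ //     add(r0, sp, imm(16))  -> 16-bit ADD (SP plus imm) T1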
+
+ void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, add doesn't modify the flags register.
+ void add(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (rd == rn)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
+ else if (rd == rm)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
+ else if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_S_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_S_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_S_reg_T1, rm, rn, rd);
+ else
+ add_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
+ }
+
+ void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
+ else
+ ARM_and(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ JmpSrc b()
+ {
+ m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
+ return JmpSrc(m_formatter.size());
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ JmpSrc blx(RegisterID rm)
+ {
+ ASSERT(rm != ARM::pc);
+ m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8); // the '8' sets bit 7: 0x4700 | 0x80 is the BLX encoding
+ return JmpSrc(m_formatter.size());
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ JmpSrc bx(RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+ return JmpSrc(m_formatter.size());
+ }
+
+ void bkpt(uint8_t imm=0)
+ {
+ m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
+ }
+
+ void cmn(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ void cmp(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!(rn & 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
+ }
+
+ void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ void cmp(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ cmp(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
+ else
+ eor(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void it(Condition cond)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
+ }
+
+ void it(Condition cond, bool inst2if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
+ }
+
+ void it(Condition cond, bool inst2if, bool inst3if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
+ }
+
+ void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
+ }
+
+ // rt == ARM::pc only allowed if last instruction in IT (if then) block.
+ void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARM::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARM::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, imm.getUInt10() >> 2);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+ // if index is not set then it is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
+ }
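+
+ // Addressing-mode examples (illustrative):
+ //     ldr(r0, r1, 4, true, false)  -> LDR r0, [r1, #4]   (offset)
+ //     ldr(r0, r1, 4, true, true)   -> LDR r0, [r1, #4]!  (pre-index, writeback)
+ //     ldr(r0, r1, 4, false, true)  -> LDR r0, [r1], #4   (post-index)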
+
+ // rt == ARM::pc only allowed if last instruction in IT (if then) block.
+ void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+ {
+ ASSERT(rn != ARM::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARM::pc only allowed if last instruction in IT (if then) block.
+ void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARM::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt6())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+ // if index is not set then it is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
+ }
+
+ void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+ {
+ ASSERT(!BadReg(rt)); // Memory hint
+ ASSERT(rn != ARM::pc); // LDRH (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ void movT3(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
+ }
+
+ void mov(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!BadReg(rd));
+
+ if ((rd < 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
+ else if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
+ else
+ movT3(rd, imm);
+ }
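+
+ // Selection examples (illustrative):
+ //     mov(r0, ARMThumbImmediate::makeUInt16(42))         -> 16-bit MOV (imm) T1
+ //     mov(r0, ARMThumbImmediate::makeEncodedImm(0xab00)) -> 32-bit MOV (imm) T2
+ //     mov(r0, ARMThumbImmediate::makeUInt16(0x1234))     -> MOVW via movT3
+ // A constant needing all 32 bits takes a mov/movt pair, low halfword first:
+ //     mov(r0, ARMThumbImmediate::makeUInt16(0x5678));
+ //     movt(r0, ARMThumbImmediate::makeUInt16(0x1234));   // r0 = 0x12345678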
+
+ void mov(RegisterID rd, RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
+ }
+
+ void movt(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isUInt16());
+ ASSERT(!BadReg(rd));
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
+ }
+
+ void mvn(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
+ }
+
+ void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void mvn(RegisterID rd, RegisterID rm)
+ {
+ if (!((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
+ else
+ mvn(rd, rm, ShiftTypeAndAmount());
+ }
+
+ void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
+ }
+
+ void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rdLo));
+ ASSERT(!BadReg(rdHi));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ ASSERT(rdLo != rdHi);
+ m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
+ }
+
+ // rt == ARM::pc only allowed if last instruction in IT (if then) block.
+ void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARM::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+ // if index is not set then it is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
+ }
+
+ // rt == ARM::pc only allowed if last instruction in IT (if then) block.
+ void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
+ {
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARM::sp) && (rd == ARM::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
+ }
+ }
+
+ void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, sub doesn't modify the flags register.
+ void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARM::sp) && (rd == ARM::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_S_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_S_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARM::sp) || (rn == ARM::sp));
+ ASSERT(rd != ARM::pc);
+ ASSERT(rn != ARM::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_S_reg_T1, rm, rn, rd);
+ else
+ sub_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ void tst(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ void tst(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ tst(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
+ }
+
+ void vadd_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0b00ee30 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+ }
+
+ void vcmp_F64(FPRegisterID rd, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0bc0eeb4 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rm, 21, 16));
+ }
+
+ void vcvt_F64_S32(FPRegisterID fd, FPRegisterID sm)
+ {
+ m_formatter.vfpOp(0x0bc0eeb8 | doubleRegisterMask(fd, 6, 28) | singleRegisterMask(sm, 16, 21));
+ }
+
+ void vcvt_S32_F64(FPRegisterID sd, FPRegisterID fm)
+ {
+ m_formatter.vfpOp(0x0bc0eebd | singleRegisterMask(sd, 28, 6) | doubleRegisterMask(fm, 21, 16));
+ }
+
+ void vldr(FPRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ vmem(rd, rn, imm, true);
+ }
+
+ void vmov(RegisterID rd, FPRegisterID sn)
+ {
+ m_formatter.vfpOp(0x0a10ee10 | (rd << 28) | singleRegisterMask(sn, 0, 23));
+ }
+
+ void vmov(FPRegisterID sn, RegisterID rd)
+ {
+ m_formatter.vfpOp(0x0a10ee00 | (rd << 28) | singleRegisterMask(sn, 0, 23));
+ }
+
+ // move FPSCR flags to APSR.
+ void vmrs_APSR_nzcv_FPSCR()
+ {
+ m_formatter.vfpOp(0xfa10eef1);
+ }
+
+ void vmul_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0b00ee20 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+ }
+
+ void vstr(FPRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ vmem(rd, rn, imm, false);
+ }
+
+ void vsub_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
+ {
+ m_formatter.vfpOp(0x0b40ee30 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
+ }
+
+
+ JmpDst label()
+ {
+ return JmpDst(m_formatter.size());
+ }
+
+ JmpDst align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, JmpSrc jump)
+ {
+ ASSERT(jump.m_offset != -1);
+
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
+ }
+
+ static void* getRelocatedAddress(void* code, JmpDst destination)
+ {
+ ASSERT(destination.m_offset != -1);
+
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ // Assembler admin methods:
+
+ size_t size() const
+ {
+ return m_formatter.size();
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ void* copy = m_formatter.executableCopy(allocator);
+ ASSERT(copy);
+ return copy;
+ }
+
+ static unsigned getCallReturnOffset(JmpSrc call)
+ {
+ ASSERT(call.m_offset >= 0);
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+ // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
+
+ void linkJump(JmpSrc from, JmpDst to)
+ {
+ ASSERT(to.m_offset != -1);
+ ASSERT(from.m_offset != -1);
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(m_formatter.data()) + from.m_offset);
+ intptr_t relative = to.m_offset - from.m_offset;
+
+ linkWithOffset(location, relative);
+ }
+
+ static void linkJump(void* code, JmpSrc from, void* to)
+ {
+ ASSERT(from.m_offset != -1);
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(location);
+
+ linkWithOffset(location, relative);
+ }
+
+ // Note: this method is static since it is used by the LinkBuffer.
+ // Should it return a bool saying whether the link was successful?
+ static void linkCall(void* code, JmpSrc from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
+ ASSERT(from.m_offset != -1);
+ ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+ patchPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
+ }
+
+ static void patchPointer(void* code, JmpDst where, void* value)
+ {
+ patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(from) - 2, 2 * sizeof(uint16_t));
+
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ linkWithOffset(reinterpret_cast<uint16_t*>(from), relative);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
+
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+ patchPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
+
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ patchInt32(where, value);
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
+
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ patchPointer(where, value);
+ }
+
+ static void repatchLoadPtrToLEA(void* where)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;
+ ASSERT((*loadOp & 0xfff0) == OP_LDR_reg_T2);
+
+ ExecutableAllocator::MakeWritable unprotect(loadOp, sizeof(uint16_t));
+ *loadOp = OP_ADD_reg_T3 | (*loadOp & 0xf);
+ }
+
+private:
+
+ // ARM VFP addresses can be offset by an 8-bit immediate with a separate sign (up/down) bit,
+ // left shifted by 2 (i.e. +/-(0..255) 32-bit words).
+ void vmem(FPRegisterID rd, RegisterID rn, int32_t imm, bool isLoad)
+ {
+ bool up;
+ uint32_t offset;
+ if (imm < 0) {
+ offset = -imm;
+ up = false;
+ } else {
+ offset = imm;
+ up = true;
+ }
+
+ // offset is effectively left-shifted by 2 already (the bottom two bits are zero, and not
+ // represented in the instruction). Left shift by 14 to move it into position 0x00AA0000.
+ ASSERT((offset & ~(0xff << 2)) == 0);
+ offset <<= 14;
+
+ m_formatter.vfpOp(0x0b00ed00 | offset | (up << 7) | (isLoad << 4) | doubleRegisterMask(rd, 6, 28) | rn);
+ }
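+
+ // Example (illustrative): vldr(d0, r4, -8) encodes up = false and a word
+ // offset of 2, i.e. VLDR d0, [r4, #-8]; any offset outside +/-1020 bytes,
+ // or with either of the bottom two bits set, trips the ASSERT above.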
+
+ static void patchInt32(void* code, uint32_t value)
+ {
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+
+ ExecutableAllocator::MakeWritable unprotect(location - 4, 4 * sizeof(uint16_t));
+
+ uint16_t lo16 = value;
+ uint16_t hi16 = value >> 16;
+
+ spliceHi5(location - 4, lo16);
+ spliceLo11(location - 3, lo16);
+ spliceHi5(location - 2, hi16);
+ spliceLo11(location - 1, hi16);
+ }
+
+ static void patchPointer(void* code, void* value)
+ {
+ patchInt32(code, reinterpret_cast<uint32_t>(value));
+ }
+
+ // Linking & patching:
+ // This method assumes that the JmpSrc being linked is a T4 b instruction.
+ static void linkWithOffset(uint16_t* instruction, intptr_t relative)
+ {
+ // Currently branches of more than +/-16MB are fatal.
+ if (((relative << 7) >> 7) != relative) {
+ // FIXME: This CRASH means we cannot turn the JIT on by default on arm-v7.
+ fprintf(stderr, "Error: Cannot link T4b.\n");
+ CRASH();
+ }
+
+ // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+ if (relative >= 0)
+ relative ^= 0xC00000;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+
+ int word1 = ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+ int word2 = ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+
+ instruction[-2] = OP_B_T4a | word1;
+ instruction[-1] = OP_B_T4b | word2;
+ }
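+
+ // Worked example (illustrative): relative == +0x2000. The XOR yields
+ // 0xc02000, so word1 == 0x002 (S=0, imm10=2) and word2 == 0x2800
+ // (J1=1, J2=1, imm11=0); the emitted pair 0xf002 0xb800 is a T4 B.W
+ // with a +0x2000 byte offset.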
+
+ // These functions can be used to splice 16-bit immediates back into previously generated instructions.
+ static void spliceHi5(uint16_t* where, uint16_t what)
+ {
+ uint16_t pattern = (what >> 12) | ((what & 0x0800) >> 1);
+ *where = (*where & 0xFBF0) | pattern;
+ }
+ static void spliceLo11(uint16_t* where, uint16_t what)
+ {
+ uint16_t pattern = ((what & 0x0700) << 4) | (what & 0x00FF);
+ *where = (*where & 0x8F00) | pattern;
+ }
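+
+ // Worked example (illustrative): re-splicing imm16 == 0x1234, which splits as
+ // imm4:i:imm3:imm8 == 0x1:0:0x2:0x34 in the MOVW/MOVT encodings:
+ //     spliceHi5(where, 0x1234) merges in 0x0001 (imm4 in bits 0-3, i in bit 10);
+ //     spliceLo11(where + 1, 0x1234) merges in 0x2034 (imm3 in bits 12-14, imm8 in bits 0-7).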
+
+ class ARMInstructionFormatter {
+ public:
+ void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
+ {
+ m_buffer.putShort(op | (rd << 8) | imm);
+ }
+
+ void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
+ }
+
+ void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
+ {
+ m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
+ }
+
+ void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
+ }
+ void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (reg1 << 3) | reg2);
+ }
+
+ void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
+ {
+ m_buffer.putShort(op | reg);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
+ {
+ m_buffer.putShort(op);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
+ {
+ m_buffer.putShort(op1);
+ m_buffer.putShort(op2);
+ }
+
+ void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
+ {
+ m_buffer.putShort(op | (imm.m_value.i << 10) | imm4);
+ m_buffer.putShort((imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8);
+ }
+
+ void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((reg2 << 12) | imm);
+ }
+
+ void vfpOp(int32_t op)
+ {
+ m_buffer.putInt(op);
+ }
+
+
+ // Administrative methods:
+
+ size_t size() const { return m_buffer.size(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+ void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
+
+ private:
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM_ARM_ARCH(7)
+
+#endif // ARMAssembler_h
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/AbstractMacroAssembler.h b/src/3rdparty/webkit/JavaScriptCore/assembler/AbstractMacroAssembler.h
index 5def60f..cf94677 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -29,6 +29,7 @@
#include <wtf/Platform.h>
#include <MacroAssemblerCodeRef.h>
+#include <CodeLocation.h>
#include <wtf/Noncopyable.h>
#include <wtf/UnusedParam.h>
@@ -48,15 +49,8 @@ public:
typedef MacroAssemblerCodeRef CodeRef;
class Jump;
- class PatchBuffer;
- class CodeLocationInstruction;
- class CodeLocationLabel;
- class CodeLocationJump;
- class CodeLocationCall;
- class CodeLocationNearCall;
- class CodeLocationDataLabel32;
- class CodeLocationDataLabelPtr;
- class ProcessorReturnAddress;
+ class LinkBuffer;
+ class RepatchBuffer;
typedef typename AssemblerType::RegisterID RegisterID;
typedef typename AssemblerType::FPRegisterID FPRegisterID;
@@ -181,17 +175,32 @@ public:
struct Imm32 {
explicit Imm32(int32_t value)
: m_value(value)
+#if PLATFORM_ARM_ARCH(7)
+ , m_isPointer(false)
+#endif
{
}
#if !PLATFORM(X86_64)
explicit Imm32(ImmPtr ptr)
: m_value(ptr.asIntptr())
+#if PLATFORM_ARM_ARCH(7)
+ , m_isPointer(true)
+#endif
{
}
#endif
int32_t m_value;
+#if PLATFORM_ARM_ARCH(7)
+ // We rely on being able to regenerate code to recover exception handling
+ // information. Since ARMv7 supports 16-bit immediates there is a danger
+ // that if pointer values change the layout of the generated code will change.
+ // To avoid this problem, always generate pointers (and thus Imm32s constructed
+ // from ImmPtrs) with a code sequence that is able to represent any pointer
+ // value - don't use a more compact form in these cases.
+ bool m_isPointer;
+#endif
};
@@ -212,7 +221,7 @@ public:
friend class AbstractMacroAssembler;
friend class Jump;
friend class MacroAssemblerCodeRef;
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
Label()
@@ -237,7 +246,7 @@ public:
class DataLabelPtr {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
DataLabelPtr()
{
@@ -259,7 +268,7 @@ public:
class DataLabel32 {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
DataLabel32()
{
@@ -283,7 +292,7 @@ public:
class Call {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
enum Flags {
None = 0x0,
@@ -328,7 +337,7 @@ public:
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class Call;
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
Jump()
{
@@ -358,7 +367,7 @@ public:
// A JumpList is a set of Jump objects.
// All jumps in the set will be linked to the same destination.
class JumpList {
- friend class PatchBuffer;
+ friend class LinkBuffer;
public:
void link(AbstractMacroAssembler<AssemblerType>* masm)
@@ -397,343 +406,14 @@ public:
};
- // Section 3: MacroAssembler JIT instruction stream handles.
- //
- // The MacroAssembler provides facilities to modify a JIT generated
- // instruction stream after it has been generated (relinking calls and
- // jumps, and repatching data values). The following types are used
- // to store handles into the underlying instruction stream, the type
- // providing semantic information as to what it is that is in the
- // instruction stream at this point, and thus what operations may be
- // performed on it.
-
-
- // CodeLocationCommon:
- //
- // Base type for other CodeLocation* types. A position in the JIT generated
- // instruction stream, without any semantic information.
- class CodeLocationCommon {
- public:
- CodeLocationCommon()
- {
- }
-
- // In order to avoid the need to store multiple handles into the
- // instruction stream, where the code generation is deterministic
- // and the labels will always be a fixed distance apart, these
- // methods may be used to recover a handle that has not been
- // retained, based on a known fixed relative offset from one that has.
- CodeLocationInstruction instructionAtOffset(int offset);
- CodeLocationLabel labelAtOffset(int offset);
- CodeLocationJump jumpAtOffset(int offset);
- CodeLocationCall callAtOffset(int offset);
- CodeLocationNearCall nearCallAtOffset(int offset);
- CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
- CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
-
- protected:
- explicit CodeLocationCommon(CodePtr location)
- : m_location(location)
- {
- }
-
- void* dataLocation() { return m_location.dataLocation(); }
- void* executableAddress() { return m_location.executableAddress(); }
-
- void reset()
- {
- m_location = CodePtr();
- }
-
- private:
- CodePtr m_location;
- };
-
- // CodeLocationInstruction:
- //
- // An arbitrary instruction in the JIT code.
- class CodeLocationInstruction : public CodeLocationCommon {
- friend class CodeLocationCommon;
- public:
- CodeLocationInstruction()
- {
- }
-
- void repatchLoadPtrToLEA()
- {
- AssemblerType::repatchLoadPtrToLEA(this->dataLocation());
- }
-
- private:
- explicit CodeLocationInstruction(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // CodeLocationLabel:
- //
- // A point in the JIT code marked with a label.
- class CodeLocationLabel : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class CodeLocationJump;
- friend class CodeLocationCall;
- friend class CodeLocationNearCall;
- friend class PatchBuffer;
- friend class ProcessorReturnAddress;
-
- public:
- CodeLocationLabel()
- {
- }
-
- void* addressForSwitch() { return this->executableAddress(); }
- void* addressForExceptionHandler() { return this->executableAddress(); }
- void* addressForJSR() { return this->executableAddress(); }
-
- bool operator!()
- {
- return !this->executableAddress();
- }
-
- void reset()
- {
- CodeLocationCommon::reset();
- }
-
- private:
- explicit CodeLocationLabel(CodePtr location)
- : CodeLocationCommon(location)
- {
- }
-
- explicit CodeLocationLabel(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
-
- void* getJumpDestination() { return this->executableAddress(); }
- };
-
- // CodeLocationJump:
- //
- // A point in the JIT code at which there is a jump instruction.
- class CodeLocationJump : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class PatchBuffer;
- public:
- CodeLocationJump()
- {
- }
-
- void relink(CodeLocationLabel destination)
- {
- AssemblerType::relinkJump(this->dataLocation(), destination.executableAddress());
- }
-
- private:
- explicit CodeLocationJump(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // CodeLocationCall:
- //
- // A point in the JIT code at which there is a call instruction.
- class CodeLocationCall : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class PatchBuffer;
- friend class ProcessorReturnAddress;
- public:
- CodeLocationCall()
- {
- }
-
- void relink(CodeLocationLabel destination)
- {
-#if PLATFORM(X86_64)
- CodeLocationCommon::dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).repatch(destination.executableAddress());
-#else
- AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
-#endif
- }
-
- void relink(FunctionPtr destination)
- {
-#if PLATFORM(X86_64)
- CodeLocationCommon::dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).repatch(destination.executableAddress());
-#else
- AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
-#endif
- }
-
- // This method returns the value that will be set as the return address
- // within a function that has been called from this call instruction.
- void* calleeReturnAddressValue()
- {
- return this->executableAddress();
- }
-
- private:
- explicit CodeLocationCall(CodePtr location)
- : CodeLocationCommon(location)
- {
- }
-
- explicit CodeLocationCall(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // CodeLocationNearCall:
- //
- // A point in the JIT code at which there is a call instruction with near linkage.
- class CodeLocationNearCall : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class PatchBuffer;
- friend class ProcessorReturnAddress;
- public:
- CodeLocationNearCall()
- {
- }
-
- void relink(CodePtr destination)
- {
- AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
- }
-
- void relink(CodeLocationLabel destination)
- {
- AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
- }
-
- void relink(FunctionPtr destination)
- {
- AssemblerType::relinkCall(this->dataLocation(), destination.executableAddress());
- }
-
- // This method returns the value that will be set as the return address
- // within a function that has been called from this call instruction.
- void* calleeReturnAddressValue()
- {
- return this->executableAddress();
- }
-
- private:
- explicit CodeLocationNearCall(CodePtr location)
- : CodeLocationCommon(location)
- {
- }
-
- explicit CodeLocationNearCall(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // CodeLocationDataLabel32:
- //
- // A point in the JIT code at which there is an int32_t immediate that may be repatched.
- class CodeLocationDataLabel32 : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class PatchBuffer;
- public:
- CodeLocationDataLabel32()
- {
- }
-
- void repatch(int32_t value)
- {
- AssemblerType::repatchInt32(this->dataLocation(), value);
- }
-
- private:
- explicit CodeLocationDataLabel32(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // CodeLocationDataLabelPtr:
- //
- // A point in the JIT code at which there is a void* immediate that may be repatched.
- class CodeLocationDataLabelPtr : public CodeLocationCommon {
- friend class CodeLocationCommon;
- friend class PatchBuffer;
- public:
- CodeLocationDataLabelPtr()
- {
- }
-
- void repatch(void* value)
- {
- AssemblerType::repatchPointer(this->dataLocation(), value);
- }
-
- private:
- explicit CodeLocationDataLabelPtr(void* location)
- : CodeLocationCommon(CodePtr(location))
- {
- }
- };
-
- // ProcessorReturnAddress:
- //
- // This class can be used to relink a call identified by its return address.
- class ProcessorReturnAddress {
- friend class CodeLocationCall;
- friend class CodeLocationNearCall;
- public:
- ProcessorReturnAddress(void* location)
- : m_location(location)
- {
- }
-
- void relinkCallerToTrampoline(CodeLocationLabel label)
- {
- CodeLocationCall(CodePtr(m_location)).relink(label);
- }
-
- void relinkCallerToTrampoline(CodePtr newCalleeFunction)
- {
- relinkCallerToTrampoline(CodeLocationLabel(newCalleeFunction));
- }
-
- void relinkCallerToFunction(FunctionPtr function)
- {
- CodeLocationCall(CodePtr(m_location)).relink(function);
- }
-
- void relinkNearCallerToTrampoline(CodeLocationLabel label)
- {
- CodeLocationNearCall(CodePtr(m_location)).relink(label);
- }
-
- void relinkNearCallerToTrampoline(CodePtr newCalleeFunction)
- {
- relinkNearCallerToTrampoline(CodeLocationLabel(newCalleeFunction));
- }
-
- void* addressForLookup()
- {
- return m_location.value();
- }
-
- private:
- ReturnAddressPtr m_location;
- };
-
-
- // Section 4: PatchBuffer - utility to finalize code generation.
+ // Section 3: LinkBuffer - utility to finalize code generation.
static CodePtr trampolineAt(CodeRef ref, Label label)
{
return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
}
- // PatchBuffer:
+ // LinkBuffer:
//
// This class assists in linking code generated by the macro assembler, once code generation
// has been completed, and the code has been copied to its final location in memory. At this
@@ -750,12 +430,12 @@ public:
// FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
// address of calls, as opposed to a point that can be used to later relink a Jump -
// possibly wrap the latter up in an object that can do just that).
- class PatchBuffer : public Noncopyable {
+ class LinkBuffer : public Noncopyable {
public:
// Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
// First, executablePool is copied into m_executablePool, then the initialization of
// m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
- PatchBuffer(AbstractMacroAssembler<AssemblerType>* masm, PassRefPtr<ExecutablePool> executablePool)
+ LinkBuffer(AbstractMacroAssembler<AssemblerType>* masm, PassRefPtr<ExecutablePool> executablePool)
: m_executablePool(executablePool)
, m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
, m_size(masm->m_assembler.size())
@@ -765,7 +445,7 @@ public:
{
}
- ~PatchBuffer()
+ ~LinkBuffer()
{
ASSERT(m_completed);
}
@@ -786,13 +466,13 @@ public:
void link(Jump jump, CodeLocationLabel label)
{
- AssemblerType::linkJump(code(), jump.m_jmp, label.executableAddress());
+ AssemblerType::linkJump(code(), jump.m_jmp, label.dataLocation());
}
void link(JumpList list, CodeLocationLabel label)
{
for (unsigned i = 0; i < list.m_jumps.size(); ++i)
- AssemblerType::linkJump(code(), list.m_jumps[i].m_jmp, label.executableAddress());
+ AssemblerType::linkJump(code(), list.m_jumps[i].m_jmp, label.dataLocation());
}
void patch(DataLabelPtr label, void* value)
@@ -802,7 +482,7 @@ public:
void patch(DataLabelPtr label, CodeLocationLabel value)
{
- AssemblerType::patchPointer(code(), label.m_label, value.getJumpDestination());
+ AssemblerType::patchPointer(code(), label.m_label, value.executableAddress());
}
// These methods are used to obtain handles to allow the code to be relinked / repatched later.
@@ -886,8 +566,93 @@ public:
#endif
};
+ class RepatchBuffer {
+ public:
+ RepatchBuffer()
+ {
+ }
+
+ void relink(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+ }
+
+ void relink(CodeLocationCall call, CodeLocationLabel destination)
+ {
+#if PLATFORM(X86_64)
+ repatch(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11), destination.executableAddress());
+#else
+ AssemblerType::relinkCall(call.dataLocation(), destination.executableAddress());
+#endif
+ }
+
+ void relink(CodeLocationCall call, FunctionPtr destination)
+ {
+#if PLATFORM(X86_64)
+ repatch(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11), destination.executableAddress());
+#else
+ AssemblerType::relinkCall(call.dataLocation(), destination.executableAddress());
+#endif
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodePtr destination)
+ {
+ AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+ }
+
+ void relink(CodeLocationNearCall nearCall, FunctionPtr destination)
+ {
+ AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+ }
+
+ void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+ }
+
+ void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), function);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationNearCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
+ {
+ AssemblerType::repatchLoadPtrToLEA(instruction.dataLocation());
+ }
+ };
+
- // Section 5: Misc admin methods
+ // Section 4: Misc admin methods
size_t size()
{
@@ -949,49 +714,6 @@ protected:
AssemblerType m_assembler;
};
-
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationInstruction AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::instructionAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
-}
-
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationLabel AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::labelAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
-}
-
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationJump AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::jumpAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
-}
-
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationCall AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::callAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
-}
-
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationNearCall AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::nearCallAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
-}
-
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationDataLabelPtr AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::dataLabelPtrAtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
-}
-
-template <class AssemblerType>
-typename AbstractMacroAssembler<AssemblerType>::CodeLocationDataLabel32 AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::dataLabel32AtOffset(int offset)
-{
- return typename AbstractMacroAssembler::CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
-}
-
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
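[A minimal sketch of the division of labour after this split, assuming a concrete MacroAssembler instantiation; the variable names and the new call target are hypothetical, only the constructor and methods shown in the hunks above are used, and finalization of the LinkBuffer is elided. LinkBuffer runs once, while freshly copied code is being linked; RepatchBuffer mutates code that is already live.]

    void linkThenRepatchLater(MacroAssembler& masm, PassRefPtr<ExecutablePool> pool,
                              MacroAssembler::Jump jump, CodeLocationLabel target,
                              CodeLocationCall call, void (*newTarget)())
    {
        // One-time step: copy the code out of masm and resolve recorded jumps.
        MacroAssembler::LinkBuffer linkBuffer(&masm, pool);
        linkBuffer.link(jump, target);
        // (finalization of the LinkBuffer elided)

        // Later, on live code: re-point an already-linked call.
        MacroAssembler::RepatchBuffer repatchBuffer;
        repatchBuffer.relink(call, FunctionPtr(newTarget));
    }
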
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/CodeLocation.h b/src/3rdparty/webkit/JavaScriptCore/assembler/CodeLocation.h
new file mode 100644
index 0000000..b910b6f
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/CodeLocation.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeLocation_h
+#define CodeLocation_h
+
+#include <wtf/Platform.h>
+
+#include <MacroAssemblerCodeRef.h>
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+class CodeLocationInstruction;
+class CodeLocationLabel;
+class CodeLocationJump;
+class CodeLocationCall;
+class CodeLocationNearCall;
+class CodeLocationDataLabel32;
+class CodeLocationDataLabelPtr;
+
+// The CodeLocation* types are all pretty much do-nothing wrappers around
+// CodePtr (or MacroAssemblerCodePtr, to give it its full name). These
+// classes only exist to provide type-safety when linking and patching code.
+//
+// The one new piece of functionality introduced by these classes is the
+// ability to create (or put another way, to re-discover) another CodeLocation
+// at an offset from one you already know. When patching code to optimize it
+// we often want to patch a number of instructions that are short, fixed
+// offsets apart. To reduce memory overhead we will only retain a pointer to
+// one of the instructions, and we will use the *AtOffset methods provided by
+// CodeLocationCommon to find the other points in the code to modify.
+class CodeLocationCommon : public MacroAssemblerCodePtr {
+public:
+ CodeLocationInstruction instructionAtOffset(int offset);
+ CodeLocationLabel labelAtOffset(int offset);
+ CodeLocationJump jumpAtOffset(int offset);
+ CodeLocationCall callAtOffset(int offset);
+ CodeLocationNearCall nearCallAtOffset(int offset);
+ CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
+ CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
+
+protected:
+ CodeLocationCommon()
+ {
+ }
+
+ CodeLocationCommon(MacroAssemblerCodePtr location)
+ : MacroAssemblerCodePtr(location)
+ {
+ }
+};
+
+class CodeLocationInstruction : public CodeLocationCommon {
+public:
+ CodeLocationInstruction() {}
+ explicit CodeLocationInstruction(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationInstruction(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationLabel : public CodeLocationCommon {
+public:
+ CodeLocationLabel() {}
+ explicit CodeLocationLabel(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationLabel(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationJump : public CodeLocationCommon {
+public:
+ CodeLocationJump() {}
+ explicit CodeLocationJump(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationJump(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationCall : public CodeLocationCommon {
+public:
+ CodeLocationCall() {}
+ explicit CodeLocationCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationNearCall : public CodeLocationCommon {
+public:
+ CodeLocationNearCall() {}
+ explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationNearCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabel32 : public CodeLocationCommon {
+public:
+ CodeLocationDataLabel32() {}
+ explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabel32(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabelPtr : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelPtr() {}
+ explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabelPtr(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // CodeLocation_h
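[A minimal sketch of the *AtOffset re-discovery pattern the header comment above describes, assuming a retained call handle; the helper name and the offset value are hypothetical, since the real offset is fixed by deterministic code generation.]

    // Only one handle is stored; the neighbouring patch point is recomputed
    // at a known fixed offset rather than being retained separately.
    CodeLocationDataLabelPtr pointerSlotFor(CodeLocationCall call)
    {
        const int ptrSlotOffset = -12; // hypothetical; fixed by deterministic codegen
        return call.dataLabelPtrAtOffset(ptrSlotOffset);
    }
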
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h
index f341267..43d27e7 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h
@@ -30,7 +30,11 @@
#if ENABLE(ASSEMBLER)
-#if PLATFORM(X86)
+#if PLATFORM_ARM_ARCH(7)
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+
+#elif PLATFORM(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARMv7.h
new file mode 100644
index 0000000..bd83c60
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -0,0 +1,1063 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARMv7_h
+#define MacroAssemblerARMv7_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARMv7Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
+ // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
+ // - dTR is likely used more than aTR, and we'll get better instruction
+ // encoding if it's in the low 8 registers.
+ static const ARM::RegisterID dataTempRegister = ARM::ip;
+ static const RegisterID addressTempRegister = ARM::r3;
+ static const FPRegisterID fpTempRegister = ARM::d7;
+
+ struct ArmAddress {
+ enum AddressType {
+ HasOffset,
+ HasIndex,
+ } type;
+ RegisterID base;
+ union {
+ int32_t offset;
+ struct {
+ RegisterID index;
+ Scale scale;
+ };
+ } u;
+
+ explicit ArmAddress(RegisterID base, int32_t offset = 0)
+ : type(HasOffset)
+ , base(base)
+ {
+ u.offset = offset;
+ }
+
+ explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
+ : type(HasIndex)
+ , base(base)
+ {
+ u.index = index;
+ u.scale = scale;
+ }
+ };
+
+public:
+
+ static const Scale ScalePtr = TimesFour;
+
+ enum Condition {
+ Equal = ARMv7Assembler::ConditionEQ,
+ NotEqual = ARMv7Assembler::ConditionNE,
+ Above = ARMv7Assembler::ConditionHI,
+ AboveOrEqual = ARMv7Assembler::ConditionHS,
+ Below = ARMv7Assembler::ConditionLO,
+ BelowOrEqual = ARMv7Assembler::ConditionLS,
+ GreaterThan = ARMv7Assembler::ConditionGT,
+ GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ LessThan = ARMv7Assembler::ConditionLT,
+ LessThanOrEqual = ARMv7Assembler::ConditionLE,
+ Overflow = ARMv7Assembler::ConditionVS,
+ Signed = ARMv7Assembler::ConditionMI,
+ Zero = ARMv7Assembler::ConditionEQ,
+ NonZero = ARMv7Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ DoubleEqual = ARMv7Assembler::ConditionEQ,
+ DoubleGreaterThan = ARMv7Assembler::ConditionGT,
+ DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ DoubleLessThan = ARMv7Assembler::ConditionLO,
+ DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
+ };
+
+ static const RegisterID stackPointerRegister = ARM::sp;
+ static const RegisterID linkRegister = ARM::lr;
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst)
+ // For many operations the source may be an Imm32, the srcDst operand
+ // may often be a memory location (explicitly described using an Address
+ // object).
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.add(dest, dest, src);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add(dest, src, dataTempRegister);
+ }
+ }
+
+ void add32(Imm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(Imm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.ARM_and(dest, dest, src);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.ARM_and(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.ARM_and(dest, dest, dataTempRegister);
+ }
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.lsl(dest, dest, imm.m_value);
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.lsl(dest, dest, shift_amount);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.smull(dest, dataTempRegister, dest, src);
+ }
+
+ void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, dataTempRegister);
+ m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+ }
+
+ void not32(RegisterID srcDest)
+ {
+ m_assembler.mvn(srcDest, srcDest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orr(dest, dest, src);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.orr(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.orr(dest, dest, dataTempRegister);
+ }
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ m_assembler.asr(dest, dest, shift_amount);
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.asr(dest, dest, imm.m_value);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sub(dest, dest, src);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub(dest, dest, dataTempRegister);
+ }
+ }
+
+ void sub32(Imm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub32(Imm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.eor(dest, dest, src);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.eor(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.eor(dest, dest, dataTempRegister);
+ }
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be an Imm32. Address
+ // operand objects to loads and stores will be implicitly constructed if a
+ // register is passed.
+
+private:
+ void load32(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldr(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldr(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load16(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldrh(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void store32(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.str(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.str(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.str(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+public:
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32(void* address, RegisterID dest)
+ {
+ move(ImmPtr(address), addressTempRegister);
+ m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
+ load32(ArmAddress(address.base, dataTempRegister), dest);
+ return label;
+ }
+
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ moveFixedWidthEncoding(Imm32(address.offset), dataTempRegister);
+ load32(ArmAddress(address.base, dataTempRegister), dest);
+ return label;
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
+ store32(src, ArmAddress(address.base, dataTempRegister));
+ return label;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(Imm32 imm, ImplicitAddress address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ move(ImmPtr(address), addressTempRegister);
+ m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void store32(Imm32 imm, void* address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+
+ // Floating-point operations:
+
+ bool supportsFloatingPoint() const { return true; }
+ // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
+ // If a value is not representable as an integer (and possibly for some values that are;
+ // on x86, INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
+ // a branch will be taken. It is not clear whether this interface will be well suited to
+ // other platforms. On ARMv7 the hardware truncation operation produces multiple possible
+ // failure values (saturates to INT_MIN & INT_MAX, NaN results in a value of 0). This is a
+ // temporary solution while we work out what this interface should be. Either we need to
+ // decide to make this interface work on all platforms, rework the interface to make it more
+ // generic, or decide that the MacroAssembler cannot practically be used to abstract these
+ // operations, and make clients go directly to the m_assembler to plant truncation instructions.
+ // In short, FIXME:.
+ bool supportsFloatingPointTruncate() const { return false; }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(Imm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vldr(dest, base, offset);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(Imm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vstr(src, base, offset);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vadd_F64(dest, dest, src);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ addDouble(fpTempRegister, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsub_F64(dest, dest, src);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ subDouble(fpTempRegister, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmul_F64(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ mulDouble(fpTempRegister, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmov(fpTempRegister, src);
+ m_assembler.vcvt_F64_S32(dest, fpTempRegister);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.vcmp_F64(left, right);
+ m_assembler.vmrs_APSR_nzcv_FPSCR();
+ return makeBranch(cond);
+ }
+
+ Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
+ {
+ ASSERT_NOT_REACHED();
+ return Jump(); // not reached; keeps this non-void function well-formed
+ }
+
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
+
+ void pop(RegisterID dest)
+ {
+ // load postindexed with writeback
+ m_assembler.ldr(dest, ARM::sp, sizeof(void*), false, true);
+ }
+
+ void push(RegisterID src)
+ {
+ // store preindexed with writeback
+ m_assembler.str(src, ARM::sp, -sizeof(void*), true, true);
+ }
+
+ void push(Address address)
+ {
+ load32(address, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ void push(Imm32 imm)
+ {
+ move(imm, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ uint32_t value = imm.m_value;
+
+ if (imm.m_isPointer)
+ moveFixedWidthEncoding(imm, dest);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
+
+ if (armImm.isValid())
+ m_assembler.mov(dest, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
+ m_assembler.mvn(dest, armImm);
+ else {
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
+ if (value & 0xffff0000)
+ m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
+ }
+ }
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mov(dest, src);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, dataTempRegister);
+ move(reg2, reg1);
+ move(dataTempRegister, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+
+ // Forwards / external control flow operations:
+ //
+ // This set of jump and conditional branch operations returns a Jump
+ // object which may be linked at a later point, allowing forward jumps,
+ // or jumps that will require external linkage (after the code has been
+ // relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+ // respectively; for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the names 'below' and 'above').
+ //
+ // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
+private:
+
+ // Should we be using TEQ for equal/not-equal?
+ void compare32(RegisterID left, Imm32 right)
+ {
+ int32_t imm = right.m_value;
+ if (!imm)
+ m_assembler.tst(left, left);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.cmp(left, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+ m_assembler.cmn(left, armImm);
+ else {
+ move(Imm32(imm), dataTempRegister);
+ m_assembler.cmp(left, dataTempRegister);
+ }
+ }
+ }
+
+ void test32(RegisterID reg, Imm32 mask)
+ {
+ int32_t imm = mask.m_value;
+
+ if (imm == -1)
+ m_assembler.tst(reg, reg);
+ else {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.tst(reg, armImm);
+ else {
+ move(mask, dataTempRegister);
+ m_assembler.tst(reg, dataTempRegister);
+ }
+ }
+ }
+
+public:
+ Jump branch32(Condition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmp(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Imm32 right)
+ {
+ compare32(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(Condition cond, RegisterID left, Address right)
+ {
+ load32(right, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branch32(Condition cond, Address left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, Address left, Imm32 right)
+ {
+ // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+ {
+ // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+ {
+ // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load32(left.m_ptr, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+ {
+ load16(left, dataTempRegister);
+ m_assembler.lsl(addressTempRegister, right, 16);
+ m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
+ return branch32(cond, dataTempRegister, addressTempRegister);
+ }
+
+ Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+ {
+ // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load16(left, addressTempRegister);
+ m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
+ return branch32(cond, addressTempRegister, Imm32(right.m_value << 16));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.tst(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ test32(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump jump()
+ {
+ return Jump(makeJump());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.bx(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ load32(address, dataTempRegister);
+ m_assembler.bx(dataTempRegister);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
+ Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ m_assembler.add_S(dest, dest, src);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add_S(dest, dest, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT(cond == Overflow);
+ m_assembler.smull(dest, dataTempRegister, dest, src);
+ m_assembler.asr(addressTempRegister, dest, 31);
+ return branch32(NotEqual, addressTempRegister, dataTempRegister);
+ }
+
+ Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT(cond == Overflow);
+ move(imm, dataTempRegister);
+ m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+ m_assembler.asr(addressTempRegister, dest, 31);
+ return branch32(NotEqual, addressTempRegister, dataTempRegister);
+ }
+
+ Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ m_assembler.sub_S(dest, dest, src);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub_S(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub_S(dest, dest, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.bkpt();
+ }
+
+ Call nearCall()
+ {
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+ }
+
+ Call call()
+ {
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.blx(target), Call::None);
+ }
+
+ Call call(Address address)
+ {
+ load32(address, dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::None);
+ }
+
+ void ret()
+ {
+ m_assembler.bx(linkRegister);
+ }
+
+ void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+ {
+ compare32(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ // FIXME:
+ // The mask should be optional... perhaps the argument order should be
+ // dest-src, operations always have a dest? ... possibly not true, considering
+ // asm ops like test, or pseudo ops like pop().
+ void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+ {
+ load32(address, dataTempRegister);
+ test32(dataTempRegister, mask);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+
+ DataLabel32 moveWithPatch(Imm32 imm, RegisterID dst)
+ {
+ moveFixedWidthEncoding(imm, dst);
+ return DataLabel32(this);
+ }
+
+ DataLabelPtr moveWithPatch(ImmPtr imm, RegisterID dst)
+ {
+ moveFixedWidthEncoding(Imm32(imm), dst);
+ return DataLabelPtr(this);
+ }
+
+ Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+ {
+ load32(left, addressTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, addressTempRegister, dataTempRegister);
+ }
+
+ DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
+ store32(dataTempRegister, address);
+ return label;
+ }
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(ImmPtr(0), address); }
+
+
+ Call tailRecursiveCall()
+ {
+ // Like a normal call, but don't link.
+ moveFixedWidthEncoding(Imm32(0), dataTempRegister);
+ return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+
+protected:
+ ARMv7Assembler::JmpSrc makeJump()
+ {
+ return m_assembler.b();
+ }
+
+ ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
+ {
+ m_assembler.it(cond);
+ return m_assembler.b();
+ }
+ ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
+ ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
+
+ ArmAddress setupArmAddress(BaseIndex address)
+ {
+ if (address.offset) {
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(Imm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return ArmAddress(addressTempRegister, address.index, address.scale);
+ } else
+ return ArmAddress(address.base, address.index, address.scale);
+ }
+
+ ArmAddress setupArmAddress(Address address)
+ {
+ if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+ return ArmAddress(address.base, address.offset);
+
+ move(Imm32(address.offset), addressTempRegister);
+ return ArmAddress(address.base, addressTempRegister);
+ }
+
+ ArmAddress setupArmAddress(ImplicitAddress address)
+ {
+ if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+ return ArmAddress(address.base, address.offset);
+
+ move(Imm32(address.offset), addressTempRegister);
+ return ArmAddress(address.base, addressTempRegister);
+ }
+
+ RegisterID makeBaseIndexBase(BaseIndex address)
+ {
+ if (!address.offset)
+ return address.base;
+
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(Imm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return addressTempRegister;
+ }
+
+ DataLabel32 moveFixedWidthEncoding(Imm32 imm, RegisterID dst)
+ {
+ uint32_t value = imm.m_value;
+ m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
+ m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
+ return DataLabel32(this); // missing return: label the just-emitted pair, as moveWithPatch expects
+ }
+
+ ARMv7Assembler::Condition armV7Condition(Condition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+ ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerARMv7_h
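[A minimal sketch distilling the immediate-handling idiom that recurs throughout this new port (see add32, sub32, or32 and xor32 above), assuming the assembler interface used in the patch; the free function and the explicit scratch parameter are illustrative only.]

    // Try to encode the constant directly; if Thumb-2 cannot represent it,
    // materialize it in a scratch register and use the register form instead.
    void addConstant32(ARMv7Assembler& masm, ARMv7Assembler::RegisterID dest,
                       int32_t value, ARMv7Assembler::RegisterID scratch)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(value);
        if (armImm.isValid())
            masm.add(dest, dest, armImm);            // one instruction, encoded immediate
        else {
            uint32_t v = static_cast<uint32_t>(value);
            masm.movT3(scratch, ARMThumbImmediate::makeUInt16(v & 0xffff)); // movw: low half
            masm.movt(scratch, ARMThumbImmediate::makeUInt16(v >> 16));     // movt: high half
            masm.add(dest, dest, scratch);           // register-to-register add
        }
    }
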
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index 0603060..50fca5b 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -35,6 +35,25 @@
#if ENABLE(ASSEMBLER)
+// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
+// instruction address on the platform (for example, checking any alignment requirements).
+#if PLATFORM_ARM_ARCH(7)
+// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
+// into the processor are decorated with the bottom bit set, indicating that this is
+// thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
+// decorated and undecorated null, and the second test ensures that the pointer is
+// decorated.
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
+#define ASSERT_VALID_CODE_OFFSET(offset) \
+ ASSERT(!(offset & 1)) // Must be multiple of 2.
+#else
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(ptr)
+#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
+#endif
+
namespace JSC {
// FunctionPtr:
@@ -52,7 +71,7 @@ public:
explicit FunctionPtr(FunctionType* value)
: m_value(reinterpret_cast<void*>(value))
{
- ASSERT(m_value);
+ ASSERT_VALID_CODE_POINTER(m_value);
}
void* value() const { return m_value; }
@@ -79,7 +98,13 @@ public:
explicit ReturnAddressPtr(void* value)
: m_value(value)
{
- ASSERT(m_value);
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ explicit ReturnAddressPtr(FunctionPtr function)
+ : m_value(function.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
}
void* value() const { return m_value; }
@@ -99,19 +124,34 @@ public:
}
explicit MacroAssemblerCodePtr(void* value)
+#if PLATFORM_ARM_ARCH(7)
+ // Decorate the pointer as a thumb code pointer.
+ : m_value(reinterpret_cast<char*>(value) + 1)
+#else
: m_value(value)
+#endif
{
- ASSERT(m_value);
+ ASSERT_VALID_CODE_POINTER(m_value);
}
explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
: m_value(ra.value())
{
- ASSERT(m_value);
+ ASSERT_VALID_CODE_POINTER(m_value);
}
void* executableAddress() const { return m_value; }
- void* dataLocation() const { ASSERT(m_value); return m_value; }
+#if PLATFORM_ARM_ARCH(7)
+ // To use this pointer as a data address remove the decoration.
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
+#else
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
+#endif
+
+ bool operator!()
+ {
+ return !m_value;
+ }
private:
void* m_value;
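[A minimal sketch of what the Thumb decoration above means for callers, assuming an ARMv7 build; the inspect function and its buffer argument are hypothetical, standing in for 16-bit-aligned JIT output.]

    void inspect(void* buffer)
    {
        MacroAssemblerCodePtr ptr(buffer);    // the constructor sets the low bit
        void* exec = ptr.executableAddress(); // decorated (odd): what bx/blx must see
        void* data = ptr.dataLocation();      // undecorated: the underlying bytes
        // On ARMv7 the two differ by exactly the decoration bit; elsewhere they agree.
        ASSERT(static_cast<char*>(exec) == static_cast<char*>(data) + 1);
    }
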
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86.h
index 801bf61..aaf98fd 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -151,7 +151,16 @@ public:
return DataLabelPtr(this);
}
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ load32(address, dest);
+ return label;
+ }
+
bool supportsFloatingPoint() const { return m_isSSE2Present; }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+ bool supportsFloatingPointTruncate() const { return m_isSSE2Present; }
private:
const bool m_isSSE2Present;
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index 4da7fe6..ffdca7c 100644
--- a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -436,7 +436,16 @@ public:
return label;
}
+ Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+ {
+ Label label(this);
+ loadPtr(address, dest);
+ return label;
+ }
+
bool supportsFloatingPoint() const { return true; }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+ bool supportsFloatingPointTruncate() const { return true; }
};
} // namespace JSC
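
[A minimal sketch of how a JIT client is expected to consume the new capability flag, assuming hypothetical register handles (srcFP, destGP); branchTruncateDoubleToInt32 and supportsFloatingPointTruncate are the methods added in the hunks above.]

    // Plant the fast hardware-truncation path only where it is supported.
    if (masm.supportsFloatingPointTruncate()) {
        MacroAssembler::Jump failed = masm.branchTruncateDoubleToInt32(srcFP, destGP);
        // ... later: link 'failed' to a slow path that falls back to a runtime call ...
    } else {
        // e.g. the new ARMv7 port today: always truncate via a runtime call instead.
    }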