Diffstat (limited to 'src/3rdparty/webkit/JavaScriptCore/assembler')
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBuffer.h   160
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h   1929
-rw-r--r--  src/3rdparty/webkit/JavaScriptCore/assembler/X86Assembler.h     1675
3 files changed, 3764 insertions, 0 deletions
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBuffer.h b/src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBuffer.h
new file mode 100644
index 0000000..e1f53d8
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/AssemblerBuffer.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBuffer_h
+#define AssemblerBuffer_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "stdint.h"
+#include <string.h>
+#include <jit/ExecutableAllocator.h>
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+
+namespace JSC {
+
+ class AssemblerBuffer {
+ static const int inlineCapacity = 256;
+ public:
+ AssemblerBuffer()
+ : m_buffer(m_inlineBuffer)
+ , m_capacity(inlineCapacity)
+ , m_size(0)
+ {
+ }
+
+ ~AssemblerBuffer()
+ {
+ if (m_buffer != m_inlineBuffer)
+ fastFree(m_buffer);
+ }
+
+ void ensureSpace(int space)
+ {
+ if (m_size > m_capacity - space)
+ grow();
+ }
+
+ bool isAligned(int alignment) const
+ {
+ return !(m_size & (alignment - 1));
+ }
+
+ void putByteUnchecked(int value)
+ {
+ ASSERT(!(m_size > m_capacity - 4));
+ m_buffer[m_size] = value;
+ m_size++;
+ }
+
+ void putByte(int value)
+ {
+ if (m_size > m_capacity - 4)
+ grow();
+ putByteUnchecked(value);
+ }
+
+ void putShortUnchecked(int value)
+ {
+ ASSERT(!(m_size > m_capacity - 4));
+ *reinterpret_cast<short*>(&m_buffer[m_size]) = value;
+ m_size += 2;
+ }
+
+ void putShort(int value)
+ {
+ if (m_size > m_capacity - 4)
+ grow();
+ putShortUnchecked(value);
+ }
+
+ void putIntUnchecked(int value)
+ {
+ *reinterpret_cast<int*>(&m_buffer[m_size]) = value;
+ m_size += 4;
+ }
+
+ void putInt64Unchecked(int64_t value)
+ {
+ *reinterpret_cast<int64_t*>(&m_buffer[m_size]) = value;
+ m_size += 8;
+ }
+
+ void putInt(int value)
+ {
+ if (m_size > m_capacity - 4)
+ grow();
+ putIntUnchecked(value);
+ }
+
+ void* data() const
+ {
+ return m_buffer;
+ }
+
+ int size() const
+ {
+ return m_size;
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ if (!m_size)
+ return 0;
+
+ void* result = allocator->alloc(m_size);
+
+ if (!result)
+ return 0;
+
+ return memcpy(result, m_buffer, m_size);
+ }
+
+ private:
+ void grow()
+ {
+ m_capacity += m_capacity / 2;
+
+ if (m_buffer == m_inlineBuffer) {
+ char* newBuffer = static_cast<char*>(fastMalloc(m_capacity));
+ m_buffer = static_cast<char*>(memcpy(newBuffer, m_buffer, m_size));
+ } else
+ m_buffer = static_cast<char*>(fastRealloc(m_buffer, m_capacity));
+ }
+
+ char m_inlineBuffer[inlineCapacity];
+ char* m_buffer;
+ int m_capacity;
+ int m_size;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBuffer_h
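
Taken together, AssemblerBuffer is driven in a check-then-emit pattern: reserve space, append raw bytes, then copy the finished stream into executable memory. A minimal sketch of a caller, assuming 'pool' is an ExecutablePool* obtained from JSC's ExecutableAllocator:

    JSC::AssemblerBuffer buffer;
    buffer.ensureSpace(3);              // grow up front so the unchecked puts below are safe
    buffer.putByteUnchecked(0x89);      // movl %ecx, %eax  (bytes 0x89 0xC8)
    buffer.putByteUnchecked(0xC8);
    buffer.putByteUnchecked(0xC3);      // ret
    void* code = buffer.executableCopy(pool);  // returns 0 if the buffer is empty or allocation fails
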
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h
new file mode 100644
index 0000000..9f8d474
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/MacroAssembler.h
@@ -0,0 +1,1929 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssembler_h
+#define MacroAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#include "X86Assembler.h"
+
+namespace JSC {
+
+class MacroAssembler {
+protected:
+ X86Assembler m_assembler;
+
+#if PLATFORM(X86_64)
+ static const X86::RegisterID scratchRegister = X86::r11;
+#endif
+
+public:
+ typedef X86::RegisterID RegisterID;
+
+ // Note: do not rely on values in this enum, these will change (to 0..3).
+ enum Scale {
+ TimesOne = 1,
+ TimesTwo = 2,
+ TimesFour = 4,
+ TimesEight = 8,
+#if PLATFORM(X86)
+ ScalePtr = TimesFour
+#endif
+#if PLATFORM(X86_64)
+ ScalePtr = TimesEight
+#endif
+ };
+
+ MacroAssembler()
+ {
+ }
+
+ size_t size() { return m_assembler.size(); }
+ void* copyCode(ExecutablePool* allocator)
+ {
+ return m_assembler.executableCopy(allocator);
+ }
+
+
+ // Address:
+ //
+ // Describes a simple base-offset address.
+ struct Address {
+ explicit Address(RegisterID base, int32_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ // ImplicitAddress:
+ //
+ // This class is used for explicit 'load' and 'store' operations
+ // (as opposed to situations in which a memory operand is provided
+ // to a generic operation, such as an integer arithmetic instruction).
+ //
+ // In the case of a load (or store) operation we want to permit
+ // addresses to be implicitly constructed, e.g. the two calls:
+ //
+ // load32(Address(addrReg), destReg);
+ // load32(addrReg, destReg);
+ //
+ // Are equivalent, and the explicit wrapping of the Address in the former
+ // is unnecessary.
+ struct ImplicitAddress {
+ ImplicitAddress(RegisterID base)
+ : base(base)
+ , offset(0)
+ {
+ }
+
+ ImplicitAddress(Address address)
+ : base(address.base)
+ , offset(address.offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ // BaseIndex:
+ //
+ // Describes a complex addressing mode.
+ struct BaseIndex {
+ BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
+ : base(base)
+ , index(index)
+ , scale(scale)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ RegisterID index;
+ Scale scale;
+ int32_t offset;
+ };
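
A BaseIndex operand computes base + index * scale + offset. For instance, loading element i of an int32 array whose payload starts 8 bytes into the object might look like this (masm is a MacroAssembler; register names are illustrative):

    // destReg = *reinterpret_cast<int32_t*>(arrayReg + indexReg * 4 + 8)
    masm.load32(MacroAssembler::BaseIndex(arrayReg, indexReg, MacroAssembler::TimesFour, 8), destReg);
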
+
+ // AbsoluteAddress:
+ //
+    // Describes a memory operand given by a pointer. For regular load & store
+ // operations an unwrapped void* will be used, rather than using this.
+ struct AbsoluteAddress {
+ explicit AbsoluteAddress(void* ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ void* m_ptr;
+ };
+
+
+ class Jump;
+ class PatchBuffer;
+
+ // DataLabelPtr:
+ //
+ // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+ // patched after the code has been generated.
+ class DataLabelPtr {
+ friend class MacroAssembler;
+ friend class PatchBuffer;
+
+ public:
+ DataLabelPtr()
+ {
+ }
+
+ DataLabelPtr(MacroAssembler* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ static void patch(void* address, void* value)
+ {
+ X86Assembler::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value));
+ }
+
+ private:
+ X86Assembler::JmpDst m_label;
+ };
+
+ // DataLabel32:
+ //
+    // A DataLabel32 is used to refer to a location in the code containing a
+    // 32-bit value to be patched after the code has been generated.
+ class DataLabel32 {
+ friend class MacroAssembler;
+ friend class PatchBuffer;
+
+ public:
+ DataLabel32()
+ {
+ }
+
+ DataLabel32(MacroAssembler* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ static void patch(void* address, int32_t value)
+ {
+ X86Assembler::patchImmediate(reinterpret_cast<intptr_t>(address), value);
+ }
+
+ private:
+ X86Assembler::JmpDst m_label;
+ };
+
+ // Label:
+ //
+ // A Label records a point in the generated instruction stream, typically such that
+ // it may be used as a destination for a jump.
+ class Label {
+ friend class Jump;
+ friend class MacroAssembler;
+ friend class PatchBuffer;
+
+ public:
+ Label()
+ {
+ }
+
+ Label(MacroAssembler* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ // FIXME: transitionary method, while we replace JmpSrces with Jumps.
+ operator X86Assembler::JmpDst()
+ {
+ return m_label;
+ }
+
+ private:
+ X86Assembler::JmpDst m_label;
+ };
+
+
+ // Jump:
+ //
+ // A jump object is a reference to a jump instruction that has been planted
+ // into the code buffer - it is typically used to link the jump, setting the
+ // relative offset such that when executed it will jump to the desired
+ // destination.
+ //
+ // Jump objects retain a pointer to the assembler for syntactic purposes -
+ // to allow the jump object to be able to link itself, e.g.:
+ //
+ // Jump forwardsBranch = jne32(Imm32(0), reg1);
+ // // ...
+ // forwardsBranch.link();
+ //
+ // Jumps may also be linked to a Label.
+ class Jump {
+ friend class PatchBuffer;
+ friend class MacroAssembler;
+
+ public:
+ Jump()
+ {
+ }
+
+ // FIXME: transitionary method, while we replace JmpSrces with Jumps.
+ Jump(X86Assembler::JmpSrc jmp)
+ : m_jmp(jmp)
+ {
+ }
+
+ void link(MacroAssembler* masm)
+ {
+ masm->m_assembler.link(m_jmp, masm->m_assembler.label());
+ }
+
+ void linkTo(Label label, MacroAssembler* masm)
+ {
+ masm->m_assembler.link(m_jmp, label.m_label);
+ }
+
+ // FIXME: transitionary method, while we replace JmpSrces with Jumps.
+ operator X86Assembler::JmpSrc()
+ {
+ return m_jmp;
+ }
+
+ static void patch(void* address, void* destination)
+ {
+ X86Assembler::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
+ }
+
+ private:
+ X86Assembler::JmpSrc m_jmp;
+ };
+
+ // JumpList:
+ //
+ // A JumpList is a set of Jump objects.
+ // All jumps in the set will be linked to the same destination.
+ class JumpList {
+ friend class PatchBuffer;
+
+ public:
+ void link(MacroAssembler* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].link(masm);
+ m_jumps.clear();
+ }
+
+ void linkTo(Label label, MacroAssembler* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].linkTo(label, masm);
+ m_jumps.clear();
+ }
+
+ void append(Jump jump)
+ {
+ m_jumps.append(jump);
+ }
+
+ void append(JumpList& other)
+ {
+ m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
+ }
+
+ bool empty()
+ {
+ return !m_jumps.size();
+ }
+
+ private:
+ Vector<Jump, 16> m_jumps;
+ };
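
A common pattern is to accumulate every branch that bails out of a fast path into one JumpList and resolve them together; a sketch with illustrative names throughout:

    MacroAssembler::JumpList failures;
    failures.append(masm.jne32(tagReg, MacroAssembler::Imm32(expectedTag)));
    failures.append(masm.jzPtr(valueReg));
    // ... fast path ...
    MacroAssembler::Jump done = masm.jump();
    failures.link(&masm);               // every appended jump lands here
    // ... slow path ...
    done.link(&masm);
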
+
+
+ // PatchBuffer:
+ //
+ // This class assists in linking code generated by the macro assembler, once code generation
+    // has been completed, and the code has been copied to its final location in memory. At this
+ // time pointers to labels within the code may be resolved, and relative offsets to external
+ // addresses may be fixed.
+ //
+ // Specifically:
+ // * Jump objects may be linked to external targets,
+    //   * The address of Jump objects may be taken, such that it can later be relinked.
+ // * The return address of a Jump object representing a call may be acquired.
+ // * The address of a Label pointing into the code may be resolved.
+ // * The value referenced by a DataLabel may be fixed.
+ //
+ // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
+ // address of calls, as opposed to a point that can be used to later relink a Jump -
+ // possibly wrap the later up in an object that can do just that).
+ class PatchBuffer {
+ public:
+ PatchBuffer(void* code)
+ : m_code(code)
+ {
+ }
+
+ void link(Jump jump, void* target)
+ {
+ X86Assembler::link(m_code, jump.m_jmp, target);
+ }
+
+ void link(JumpList list, void* target)
+ {
+ for (unsigned i = 0; i < list.m_jumps.size(); ++i)
+ X86Assembler::link(m_code, list.m_jumps[i], target);
+ }
+
+ void* addressOf(Jump jump)
+ {
+ return X86Assembler::getRelocatedAddress(m_code, jump.m_jmp);
+ }
+
+ void* addressOf(Label label)
+ {
+ return X86Assembler::getRelocatedAddress(m_code, label.m_label);
+ }
+
+ void* addressOf(DataLabelPtr label)
+ {
+ return X86Assembler::getRelocatedAddress(m_code, label.m_label);
+ }
+
+ void* addressOf(DataLabel32 label)
+ {
+ return X86Assembler::getRelocatedAddress(m_code, label.m_label);
+ }
+
+ void setPtr(DataLabelPtr label, void* value)
+ {
+ X86Assembler::patchAddress(m_code, label.m_label, value);
+ }
+
+ private:
+ void* m_code;
+ };
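
End to end, linking is a two-phase affair: generate code while recording the jumps that cannot be resolved yet, then copy the code and fix them up through a PatchBuffer. A sketch, assuming 'pool' is an ExecutablePool* and 'slowCaseStub' an external entry point:

    MacroAssembler masm;
    MacroAssembler::Jump toStub = masm.jump();      // target unknown at emit time
    // ... more code generation ...
    void* code = masm.copyCode(pool);
    MacroAssembler::PatchBuffer patchBuffer(code);
    patchBuffer.link(toStub, slowCaseStub);         // now resolvable against the final address
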
+
+
+ // ImmPtr:
+ //
+ // A pointer sized immediate operand to an instruction - this is wrapped
+ // in a class requiring explicit construction in order to differentiate
+    // from pointers used as absolute addresses to memory operations.
+ struct ImmPtr {
+ explicit ImmPtr(void* value)
+ : m_value(value)
+ {
+ }
+
+ intptr_t asIntptr()
+ {
+ return reinterpret_cast<intptr_t>(m_value);
+ }
+
+ void* m_value;
+ };
+
+
+ // Imm32:
+ //
+ // A 32bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct Imm32 {
+ explicit Imm32(int32_t value)
+ : m_value(value)
+ {
+ }
+
+#if PLATFORM(X86)
+ explicit Imm32(ImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int32_t m_value;
+ };
+
+ // Integer arithmetic operations:
+ //
+    // Operations are typically two operand - operation(source, srcDst).
+    // For many operations the source may be an Imm32, and the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+ // object).
+
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.addq_rr(src, dest);
+#else
+ add32(src, dest);
+#endif
+ }
+
+ void addPtr(Imm32 imm, RegisterID srcDest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.addq_ir(imm.m_value, srcDest);
+#else
+ add32(imm, srcDest);
+#endif
+ }
+
+ void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leal_mr(imm.m_value, src, dest);
+ }
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addl_rr(src, dest);
+ }
+
+ void add32(Imm32 imm, Address address)
+ {
+ m_assembler.addl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.addl_ir(imm.m_value, dest);
+ }
+
+ void add32(Imm32 imm, AbsoluteAddress address)
+ {
+#if PLATFORM(X86_64)
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ add32(imm, Address(scratchRegister));
+#else
+ m_assembler.addl_im(imm.m_value, address.m_ptr);
+#endif
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ m_assembler.addl_mr(src.offset, src.base, dest);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.andq_rr(src, dest);
+#else
+ and32(src, dest);
+#endif
+ }
+
+ void andPtr(Imm32 imm, RegisterID srcDest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.andq_ir(imm.m_value, srcDest);
+#else
+ and32(imm, srcDest);
+#endif
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andl_rr(src, dest);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.andl_ir(imm.m_value, dest);
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.shll_i8r(imm.m_value, dest);
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+        // On x86 we can only shift by ecx; if asked to shift by another register we'll
+        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ if (shift_amount != X86::ecx) {
+ swap(shift_amount, X86::ecx);
+
+ // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
+ if (dest == shift_amount)
+ m_assembler.shll_CLr(X86::ecx);
+ // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
+ else if (dest == X86::ecx)
+ m_assembler.shll_CLr(shift_amount);
+ // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
+ else
+ m_assembler.shll_CLr(dest);
+
+ swap(shift_amount, X86::ecx);
+ } else
+ m_assembler.shll_CLr(dest);
+ }
+
+ // Take the value from dividend, divide it by divisor, and put the remainder in remainder.
+    // For now, this operation has specific register requirements, and the three registers must
+    // be unique.  It is unfortunate to expose this in the MacroAssembler interface, however
+    // given the complexity to fix, the fact that it is not uncommon for processors to have
+    // specific register requirements on this operation (e.g. MIPS places the result in 'hi'), or to not
+    // support a hardware divide at all, it may not be worth abstracting this away.
+ void mod32(RegisterID divisor, RegisterID dividend, RegisterID remainder)
+ {
+#ifdef NDEBUG
+#pragma unused(dividend,remainder)
+#else
+ ASSERT((dividend == X86::eax) && (remainder == X86::edx));
+ ASSERT((dividend != divisor) && (remainder != divisor));
+#endif
+
+ m_assembler.cdq();
+ m_assembler.idivl_r(divisor);
+ }
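
So a caller has to stage its operands around those constraints before calling; for example (illustrative registers):

    masm.move(lhsReg, X86::eax);                    // dividend must be in eax
    masm.mod32(divisorReg, X86::eax, X86::edx);     // divisor must not be eax or edx
    // the remainder is now in edx; eax holds the quotient and both are clobbered
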
+
+ void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.imull_i32r(src, imm.m_value, dest);
+ }
+
+ void not32(RegisterID srcDest)
+ {
+ m_assembler.notl_r(srcDest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.orq_rr(src, dest);
+#else
+ or32(src, dest);
+#endif
+ }
+
+ void orPtr(ImmPtr imm, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ move(imm, scratchRegister);
+ m_assembler.orq_rr(scratchRegister, dest);
+#else
+ or32(Imm32(imm), dest);
+#endif
+ }
+
+ void orPtr(Imm32 imm, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.orq_ir(imm.m_value, dest);
+#else
+ or32(imm, dest);
+#endif
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orl_rr(src, dest);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.orl_ir(imm.m_value, dest);
+ }
+
+ void rshiftPtr(RegisterID shift_amount, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+        // On x86 we can only shift by ecx; if asked to shift by another register we'll
+        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ if (shift_amount != X86::ecx) {
+ swap(shift_amount, X86::ecx);
+
+ // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
+ if (dest == shift_amount)
+ m_assembler.sarq_CLr(X86::ecx);
+ // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
+ else if (dest == X86::ecx)
+ m_assembler.sarq_CLr(shift_amount);
+ // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
+ else
+ m_assembler.sarq_CLr(dest);
+
+ swap(shift_amount, X86::ecx);
+ } else
+ m_assembler.sarq_CLr(dest);
+#else
+ rshift32(shift_amount, dest);
+#endif
+ }
+
+ void rshiftPtr(Imm32 imm, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.sarq_i8r(imm.m_value, dest);
+#else
+ rshift32(imm, dest);
+#endif
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+        // On x86 we can only shift by ecx; if asked to shift by another register we'll
+        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ if (shift_amount != X86::ecx) {
+ swap(shift_amount, X86::ecx);
+
+ // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
+ if (dest == shift_amount)
+ m_assembler.sarl_CLr(X86::ecx);
+ // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
+ else if (dest == X86::ecx)
+ m_assembler.sarl_CLr(shift_amount);
+ // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
+ else
+ m_assembler.sarl_CLr(dest);
+
+ swap(shift_amount, X86::ecx);
+ } else
+ m_assembler.sarl_CLr(dest);
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.sarl_i8r(imm.m_value, dest);
+ }
+
+ void subPtr(Imm32 imm, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.subq_ir(imm.m_value, dest);
+#else
+ sub32(imm, dest);
+#endif
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ m_assembler.subl_ir(imm.m_value, dest);
+ }
+
+ void sub32(Imm32 imm, Address address)
+ {
+ m_assembler.subl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void sub32(Imm32 imm, AbsoluteAddress address)
+ {
+#if PLATFORM(X86_64)
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ sub32(imm, Address(scratchRegister));
+#else
+ m_assembler.subl_im(imm.m_value, address.m_ptr);
+#endif
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ m_assembler.subl_mr(src.offset, src.base, dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.xorq_rr(src, dest);
+#else
+ xor32(src, dest);
+#endif
+ }
+
+ void xorPtr(Imm32 imm, RegisterID srcDest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.xorq_ir(imm.m_value, srcDest);
+#else
+ xor32(imm, srcDest);
+#endif
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorl_rr(src, dest);
+ }
+
+ void xor32(Imm32 imm, RegisterID srcDest)
+ {
+ m_assembler.xorl_ir(imm.m_value, srcDest);
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be an Imm32. Address
+    // operand objects to loads and stores will be implicitly constructed if a
+ // register is passed.
+
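
The three operand shapes, side by side (registers illustrative):

    masm.load32(baseReg, destReg);                                   // ImplicitAddress: *(base + 0)
    masm.load32(MacroAssembler::Address(baseReg, 8), destReg);       // explicit base + offset
    masm.store32(MacroAssembler::Imm32(0), MacroAssembler::Address(baseReg, 8));  // immediate store
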
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movq_mr(address.offset, address.base, dest);
+#else
+ load32(address, dest);
+#endif
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movq_mr_disp32(address.offset, address.base, dest);
+ return DataLabel32(this);
+#else
+ m_assembler.movl_mr_disp32(address.offset, address.base, dest);
+ return DataLabel32(this);
+#endif
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
+#else
+ load32(address, dest);
+#endif
+ }
+
+ void loadPtr(void* address, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ if (dest == X86::eax)
+ m_assembler.movq_mEAX(address);
+ else {
+ move(X86::eax, dest);
+ m_assembler.movq_mEAX(address);
+ swap(X86::eax, dest);
+ }
+#else
+ load32(address, dest);
+#endif
+ }
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address.offset, address.base, dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load32(void* address, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ if (dest == X86::eax)
+ m_assembler.movl_mEAX(address);
+ else {
+ move(X86::eax, dest);
+ m_assembler.movl_mEAX(address);
+ swap(X86::eax, dest);
+ }
+#else
+ m_assembler.movl_mr(address, dest);
+#endif
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movq_rm(src, address.offset, address.base);
+#else
+ store32(src, address);
+#endif
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movq_rm_disp32(src, address.offset, address.base);
+ return DataLabel32(this);
+#else
+ m_assembler.movl_rm_disp32(src, address.offset, address.base);
+ return DataLabel32(this);
+#endif
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
+#else
+ store32(src, address);
+#endif
+ }
+
+ void storePtr(ImmPtr imm, ImplicitAddress address)
+ {
+#if PLATFORM(X86_64)
+ move(imm, scratchRegister);
+ storePtr(scratchRegister, address);
+#else
+ m_assembler.movl_i32m(imm.asIntptr(), address.offset, address.base);
+#endif
+ }
+
+ DataLabelPtr storePtrWithPatch(Address address)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movq_i64r(0, scratchRegister);
+ DataLabelPtr label(this);
+ storePtr(scratchRegister, address);
+ return label;
+#else
+ m_assembler.movl_i32m(0, address.offset, address.base);
+ return DataLabelPtr(this);
+#endif
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.movl_rm(src, address.offset, address.base);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store32(Imm32 imm, ImplicitAddress address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
+ }
+
+ void store32(Imm32 imm, void* address)
+ {
+#if PLATFORM(X86_64)
+ move(X86::eax, scratchRegister);
+ move(imm, X86::eax);
+ m_assembler.movl_EAXm(address);
+ move(scratchRegister, X86::eax);
+#else
+ m_assembler.movl_i32m(imm.m_value, address);
+#endif
+ }
+
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
+
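
peek and poke index the stack in machine-word units from the current stack pointer, so index 1 names the word at esp + sizeof(void*); for example:

    masm.push(reg1);                           // stack: reg1
    masm.push(reg2);                           // stack: reg2, reg1
    masm.peek(tmpReg, 1);                      // reads reg1's slot, stack position unchanged
    masm.poke(MacroAssembler::Imm32(0), 0);    // overwrites reg2's slot
    masm.pop();                                // discards one word without reading it
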
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop_r(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push_r(src);
+ }
+
+ void push(Address address)
+ {
+ m_assembler.push_m(address.offset, address.base);
+ }
+
+ void push(Imm32 imm)
+ {
+ m_assembler.push_i32(imm.m_value);
+ }
+
+ void pop()
+ {
+ addPtr(Imm32(sizeof(void*)), X86::esp);
+ }
+
+ void peek(RegisterID dest, int index = 0)
+ {
+ loadPtr(Address(X86::esp, (index * sizeof(void *))), dest);
+ }
+
+ void poke(RegisterID src, int index = 0)
+ {
+ storePtr(src, Address(X86::esp, (index * sizeof(void *))));
+ }
+
+ void poke(Imm32 value, int index = 0)
+ {
+ store32(value, Address(X86::esp, (index * sizeof(void *))));
+ }
+
+ void poke(ImmPtr imm, int index = 0)
+ {
+ storePtr(imm, Address(X86::esp, (index * sizeof(void *))));
+ }
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+        // Note: on 64-bit the Imm32 value is zero extended into the register; it
+        // may be useful to have a separate version that sign extends the value?
+ if (!imm.m_value)
+ m_assembler.xorl_rr(dest, dest);
+ else
+ m_assembler.movl_i32r(imm.m_value, dest);
+ }
+
+ void move(RegisterID src, RegisterID dest)
+ {
+        // Note: on 64-bit this is a full register move; perhaps it would be
+ // useful to have separate move32 & movePtr, with move32 zero extending?
+#if PLATFORM(X86_64)
+ m_assembler.movq_rr(src, dest);
+#else
+ m_assembler.movl_rr(src, dest);
+#endif
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
+ m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
+ else
+ m_assembler.movq_i64r(imm.asIntptr(), dest);
+#else
+ m_assembler.movl_i32r(imm.asIntptr(), dest);
+#endif
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.xchgq_rr(reg1, reg2);
+#else
+ m_assembler.xchgl_rr(reg1, reg2);
+#endif
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movsxd_rr(src, dest);
+#else
+ if (src != dest)
+ move(src, dest);
+#endif
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movl_rr(src, dest);
+#else
+ if (src != dest)
+ move(src, dest);
+#endif
+ }
+
+
+ // Forwards / external control flow operations:
+ //
+    // This set of jump and conditional branch operations return a Jump
+    // object which may be linked at a later point, allowing forwards jumps,
+    // or jumps that will require external linkage (after the code has been
+    // relocated).
+    //
+    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively; for unsigned comparisons the names b, a, be, and ae are
+    // used (representing the names 'below' and 'above').
+    //
+    // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
+
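
Put together, the naming scheme reads as follows (illustrative registers):

    MacroAssembler::Jump isNegative = masm.jl32(valueReg, MacroAssembler::Imm32(0));   // signed less-than
    MacroAssembler::Jump outOfRange = masm.jae32(indexReg, lengthReg);                 // unsigned above-or-equal
    MacroAssembler::Jump lowBitSet  = masm.jnz32(valueReg, MacroAssembler::Imm32(1));  // non-zero under a mask
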
+private:
+ void compareImm32ForBranch(RegisterID left, int32_t right)
+ {
+ m_assembler.cmpl_ir(right, left);
+ }
+
+ void compareImm32ForBranchEquality(RegisterID reg, int32_t imm)
+ {
+ if (!imm)
+ m_assembler.testl_rr(reg, reg);
+ else
+ m_assembler.cmpl_ir(imm, reg);
+ }
+
+ void compareImm32ForBranchEquality(Address address, int32_t imm)
+ {
+ m_assembler.cmpl_im(imm, address.offset, address.base);
+ }
+
+ void testImm32(RegisterID reg, Imm32 mask)
+ {
+ // if we are only interested in the low seven bits, this can be tested with a testb
+ if (mask.m_value == -1)
+ m_assembler.testl_rr(reg, reg);
+ else if ((mask.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
+ m_assembler.testl_i32r(mask.m_value, reg);
+ }
+
+ void testImm32(Address address, Imm32 mask)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ }
+
+ void testImm32(BaseIndex address, Imm32 mask)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ }
+
+#if PLATFORM(X86_64)
+ void compareImm64ForBranch(RegisterID left, int32_t right)
+ {
+ m_assembler.cmpq_ir(right, left);
+ }
+
+ void compareImm64ForBranchEquality(RegisterID reg, int32_t imm)
+ {
+ if (!imm)
+ m_assembler.testq_rr(reg, reg);
+ else
+ m_assembler.cmpq_ir(imm, reg);
+ }
+
+ void testImm64(RegisterID reg, Imm32 mask)
+ {
+ // if we are only interested in the low seven bits, this can be tested with a testb
+ if (mask.m_value == -1)
+ m_assembler.testq_rr(reg, reg);
+ else if ((mask.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
+ m_assembler.testq_i32r(mask.m_value, reg);
+ }
+
+ void testImm64(Address address, Imm32 mask)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpq_im(0, address.offset, address.base);
+ else
+ m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
+ }
+
+ void testImm64(BaseIndex address, Imm32 mask)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ }
+#endif
+
+public:
+ Jump ja32(RegisterID left, Imm32 right)
+ {
+ compareImm32ForBranch(left, right.m_value);
+ return Jump(m_assembler.ja());
+ }
+
+ Jump jaePtr(RegisterID left, RegisterID right)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.cmpq_rr(right, left);
+ return Jump(m_assembler.jae());
+#else
+ return jae32(left, right);
+#endif
+ }
+
+ Jump jaePtr(RegisterID reg, ImmPtr ptr)
+ {
+#if PLATFORM(X86_64)
+ intptr_t imm = ptr.asIntptr();
+ if (CAN_SIGN_EXTEND_32_64(imm)) {
+ compareImm64ForBranch(reg, imm);
+ return Jump(m_assembler.jae());
+ } else {
+ move(ptr, scratchRegister);
+ return jaePtr(reg, scratchRegister);
+ }
+#else
+ return jae32(reg, Imm32(ptr));
+#endif
+ }
+
+ Jump jae32(RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpl_rr(right, left);
+ return Jump(m_assembler.jae());
+ }
+
+ Jump jae32(RegisterID left, Imm32 right)
+ {
+ compareImm32ForBranch(left, right.m_value);
+ return Jump(m_assembler.jae());
+ }
+
+ Jump jae32(RegisterID left, Address right)
+ {
+ m_assembler.cmpl_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jae());
+ }
+
+ Jump jae32(Address left, RegisterID right)
+ {
+ m_assembler.cmpl_rm(right, left.offset, left.base);
+ return Jump(m_assembler.jae());
+ }
+
+ Jump jbPtr(RegisterID left, RegisterID right)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.cmpq_rr(right, left);
+ return Jump(m_assembler.jb());
+#else
+ return jb32(left, right);
+#endif
+ }
+
+ Jump jbPtr(RegisterID reg, ImmPtr ptr)
+ {
+#if PLATFORM(X86_64)
+ intptr_t imm = ptr.asIntptr();
+ if (CAN_SIGN_EXTEND_32_64(imm)) {
+ compareImm64ForBranch(reg, imm);
+ return Jump(m_assembler.jb());
+ } else {
+ move(ptr, scratchRegister);
+ return jbPtr(reg, scratchRegister);
+ }
+#else
+ return jb32(reg, Imm32(ptr));
+#endif
+ }
+
+ Jump jb32(RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpl_rr(right, left);
+ return Jump(m_assembler.jb());
+ }
+
+ Jump jb32(RegisterID left, Imm32 right)
+ {
+ compareImm32ForBranch(left, right.m_value);
+ return Jump(m_assembler.jb());
+ }
+
+ Jump jb32(RegisterID left, Address right)
+ {
+ m_assembler.cmpl_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jb());
+ }
+
+ Jump jePtr(RegisterID op1, RegisterID op2)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.cmpq_rr(op1, op2);
+ return Jump(m_assembler.je());
+#else
+ return je32(op1, op2);
+#endif
+ }
+
+ Jump jePtr(RegisterID reg, Address address)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.cmpq_rm(reg, address.offset, address.base);
+#else
+ m_assembler.cmpl_rm(reg, address.offset, address.base);
+#endif
+ return Jump(m_assembler.je());
+ }
+
+ Jump jePtr(RegisterID reg, ImmPtr ptr)
+ {
+#if PLATFORM(X86_64)
+ intptr_t imm = ptr.asIntptr();
+ if (CAN_SIGN_EXTEND_32_64(imm)) {
+ compareImm64ForBranchEquality(reg, imm);
+ return Jump(m_assembler.je());
+ } else {
+ move(ptr, scratchRegister);
+ return jePtr(scratchRegister, reg);
+ }
+#else
+ return je32(reg, Imm32(ptr));
+#endif
+ }
+
+ Jump jePtr(Address address, ImmPtr imm)
+ {
+#if PLATFORM(X86_64)
+ move(imm, scratchRegister);
+ return jePtr(scratchRegister, address);
+#else
+ return je32(address, Imm32(imm));
+#endif
+ }
+
+ Jump je32(RegisterID op1, RegisterID op2)
+ {
+ m_assembler.cmpl_rr(op1, op2);
+ return Jump(m_assembler.je());
+ }
+
+ Jump je32(Address op1, RegisterID op2)
+ {
+ m_assembler.cmpl_mr(op1.offset, op1.base, op2);
+ return Jump(m_assembler.je());
+ }
+
+ Jump je32(RegisterID reg, Imm32 imm)
+ {
+ compareImm32ForBranchEquality(reg, imm.m_value);
+ return Jump(m_assembler.je());
+ }
+
+ Jump je32(Address address, Imm32 imm)
+ {
+ compareImm32ForBranchEquality(address, imm.m_value);
+ return Jump(m_assembler.je());
+ }
+
+ Jump je16(RegisterID op1, BaseIndex op2)
+ {
+ m_assembler.cmpw_rm(op1, op2.offset, op2.base, op2.index, op2.scale);
+ return Jump(m_assembler.je());
+ }
+
+ Jump jg32(RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpl_rr(right, left);
+ return Jump(m_assembler.jg());
+ }
+
+ Jump jg32(RegisterID reg, Address address)
+ {
+ m_assembler.cmpl_mr(address.offset, address.base, reg);
+ return Jump(m_assembler.jg());
+ }
+
+ Jump jgePtr(RegisterID left, RegisterID right)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.cmpq_rr(right, left);
+ return Jump(m_assembler.jge());
+#else
+ return jge32(left, right);
+#endif
+ }
+
+ Jump jgePtr(RegisterID reg, ImmPtr ptr)
+ {
+#if PLATFORM(X86_64)
+ intptr_t imm = ptr.asIntptr();
+ if (CAN_SIGN_EXTEND_32_64(imm)) {
+ compareImm64ForBranch(reg, imm);
+ return Jump(m_assembler.jge());
+ } else {
+ move(ptr, scratchRegister);
+ return jgePtr(reg, scratchRegister);
+ }
+#else
+ return jge32(reg, Imm32(ptr));
+#endif
+ }
+
+ Jump jge32(RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpl_rr(right, left);
+ return Jump(m_assembler.jge());
+ }
+
+ Jump jge32(RegisterID left, Imm32 right)
+ {
+ compareImm32ForBranch(left, right.m_value);
+ return Jump(m_assembler.jge());
+ }
+
+ Jump jlPtr(RegisterID left, RegisterID right)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.cmpq_rr(right, left);
+ return Jump(m_assembler.jl());
+#else
+ return jl32(left, right);
+#endif
+ }
+
+ Jump jlPtr(RegisterID reg, ImmPtr ptr)
+ {
+#if PLATFORM(X86_64)
+ intptr_t imm = ptr.asIntptr();
+ if (CAN_SIGN_EXTEND_32_64(imm)) {
+ compareImm64ForBranch(reg, imm);
+ return Jump(m_assembler.jl());
+ } else {
+ move(ptr, scratchRegister);
+ return jlPtr(reg, scratchRegister);
+ }
+#else
+ return jl32(reg, Imm32(ptr));
+#endif
+ }
+
+ Jump jl32(RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpl_rr(right, left);
+ return Jump(m_assembler.jl());
+ }
+
+ Jump jl32(RegisterID left, Imm32 right)
+ {
+ compareImm32ForBranch(left, right.m_value);
+ return Jump(m_assembler.jl());
+ }
+
+ Jump jlePtr(RegisterID left, RegisterID right)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.cmpq_rr(right, left);
+ return Jump(m_assembler.jle());
+#else
+ return jle32(left, right);
+#endif
+ }
+
+ Jump jlePtr(RegisterID reg, ImmPtr ptr)
+ {
+#if PLATFORM(X86_64)
+ intptr_t imm = ptr.asIntptr();
+ if (CAN_SIGN_EXTEND_32_64(imm)) {
+ compareImm64ForBranch(reg, imm);
+ return Jump(m_assembler.jle());
+ } else {
+ move(ptr, scratchRegister);
+ return jlePtr(reg, scratchRegister);
+ }
+#else
+ return jle32(reg, Imm32(ptr));
+#endif
+ }
+
+ Jump jle32(RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpl_rr(right, left);
+ return Jump(m_assembler.jle());
+ }
+
+ Jump jle32(RegisterID left, Imm32 right)
+ {
+ compareImm32ForBranch(left, right.m_value);
+ return Jump(m_assembler.jle());
+ }
+
+ Jump jnePtr(RegisterID op1, RegisterID op2)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.cmpq_rr(op1, op2);
+ return Jump(m_assembler.jne());
+#else
+ return jne32(op1, op2);
+#endif
+ }
+
+ Jump jnePtr(RegisterID reg, Address address)
+ {
+#if PLATFORM(X86_64)
+ m_assembler.cmpq_rm(reg, address.offset, address.base);
+#else
+ m_assembler.cmpl_rm(reg, address.offset, address.base);
+#endif
+ return Jump(m_assembler.jne());
+ }
+
+ Jump jnePtr(RegisterID reg, AbsoluteAddress address)
+ {
+#if PLATFORM(X86_64)
+ move(ImmPtr(address.m_ptr), scratchRegister);
+ return jnePtr(reg, Address(scratchRegister));
+#else
+ m_assembler.cmpl_rm(reg, address.m_ptr);
+ return Jump(m_assembler.jne());
+#endif
+ }
+
+ Jump jnePtr(RegisterID reg, ImmPtr ptr)
+ {
+#if PLATFORM(X86_64)
+ intptr_t imm = ptr.asIntptr();
+ if (CAN_SIGN_EXTEND_32_64(imm)) {
+ compareImm64ForBranchEquality(reg, imm);
+ return Jump(m_assembler.jne());
+ } else {
+ move(ptr, scratchRegister);
+ return jnePtr(scratchRegister, reg);
+ }
+#else
+ return jne32(reg, Imm32(ptr));
+#endif
+ }
+
+ Jump jnePtr(Address address, ImmPtr imm)
+ {
+#if PLATFORM(X86_64)
+ move(imm, scratchRegister);
+ return jnePtr(scratchRegister, address);
+#else
+ return jne32(address, Imm32(imm));
+#endif
+ }
+
+#if !PLATFORM(X86_64)
+ Jump jnePtr(AbsoluteAddress address, ImmPtr imm)
+ {
+ m_assembler.cmpl_im(imm.asIntptr(), address.m_ptr);
+ return Jump(m_assembler.jne());
+ }
+#endif
+
+ Jump jnePtrWithPatch(RegisterID reg, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0))
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movq_i64r(initialValue.asIntptr(), scratchRegister);
+ dataLabel = DataLabelPtr(this);
+ return jnePtr(scratchRegister, reg);
+#else
+ m_assembler.cmpl_ir_force32(initialValue.asIntptr(), reg);
+ dataLabel = DataLabelPtr(this);
+ return Jump(m_assembler.jne());
+#endif
+ }
+
+ Jump jnePtrWithPatch(Address address, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0))
+ {
+#if PLATFORM(X86_64)
+ m_assembler.movq_i64r(initialValue.asIntptr(), scratchRegister);
+ dataLabel = DataLabelPtr(this);
+ return jnePtr(scratchRegister, address);
+#else
+ m_assembler.cmpl_im_force32(initialValue.asIntptr(), address.offset, address.base);
+ dataLabel = DataLabelPtr(this);
+ return Jump(m_assembler.jne());
+#endif
+ }
+
+ Jump jne32(RegisterID op1, RegisterID op2)
+ {
+ m_assembler.cmpl_rr(op1, op2);
+ return Jump(m_assembler.jne());
+ }
+
+ Jump jne32(RegisterID reg, Imm32 imm)
+ {
+ compareImm32ForBranchEquality(reg, imm.m_value);
+ return Jump(m_assembler.jne());
+ }
+
+ Jump jne32(Address address, Imm32 imm)
+ {
+ compareImm32ForBranchEquality(address, imm.m_value);
+ return Jump(m_assembler.jne());
+ }
+
+ Jump jne32(Address address, RegisterID reg)
+ {
+ m_assembler.cmpl_rm(reg, address.offset, address.base);
+ return Jump(m_assembler.jne());
+ }
+
+ Jump jnzPtr(RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+#if PLATFORM(X86_64)
+ testImm64(reg, mask);
+ return Jump(m_assembler.jne());
+#else
+ return jnz32(reg, mask);
+#endif
+ }
+
+ Jump jnzPtr(RegisterID reg, ImmPtr mask)
+ {
+#if PLATFORM(X86_64)
+ move(mask, scratchRegister);
+ m_assembler.testq_rr(scratchRegister, reg);
+ return Jump(m_assembler.jne());
+#else
+ return jnz32(reg, Imm32(mask));
+#endif
+ }
+
+ Jump jnzPtr(Address address, Imm32 mask = Imm32(-1))
+ {
+#if PLATFORM(X86_64)
+ testImm64(address, mask);
+ return Jump(m_assembler.jne());
+#else
+ return jnz32(address, mask);
+#endif
+ }
+
+ Jump jnz32(RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ testImm32(reg, mask);
+ return Jump(m_assembler.jne());
+ }
+
+ Jump jnz32(Address address, Imm32 mask = Imm32(-1))
+ {
+ testImm32(address, mask);
+ return Jump(m_assembler.jne());
+ }
+
+ Jump jzPtr(RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+#if PLATFORM(X86_64)
+ testImm64(reg, mask);
+ return Jump(m_assembler.je());
+#else
+ return jz32(reg, mask);
+#endif
+ }
+
+ Jump jzPtr(RegisterID reg, ImmPtr mask)
+ {
+#if PLATFORM(X86_64)
+ move(mask, scratchRegister);
+ m_assembler.testq_rr(scratchRegister, reg);
+ return Jump(m_assembler.je());
+#else
+ return jz32(reg, Imm32(mask));
+#endif
+ }
+
+ Jump jzPtr(Address address, Imm32 mask = Imm32(-1))
+ {
+#if PLATFORM(X86_64)
+ testImm64(address, mask);
+ return Jump(m_assembler.je());
+#else
+ return jz32(address, mask);
+#endif
+ }
+
+ Jump jzPtr(BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+#if PLATFORM(X86_64)
+ testImm64(address, mask);
+ return Jump(m_assembler.je());
+#else
+ return jz32(address, mask);
+#endif
+ }
+
+ Jump jz32(RegisterID reg, Imm32 mask = Imm32(-1))
+ {
+ testImm32(reg, mask);
+ return Jump(m_assembler.je());
+ }
+
+ Jump jz32(Address address, Imm32 mask = Imm32(-1))
+ {
+ testImm32(address, mask);
+ return Jump(m_assembler.je());
+ }
+
+ Jump jz32(BaseIndex address, Imm32 mask = Imm32(-1))
+ {
+ testImm32(address, mask);
+ return Jump(m_assembler.je());
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+
+ // Backwards, local control flow operations:
+ //
+    // These operations provide a shorter notation for local
+    // backwards branches, which may be more convenient both
+    // for the programmer and for the assembler (allowing
+    // shorter values to be used in relative offsets).
+ //
+ // The code sequence:
+ //
+ // Label topOfLoop(this);
+ // // ...
+ // jne32(reg1, reg2, topOfLoop);
+ //
+ // Is equivalent to the longer, potentially less efficient form:
+ //
+ // Label topOfLoop(this);
+ // // ...
+ // jne32(reg1, reg2).linkTo(topOfLoop);
+
+ void jae32(RegisterID left, Address right, Label target)
+ {
+ jae32(left, right).linkTo(target, this);
+ }
+
+ void je32(RegisterID op1, Imm32 imm, Label target)
+ {
+ je32(op1, imm).linkTo(target, this);
+ }
+
+ void je16(RegisterID op1, BaseIndex op2, Label target)
+ {
+ je16(op1, op2).linkTo(target, this);
+ }
+
+ void jl32(RegisterID left, Imm32 right, Label target)
+ {
+ jl32(left, right).linkTo(target, this);
+ }
+
+ void jle32(RegisterID left, RegisterID right, Label target)
+ {
+ jle32(left, right).linkTo(target, this);
+ }
+
+ void jnePtr(RegisterID op1, ImmPtr imm, Label target)
+ {
+ jnePtr(op1, imm).linkTo(target, this);
+ }
+
+ void jne32(RegisterID op1, RegisterID op2, Label target)
+ {
+ jne32(op1, op2).linkTo(target, this);
+ }
+
+ void jne32(RegisterID op1, Imm32 imm, Label target)
+ {
+ jne32(op1, imm).linkTo(target, this);
+ }
+
+ void jzPtr(RegisterID reg, Label target)
+ {
+ jzPtr(reg).linkTo(target, this);
+ }
+
+ void jump(Label target)
+ {
+ m_assembler.link(m_assembler.jmp(), target.m_label);
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.jmp_r(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ m_assembler.jmp_m(address.offset, address.base);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
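
The canonical use is overflow-checked integer arithmetic: perform the operation on the fast path and branch away if it wrapped. A sketch:

    MacroAssembler::Jump overflowed = masm.joAdd32(srcReg, destReg);  // destReg += srcReg
    // ... fast path continues with the 32-bit result in destReg ...
    // 'overflowed' is later linked to a slow case (e.g. one that redoes the add in doubles).
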
+ Jump jnzSubPtr(Imm32 imm, RegisterID dest)
+ {
+ subPtr(imm, dest);
+ return Jump(m_assembler.jne());
+ }
+
+ Jump jnzSub32(Imm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ return Jump(m_assembler.jne());
+ }
+
+ Jump joAddPtr(RegisterID src, RegisterID dest)
+ {
+ addPtr(src, dest);
+ return Jump(m_assembler.jo());
+ }
+
+ Jump joAdd32(RegisterID src, RegisterID dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jo());
+ }
+
+ Jump joAdd32(Imm32 imm, RegisterID dest)
+ {
+ add32(imm, dest);
+ return Jump(m_assembler.jo());
+ }
+
+ Jump joMul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ mul32(imm, src, dest);
+ return Jump(m_assembler.jo());
+ }
+
+ Jump joSub32(Imm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ return Jump(m_assembler.jo());
+ }
+
+ Jump jzSubPtr(Imm32 imm, RegisterID dest)
+ {
+ subPtr(imm, dest);
+ return Jump(m_assembler.je());
+ }
+
+ Jump jzSub32(Imm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ return Jump(m_assembler.je());
+ }
+
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.int3();
+ }
+
+ Jump call()
+ {
+ return Jump(m_assembler.call());
+ }
+
+ // FIXME: why does this return a Jump object? - it can't be linked.
+ // This may be to get a reference to the return address of the call.
+ //
+ // This should probably be handled by a separate label type to a regular
+ // jump. Todo: add a CallLabel type, for the regular call - can be linked
+ // like a jump (possibly a subclass of jump?, or possibly casts to a Jump).
+ // Also add a CallReturnLabel type for this to return (just a more JmpDsty
+ // form of label, can get the void* after the code has been linked, but can't
+ // try to link it like a Jump object), and let the CallLabel be cast into a
+ // CallReturnLabel.
+ Jump call(RegisterID target)
+ {
+ return Jump(m_assembler.call(target));
+ }
+
+ Label label()
+ {
+ return Label(this);
+ }
+
+ Label align()
+ {
+ m_assembler.align(16);
+ return Label(this);
+ }
+
+ ptrdiff_t differenceBetween(Label from, Jump to)
+ {
+ return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+ }
+
+ ptrdiff_t differenceBetween(Label from, Label to)
+ {
+ return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
+ {
+ return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ ptrdiff_t differenceBetween(Label from, DataLabel32 to)
+ {
+ return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
+ {
+ return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+ }
+
+ void ret()
+ {
+ m_assembler.ret();
+ }
+
+ void sete32(RegisterID src, RegisterID srcDest)
+ {
+ m_assembler.cmpl_rr(srcDest, src);
+ m_assembler.sete_r(srcDest);
+ m_assembler.movzbl_rr(srcDest, srcDest);
+ }
+
+ void sete32(Imm32 imm, RegisterID srcDest)
+ {
+ compareImm32ForBranchEquality(srcDest, imm.m_value);
+ m_assembler.sete_r(srcDest);
+ m_assembler.movzbl_rr(srcDest, srcDest);
+ }
+
+ void setne32(RegisterID src, RegisterID srcDest)
+ {
+ m_assembler.cmpl_rr(srcDest, src);
+ m_assembler.setne_r(srcDest);
+ m_assembler.movzbl_rr(srcDest, srcDest);
+ }
+
+ void setne32(Imm32 imm, RegisterID srcDest)
+ {
+ compareImm32ForBranchEquality(srcDest, imm.m_value);
+ m_assembler.setne_r(srcDest);
+ m_assembler.movzbl_rr(srcDest, srcDest);
+ }
+
+ // FIXME:
+    // The mask should be optional... perhaps the argument order should be
+ // dest-src, operations always have a dest? ... possibly not true, considering
+ // asm ops like test, or pseudo ops like pop().
+ void setnz32(Address address, Imm32 mask, RegisterID dest)
+ {
+ testImm32(address, mask);
+ m_assembler.setnz_r(dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+ void setz32(Address address, Imm32 mask, RegisterID dest)
+ {
+ testImm32(address, mask);
+ m_assembler.setz_r(dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssembler_h
diff --git a/src/3rdparty/webkit/JavaScriptCore/assembler/X86Assembler.h b/src/3rdparty/webkit/JavaScriptCore/assembler/X86Assembler.h
new file mode 100644
index 0000000..3b0ce65
--- /dev/null
+++ b/src/3rdparty/webkit/JavaScriptCore/assembler/X86Assembler.h
@@ -0,0 +1,1675 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef X86Assembler_h
+#define X86Assembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
+
+#include "AssemblerBuffer.h"
+#include <stdint.h>
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
+#if PLATFORM(X86_64)
+inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
+inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
+#endif
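
These predicates drive instruction selection below: an immediate that survives the round trip through a narrower type can use the shorter encoding. For example:

    CAN_SIGN_EXTEND_8_32(100);   // true  - emitted as a one-byte immediate (0x83 group forms)
    CAN_SIGN_EXTEND_8_32(-1);    // true  - 0xFF sign-extends back to -1
    CAN_SIGN_EXTEND_8_32(300);   // false - falls back to a full 32-bit immediate
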
+
+namespace X86 {
+ typedef enum {
+ eax,
+ ecx,
+ edx,
+ ebx,
+ esp,
+ ebp,
+ esi,
+ edi,
+
+#if PLATFORM(X86_64)
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+#endif
+ } RegisterID;
+
+ typedef enum {
+ xmm0,
+ xmm1,
+ xmm2,
+ xmm3,
+ xmm4,
+ xmm5,
+ xmm6,
+ xmm7,
+ } XMMRegisterID;
+}
+
+class X86Assembler {
+public:
+ typedef X86::RegisterID RegisterID;
+ typedef X86::XMMRegisterID XMMRegisterID;
+
+ typedef enum {
+ OP_ADD_EvGv = 0x01,
+ OP_ADD_GvEv = 0x03,
+ OP_OR_EvGv = 0x09,
+ OP_OR_GvEv = 0x0B,
+ OP_2BYTE_ESCAPE = 0x0F,
+ OP_AND_EvGv = 0x21,
+ OP_SUB_EvGv = 0x29,
+ OP_SUB_GvEv = 0x2B,
+ PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
+ OP_XOR_EvGv = 0x31,
+ OP_CMP_EvGv = 0x39,
+ OP_CMP_GvEv = 0x3B,
+#if PLATFORM(X86_64)
+ PRE_REX = 0x40,
+#endif
+ OP_PUSH_EAX = 0x50,
+ OP_POP_EAX = 0x58,
+#if PLATFORM(X86_64)
+ OP_MOVSXD_GvEv = 0x63,
+#endif
+ PRE_OPERAND_SIZE = 0x66,
+ PRE_SSE_66 = 0x66,
+ OP_PUSH_Iz = 0x68,
+ OP_IMUL_GvEvIz = 0x69,
+ OP_GROUP1_EvIz = 0x81,
+ OP_GROUP1_EvIb = 0x83,
+ OP_TEST_EvGv = 0x85,
+ OP_XCHG_EvGv = 0x87,
+ OP_MOV_EvGv = 0x89,
+ OP_MOV_GvEv = 0x8B,
+ OP_LEA = 0x8D,
+ OP_GROUP1A_Ev = 0x8F,
+ OP_CDQ = 0x99,
+ OP_MOV_EAXOv = 0xA1,
+ OP_MOV_OvEAX = 0xA3,
+ OP_MOV_EAXIv = 0xB8,
+ OP_GROUP2_EvIb = 0xC1,
+ OP_RET = 0xC3,
+ OP_GROUP11_EvIz = 0xC7,
+ OP_INT3 = 0xCC,
+ OP_GROUP2_Ev1 = 0xD1,
+ OP_GROUP2_EvCL = 0xD3,
+ OP_CALL_rel32 = 0xE8,
+ OP_JMP_rel32 = 0xE9,
+ PRE_SSE_F2 = 0xF2,
+ OP_HLT = 0xF4,
+ OP_GROUP3_EbIb = 0xF6,
+ OP_GROUP3_Ev = 0xF7,
+        OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev takes an immediate when the instruction is a test.
+ OP_GROUP5_Ev = 0xFF,
+ } OneByteOpcodeID;
+
+ typedef enum {
+ OP2_MOVSD_VsdWsd = 0x10,
+ OP2_MOVSD_WsdVsd = 0x11,
+ OP2_CVTSI2SD_VsdEd = 0x2A,
+ OP2_CVTTSD2SI_GdWsd = 0x2C,
+ OP2_UCOMISD_VsdWsd = 0x2E,
+ OP2_ADDSD_VsdWsd = 0x58,
+ OP2_MULSD_VsdWsd = 0x59,
+ OP2_SUBSD_VsdWsd = 0x5C,
+ OP2_MOVD_EdVd = 0x7E,
+ OP2_JO_rel32 = 0x80,
+ OP2_JB_rel32 = 0x82,
+ OP2_JAE_rel32 = 0x83,
+ OP2_JE_rel32 = 0x84,
+ OP2_JNE_rel32 = 0x85,
+ OP2_JBE_rel32 = 0x86,
+ OP2_JA_rel32 = 0x87,
+ OP2_JS_rel32 = 0x88,
+ OP2_JP_rel32 = 0x8A,
+ OP2_JL_rel32 = 0x8C,
+ OP2_JGE_rel32 = 0x8D,
+ OP2_JLE_rel32 = 0x8E,
+ OP2_JG_rel32 = 0x8F,
+ OP_SETE = 0x94,
+ OP_SETNE = 0x95,
+ OP2_IMUL_GvEv = 0xAF,
+ OP2_MOVZX_GvEb = 0xB6,
+ OP2_MOVZX_GvEw = 0xB7,
+ OP2_PEXTRW_GdUdIb = 0xC5,
+ } TwoByteOpcodeID;
+
+ typedef enum {
+ GROUP1_OP_ADD = 0,
+ GROUP1_OP_OR = 1,
+ GROUP1_OP_AND = 4,
+ GROUP1_OP_SUB = 5,
+ GROUP1_OP_XOR = 6,
+ GROUP1_OP_CMP = 7,
+
+ GROUP1A_OP_POP = 0,
+
+ GROUP2_OP_SHL = 4,
+ GROUP2_OP_SAR = 7,
+
+ GROUP3_OP_TEST = 0,
+ GROUP3_OP_NOT = 2,
+ GROUP3_OP_IDIV = 7,
+
+ GROUP5_OP_CALLN = 2,
+ GROUP5_OP_JMPN = 4,
+ GROUP5_OP_PUSH = 6,
+
+ GROUP11_MOV = 0,
+ } GroupOpcodeID;
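
For the group opcodes the operation is selected not by the opcode byte but by the reg digit of the ModRM byte that follows it. Two worked encodings, bytes shown for illustration:

    83 C0 01    addl $1, %eax    // OP_GROUP1_EvIb, ModRM 11|000|000: reg digit /0 = GROUP1_OP_ADD, rm = eax
    83 E8 01    subl $1, %eax    // same opcode, ModRM 11|101|000: reg digit /5 = GROUP1_OP_SUB
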
+
+ // Opaque label types
+
+private:
+ class X86InstructionFormatter;
+public:
+
+ class JmpSrc {
+ friend class X86Assembler;
+ friend class X86InstructionFormatter;
+ public:
+ JmpSrc()
+ : m_offset(-1)
+ {
+ }
+
+ private:
+ JmpSrc(int offset)
+ : m_offset(offset)
+ {
+ }
+
+ int m_offset;
+ };
+
+ class JmpDst {
+ friend class X86Assembler;
+ friend class X86InstructionFormatter;
+ public:
+ JmpDst()
+ : m_offset(-1)
+ {
+ }
+
+ private:
+ JmpDst(int offset)
+ : m_offset(offset)
+ {
+ }
+
+ int m_offset;
+ };
+
+ X86Assembler()
+ {
+ }
+
+ size_t size() const { return m_formatter.size(); }
+
+ // Stack operations:
+
+ void push_r(RegisterID reg)
+ {
+ m_formatter.oneByteOp(OP_PUSH_EAX, reg);
+ }
+
+ void pop_r(RegisterID reg)
+ {
+ m_formatter.oneByteOp(OP_POP_EAX, reg);
+ }
+
+ void push_i32(int imm)
+ {
+ m_formatter.oneByteOp(OP_PUSH_Iz);
+ m_formatter.immediate32(imm);
+ }
+
+ void push_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
+ }
+
+ void pop_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
+ }
+
+ // Arithmetic operations:
+
+ void addl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
+ }
+
+ void addl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
+ }
+
+ void addl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
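+
+ // Illustrative encodings for the above: addl_ir(1, X86::eax) takes the
+ // sign-extended imm8 form and emits 83 C0 01, while addl_ir(1000, X86::eax)
+ // emits 81 C0 E8 03 00 00 with a full 32-bit immediate.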
+
+ void addl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if PLATFORM(X86_64)
+ void addq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
+ }
+
+ void addq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void addl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void andl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
+ }
+
+ void andl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if PLATFORM(X86_64)
+ void andq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
+ }
+
+ void andq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void notl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
+ }
+
+ void orl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
+ }
+
+ void orl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
+ }
+
+ void orl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if PLATFORM(X86_64)
+ void orq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
+ }
+
+ void orq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void subl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
+ }
+
+ void subl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
+ }
+
+ void subl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if PLATFORM(X86_64)
+ void subq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void subl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void xorl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
+ }
+
+ void xorl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if PLATFORM(X86_64)
+ void xorq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
+ }
+
+ void xorq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void sarl_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
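+
+ // Shifts by one use the dedicated OP_GROUP2_Ev1 form, saving the immediate
+ // byte: e.g. sarl_i8r(1, X86::eax) emits D1 F8, whereas sarl_i8r(4, X86::eax)
+ // emits C1 F8 04 (illustrative encodings).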
+
+ void sarl_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+ }
+
+ void shll_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shll_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
+ }
+
+#if PLATFORM(X86_64)
+ void sarq_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+ }
+
+ void sarq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+#endif
+
+ void imull_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
+ }
+
+ void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
+ m_formatter.immediate32(value);
+ }
+
+ void idivl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
+ }
+
+ // Comparisons:
+
+ void cmpl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
+ }
+
+ void cmpl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
+ }
+
+ void cmpl_mr(int offset, RegisterID base, RegisterID src)
+ {
+ m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
+ }
+
+ void cmpl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_ir_force32(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
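+
+ // The _force32 variants always emit the four-byte immediate form, even for
+ // values that would fit in an imm8 -- presumably so the immediate field has a
+ // fixed size and position for later patching (cf. patchImmediate below).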
+
+ void cmpl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_im_force32(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if PLATFORM(X86_64)
+ void cmpq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
+ }
+
+ void cmpq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
+ }
+
+ void cmpq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void cmpl_rm(RegisterID reg, void* addr)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
+ }
+
+ void cmpl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
+ }
+
+ void testl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
+ void testl_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void testl_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if PLATFORM(X86_64)
+ void testq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
+ }
+
+ void testq_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void testq_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+#endif
+
+ void testb_i8r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void sete_r(RegisterID dst)
+ {
+ m_formatter.twoByteOp8(OP_SETE, (GroupOpcodeID)0, dst);
+ }
+
+ void setz_r(RegisterID dst)
+ {
+ sete_r(dst);
+ }
+
+ void setne_r(RegisterID dst)
+ {
+ m_formatter.twoByteOp8(OP_SETNE, (GroupOpcodeID)0, dst);
+ }
+
+ void setnz_r(RegisterID dst)
+ {
+ setne_r(dst);
+ }
+
+ // Various move ops:
+
+ void cdq()
+ {
+ m_formatter.oneByteOp(OP_CDQ);
+ }
+
+ void xchgl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+ }
+
+#if PLATFORM(X86_64)
+ void xchgq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+ }
+#endif
+
+ void movl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
+ }
+
+ void movl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movl_mEAX(void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_EAXOv);
+#if PLATFORM(X86_64)
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+ }
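+
+ // Illustrative: on 32-bit x86, movl_mEAX((void*)0x12345678) emits the moffs
+ // form A1 78 56 34 12 ("movl 0x12345678, %eax"), with no ModRM byte.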
+
+ void movl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
+ }
+
+ void movl_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_EAXm(void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_OvEAX);
+#if PLATFORM(X86_64)
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+ }
+
+#if PLATFORM(X86_64)
+ void movq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
+ }
+
+ void movq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movq_mEAX(void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EAXOv);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
+ }
+
+ void movq_i64r(int64_t imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
+ m_formatter.immediate64(imm);
+ }
+
+ void movsxd_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
+ }
+
+#else
+ void movl_mr(void* addr, RegisterID dst)
+ {
+ if (dst == X86::eax)
+ movl_mEAX(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
+ }
+
+ void movl_i32m(int imm, void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
+ m_formatter.immediate32(imm);
+ }
+#endif
+
+ void movzwl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
+ }
+
+ void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
+ }
+
+ void movzbl_rr(RegisterID src, RegisterID dst)
+ {
+ // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
+ // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
+ // REX prefixes are defined to be silently ignored by the processor.
+ m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
+ }
+
+ void leal_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_LEA, dst, base, offset);
+ }
+
+ // Flow control:
+
+ JmpSrc call()
+ {
+ m_formatter.oneByteOp(OP_CALL_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc call(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
+ return JmpSrc(m_formatter.size());
+ }
+
+ JmpSrc jmp()
+ {
+ m_formatter.oneByteOp(OP_JMP_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ void jmp_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
+ }
+
+ void jmp_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
+ }
+
+ JmpSrc jne()
+ {
+ m_formatter.twoByteOp(OP2_JNE_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jnz()
+ {
+ return jne();
+ }
+
+ JmpSrc je()
+ {
+ m_formatter.twoByteOp(OP2_JE_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jl()
+ {
+ m_formatter.twoByteOp(OP2_JL_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jb()
+ {
+ m_formatter.twoByteOp(OP2_JB_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jle()
+ {
+ m_formatter.twoByteOp(OP2_JLE_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jbe()
+ {
+ m_formatter.twoByteOp(OP2_JBE_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jge()
+ {
+ m_formatter.twoByteOp(OP2_JGE_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jg()
+ {
+ m_formatter.twoByteOp(OP2_JG_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc ja()
+ {
+ m_formatter.twoByteOp(OP2_JA_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jae()
+ {
+ m_formatter.twoByteOp(OP2_JAE_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jo()
+ {
+ m_formatter.twoByteOp(OP2_JO_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jp()
+ {
+ m_formatter.twoByteOp(OP2_JP_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc js()
+ {
+ m_formatter.twoByteOp(OP2_JS_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ // SSE operations:
+
+ void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+ }
+
+ void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+ }
+
+ void movd_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
+ }
+
+ void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
+ }
+
+ void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
+ m_formatter.immediate8(whichWord);
+ }
+
+ void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void ucomis_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ // Misc instructions:
+
+ void int3()
+ {
+ m_formatter.oneByteOp(OP_INT3);
+ }
+
+ void ret()
+ {
+ m_formatter.oneByteOp(OP_RET);
+ }
+
+ void predictNotTaken()
+ {
+ m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
+ }
+
+ // Assembler admin methods:
+
+ JmpDst label()
+ {
+ return JmpDst(m_formatter.size());
+ }
+
+ JmpDst align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ m_formatter.oneByteOp(OP_HLT);
+
+ return label();
+ }
+
+ // Linking & patching:
+
+ void link(JmpSrc from, JmpDst to)
+ {
+ ASSERT(to.m_offset != -1);
+ ASSERT(from.m_offset != -1);
+
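+ // A JmpSrc records the offset just past its rel32 field, so indexing [-1]
+ // from that position rewrites the four bytes of the relative offset.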
+ reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
+ }
+
+ static void patchAddress(void* code, JmpDst position, void* value)
+ {
+ ASSERT(position.m_offset != -1);
+
+ reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value;
+ }
+
+ static void link(void* code, JmpSrc from, void* to)
+ {
+ ASSERT(from.m_offset != -1);
+
+ reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
+ }
+
+ static void* getRelocatedAddress(void* code, JmpSrc jump)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
+ }
+
+ static void* getRelocatedAddress(void* code, JmpDst destination)
+ {
+ ASSERT(destination.m_offset != -1);
+
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
+ {
+ return dst.m_offset - src.m_offset;
+ }
+
+ static void patchImmediate(intptr_t where, int32_t value)
+ {
+ reinterpret_cast<int32_t*>(where)[-1] = value;
+ }
+
+ static void patchPointer(intptr_t where, intptr_t value)
+ {
+ reinterpret_cast<intptr_t*>(where)[-1] = value;
+ }
+
+ static void patchBranchOffset(intptr_t where, void* destination)
+ {
+ intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
+ ASSERT(offset == static_cast<int32_t>(offset));
+ reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
+ }
+
+ void* executableCopy(ExecutablePool* allocator)
+ {
+ void* copy = m_formatter.executableCopy(allocator);
+ ASSERT(copy);
+ return copy;
+ }
+
+private:
+
+ class X86InstructionFormatter {
+
+ static const int maxInstructionSize = 16;
+
+ public:
+
+ // Legacy prefix bytes:
+ //
+ // These are emitted prior to the instruction.
+
+ void prefix(OneByteOpcodeID pre)
+ {
+ m_buffer.putByte(pre);
+ }
+
+ // Word-sized operands / no operand instruction formatters.
+ //
+ // In addition to the opcode, the following operand permutations are supported:
+ // * None - instruction takes no operands.
+ // * One register - the low three bits of the RegisterID are added into the opcode.
+ // * Two registers - encode a register-form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
+ // * Three argument ModRM - a register, plus a base register and an offset describing a memory operand.
+ // * Five argument ModRM - a register, plus a base register, an index, a scale, and an offset describing a memory operand.
+ //
+ // For 32-bit x86 targets, the address operand may also be provided as a void*.
+ // On 64-bit targets, REX prefixes will be planted as necessary where high-numbered registers are used.
+ //
+ // The twoByteOp methods plant two-byte instruction sequences (first opcode byte 0x0F).
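+ //
+ // For example (illustrative): oneByteOp(OP_MOV_GvEv, X86::ecx, X86::edx, 8)
+ // emits 8B 4A 08, i.e. "movl 8(%edx), %ecx" (opcode, disp8-form ModRM, disp8).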
+
+ void oneByteOp(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(reg, base, offset);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+#if !PLATFORM(X86_64)
+ void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
+ void twoByteOp(TwoByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+#if PLATFORM(X86_64)
+ // Quad-word-sized operands:
+ //
+ // Used to format 64-bit operations, planting a REX.w prefix.
+ // When planting d64 or f64 instructions, which do not require a REX.w prefix,
+ // the normal (non-'64'-postfixed) formatters should be used.
+
+ void oneByteOp64(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(0, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(reg, base, offset);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+#endif
+
+ // Byte-operands:
+ //
+ // These methods format byte operations, which differ from the normal
+ // formatters in the circumstances under which they decide to emit REX prefixes.
+ // These should be used where any register operand signifies a byte register.
+ //
+ // The distinction is due to the handling of register numbers in the range 4..7 on
+ // x86-64. These register numbers may either represent the second byte of the first
+ // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
+ //
+ // Since ah..bh cannot be used in all permutations of operands (specifically cannot
+ // be accessed where a REX prefix is present), these are likely best treated as
+ // deprecated. To ensure the correct registers spl..dil are selected, a
+ // REX prefix will be emitted for any byte register operand in the range 4..15.
+ //
+ // These formatters may be used in instructions with a mix of operand sizes, in which
+ // case an unnecessary REX will be emitted, for example:
+ // movzbl %al, %edi
+ // In this case a REX will be planted since edi is 7 (and were this a byte operand
+ // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
+ // be silently ignored by the processor.
+ //
+ // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
+ // is provided to check byte register operands.
+
+ void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(groupOp, rm);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(groupOp, rm);
+ }
+
+ // Immediates:
+ //
+ // An immediate should be appended where appropriate after an op has been emitted.
+ // The writes are unchecked since the opcode formatters above will have ensured space.
+
+ void immediate8(int imm)
+ {
+ m_buffer.putByteUnchecked(imm);
+ }
+
+ void immediate32(int imm)
+ {
+ m_buffer.putIntUnchecked(imm);
+ }
+
+ void immediate64(int64_t imm)
+ {
+ m_buffer.putInt64Unchecked(imm);
+ }
+
+ JmpSrc immediateRel32()
+ {
+ m_buffer.putIntUnchecked(0);
+ return JmpSrc(m_buffer.size());
+ }
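+
+ // The four zero bytes are a placeholder for the relative offset; the returned
+ // JmpSrc records the position just past them, matching the [-1] indexing used
+ // by link() and friends above.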
+
+ // Administrative methods:
+
+ size_t size() const { return m_buffer.size(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+ void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
+
+ private:
+
+ // Internals; ModRm and REX formatters.
+
+ static const RegisterID noBase = X86::ebp;
+ static const RegisterID hasSib = X86::esp;
+ static const RegisterID noIndex = X86::esp;
+#if PLATFORM(X86_64)
+ static const RegisterID noBase2 = X86::r13;
+ static const RegisterID hasSib2 = X86::r12;
+
+ // Registers r8 & above require a REX prefix.
+ inline bool regRequiresRex(int reg)
+ {
+ return (reg >= X86::r8);
+ }
+
+ // Byte-operand registers spl & above require a REX prefix (to prevent the 'H' registers from being accessed).
+ inline bool byteRegRequiresRex(int reg)
+ {
+ return (reg >= X86::esp);
+ }
+
+ // Format a REX prefix byte.
+ inline void emitRex(bool w, int r, int x, int b)
+ {
+ m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
+ }
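+
+ // Layout is 0100WRXB: e.g. emitRex(true, 0, 0, 8) yields 0x49, the prefix
+ // seen on "movq %rax, %r8" (49 89 C0) (illustrative).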
+
+ // Used to plant a REX byte with REX.w set (for 64-bit operations).
+ inline void emitRexW(int r, int x, int b)
+ {
+ emitRex(true, r, x, b);
+ }
+
+ // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
+ // regRequiresRex() to check other registers (i.e. address base & index).
+ inline void emitRexIf(bool condition, int r, int x, int b)
+ {
+ if (condition) emitRex(false, r, x, b);
+ }
+
+ // Used for word-sized operations; will plant a REX prefix if necessary (if any register is r8 or above).
+ inline void emitRexIfNeeded(int r, int x, int b)
+ {
+ emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
+ }
+#else
+ // No REX prefix bytes on 32-bit x86.
+ inline bool regRequiresRex(int) { return false; }
+ inline bool byteRegRequiresRex(int) { return false; }
+ inline void emitRexIf(bool, int, int, int) {}
+ inline void emitRexIfNeeded(int, int, int) {}
+#endif
+
+ enum ModRmMode {
+ ModRmMemoryNoDisp,
+ ModRmMemoryDisp8,
+ ModRmMemoryDisp32,
+ ModRmRegister,
+ };
+
+ void putModRm(ModRmMode mode, int reg, RegisterID rm)
+ {
+ m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
+ }
+
+ void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
+ {
+ ASSERT(mode != ModRmRegister);
+
+ // Encode scale of (1,2,4,8) -> (0,1,2,3)
+ int shift = 0;
+ while (scale >>= 1)
+ shift++;
+
+ putModRm(mode, reg, hasSib);
+ m_buffer.putByteUnchecked((shift << 6) | ((index & 7) << 3) | (base & 7));
+ }
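+
+ // Illustrative: base=eax, index=ecx, scale=8 encodes shift 3, giving the SIB
+ // byte (3 << 6) | (1 << 3) | 0 = 0xC8.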
+
+ void registerModRM(int reg, RegisterID rm)
+ {
+ putModRm(ModRmRegister, reg, rm);
+ }
+
+ void memoryModRM(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if PLATFORM(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
+ putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+ m_buffer.putIntUnchecked(offset);
+ }
+ } else {
+#if PLATFORM(X86_64)
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRm(ModRmMemoryNoDisp, reg, base);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRm(ModRmMemoryDisp8, reg, base);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, reg, base);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+ }
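+
+ // Illustrative: memoryModRM(0, X86::esp, 4) cannot use a plain ModRM (an rm
+ // of esp selects a SIB), so it emits 44 24 04 -- a disp8-form ModRM, a SIB
+ // with no index and base esp, then the 8-bit displacement.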
+
+ void memoryModRM_disp32(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if PLATFORM(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+ m_buffer.putIntUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, reg, base);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+ void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ ASSERT(index != noIndex);
+
+#if PLATFORM(X86_64)
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+#if !PLATFORM(X86_64)
+ void memoryModRM(int reg, void* address)
+ {
+ // An rm of noBase with ModRmMemoryNoDisp actually encodes an absolute disp32 address!
+ putModRm(ModRmMemoryNoDisp, reg, noBase);
+ m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
+ }
+#endif
+
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(X86)
+
+#endif // X86Assembler_h