backend/A64: migrate to mcl

backend/A64/devirtualize.h: fixup mcl
This commit is contained in:
SachinVin 2022-05-22 23:30:47 +05:30
parent df9d373a84
commit a870e9e74b
28 changed files with 124 additions and 114 deletions

View File

@ -11,6 +11,10 @@
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <mcl/assert.hpp>
#include <mcl/bit_cast.hpp>
#include <mcl/stdint.hpp>
#include <mcl/scope_exit.hpp>
#include <dynarmic/A32/coprocessor.h>
@ -22,10 +26,6 @@
#include "backend/A64/emit_a64.h"
#include "backend/A64/emitter/a64_emitter.h"
#include "backend/A64/perf_map.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "common/variant_util.h"
#include "frontend/A32/location_descriptor.h"
#include "frontend/A32/types.h"
@ -657,10 +657,10 @@ void A32EmitA64::EmitA32SetGEFlagsCompressed(A32EmitContext& ctx, IR::Inst* inst
ARM64Reg to_store = DecodeReg(ctx.reg_alloc.ScratchGpr());
u32 imm = args[0].GetImmediateU32();
u32 ge = 0;
ge |= Common::Bit<19>(imm) ? 0xFF000000 : 0;
ge |= Common::Bit<18>(imm) ? 0x00FF0000 : 0;
ge |= Common::Bit<17>(imm) ? 0x0000FF00 : 0;
ge |= Common::Bit<16>(imm) ? 0x000000FF : 0;
ge |= mcl::bit::get_bit<19>(imm) ? 0xFF000000 : 0;
ge |= mcl::bit::get_bit<18>(imm) ? 0x00FF0000 : 0;
ge |= mcl::bit::get_bit<17>(imm) ? 0x0000FF00 : 0;
ge |= mcl::bit::get_bit<16>(imm) ? 0x000000FF : 0;
code.MOVI2R(to_store, ge);
code.STR(INDEX_UNSIGNED, to_store, X28, offsetof(A32JitState, cpsr_ge));
@ -696,8 +696,8 @@ void A32EmitA64::EmitA32BXWritePC(A32EmitContext& ctx, IR::Inst* inst) {
if (arg.IsImmediate()) {
const ARM64Reg scratch = DecodeReg(ctx.reg_alloc.ScratchGpr());
u32 new_pc = arg.GetImmediateU32();
const u32 mask = Common::Bit<0>(new_pc) ? 0xFFFFFFFE : 0xFFFFFFFC;
const u32 new_upper = upper_without_t | (Common::Bit<0>(new_pc) ? 1 : 0);
const u32 mask = mcl::bit::get_bit<0>(new_pc) ? 0xFFFFFFFE : 0xFFFFFFFC;
const u32 new_upper = upper_without_t | (mcl::bit::get_bit<0>(new_pc) ? 1 : 0);
code.MOVI2R(scratch, new_pc & mask);
code.STR(INDEX_UNSIGNED, scratch, X28, MJitStateReg(A32::Reg::PC));
@ -833,7 +833,7 @@ void A32EmitA64::DoNotFastmem(const DoNotFastmemMarker& marker) {
template <typename T>
void A32EmitA64::ReadMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr callback_fn) {
constexpr size_t bit_size = Common::BitSize<T>();
constexpr size_t bit_size = mcl::bitsizeof<T>;
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ctx.reg_alloc.UseScratch(args[0], ABI_PARAM2);
@ -846,7 +846,7 @@ void A32EmitA64::ReadMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr c
const auto do_not_fastmem_marker = GenerateDoNotFastmemMarker(ctx, inst);
const auto page_table_lookup = [this, result, vaddr, tmp, callback_fn](FixupBranch& end) {
constexpr size_t bit_size = Common::BitSize<T>();
constexpr size_t bit_size = mcl::bitsizeof<T>;
code.MOVP2R(result, config.page_table);
code.MOV(tmp, vaddr, ArithOption{vaddr, ST_LSR, 12});
@ -945,7 +945,7 @@ void A32EmitA64::ReadMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr c
template<typename T>
void A32EmitA64::WriteMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr callback_fn) {
constexpr size_t bit_size = Common::BitSize<T>();
constexpr size_t bit_size = mcl::bitsizeof<T>;
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ctx.reg_alloc.ScratchGpr({ABI_RETURN});
@ -960,7 +960,7 @@ void A32EmitA64::WriteMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr
const auto do_not_fastmem_marker = GenerateDoNotFastmemMarker(ctx, inst);
const auto page_table_lookup = [this, vaddr, value, page_index, addr, callback_fn](FixupBranch& end) {
constexpr size_t bit_size = Common::BitSize<T>();
constexpr size_t bit_size = mcl::bitsizeof<T>;
code.MOVP2R(addr, config.page_table);
code.MOV(DecodeReg(page_index), vaddr, ArithOption{vaddr, ST_LSR, 12});

View File

@ -8,6 +8,9 @@
#include <boost/icl/interval_set.hpp>
#include <fmt/format.h>
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include <mcl/scope_exit.hpp>
#include <dynarmic/A32/a32.h>
#include <dynarmic/A32/context.h>
@ -18,10 +21,7 @@
#include "backend/A64/callback.h"
#include "backend/A64/devirtualize.h"
#include "backend/A64/jitstate_info.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/llvm_disassemble.h"
#include "common/scope_exit.h"
#include "frontend/A32/translate/translate.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/location_descriptor.h"

View File

@ -4,11 +4,12 @@
* General Public License version 2 or any later version.
*/
#include <mcl/assert.hpp>
#include <mcl/bit_cast.hpp>
#include <mcl/stdint.hpp>
#include "backend/A64/a32_jitstate.h"
#include "backend/A64/block_of_code.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "frontend/A32/location_descriptor.h"
namespace Dynarmic::BackendA64 {
@ -55,13 +56,13 @@ u32 A32JitState::Cpsr() const {
// Q flag
cpsr |= cpsr_q ? 1 << 27 : 0;
// GE flags
cpsr |= Common::Bit<31>(cpsr_ge) ? 1 << 19 : 0;
cpsr |= Common::Bit<23>(cpsr_ge) ? 1 << 18 : 0;
cpsr |= Common::Bit<15>(cpsr_ge) ? 1 << 17 : 0;
cpsr |= Common::Bit<7>(cpsr_ge) ? 1 << 16 : 0;
cpsr |= mcl::bit::get_bit<31>(cpsr_ge) ? 1 << 19 : 0;
cpsr |= mcl::bit::get_bit<23>(cpsr_ge) ? 1 << 18 : 0;
cpsr |= mcl::bit::get_bit<15>(cpsr_ge) ? 1 << 17 : 0;
cpsr |= mcl::bit::get_bit<7>(cpsr_ge) ? 1 << 16 : 0;
// E flag, T flag
cpsr |= Common::Bit<1>(upper_location_descriptor) ? 1 << 9 : 0;
cpsr |= Common::Bit<0>(upper_location_descriptor) ? 1 << 5 : 0;
cpsr |= mcl::bit::get_bit<1>(upper_location_descriptor) ? 1 << 9 : 0;
cpsr |= mcl::bit::get_bit<0>(upper_location_descriptor) ? 1 << 5 : 0;
// IT state
cpsr |= static_cast<u32>(upper_location_descriptor & 0b11111100'00000000);
cpsr |= static_cast<u32>(upper_location_descriptor & 0b00000011'00000000) << 17;
@ -75,18 +76,18 @@ void A32JitState::SetCpsr(u32 cpsr) {
// NZCV flags
cpsr_nzcv = cpsr & 0xF0000000;
// Q flag
cpsr_q = Common::Bit<27>(cpsr) ? 1 : 0;
cpsr_q = mcl::bit::get_bit<27>(cpsr) ? 1 : 0;
// GE flags
cpsr_ge = 0;
cpsr_ge |= Common::Bit<19>(cpsr) ? 0xFF000000 : 0;
cpsr_ge |= Common::Bit<18>(cpsr) ? 0x00FF0000 : 0;
cpsr_ge |= Common::Bit<17>(cpsr) ? 0x0000FF00 : 0;
cpsr_ge |= Common::Bit<16>(cpsr) ? 0x000000FF : 0;
cpsr_ge |= mcl::bit::get_bit<19>(cpsr) ? 0xFF000000 : 0;
cpsr_ge |= mcl::bit::get_bit<18>(cpsr) ? 0x00FF0000 : 0;
cpsr_ge |= mcl::bit::get_bit<17>(cpsr) ? 0x0000FF00 : 0;
cpsr_ge |= mcl::bit::get_bit<16>(cpsr) ? 0x000000FF : 0;
upper_location_descriptor &= 0xFFFF0000;
// E flag, T flag
upper_location_descriptor |= Common::Bit<9>(cpsr) ? 2 : 0;
upper_location_descriptor |= Common::Bit<5>(cpsr) ? 1 : 0;
upper_location_descriptor |= mcl::bit::get_bit<9>(cpsr) ? 2 : 0;
upper_location_descriptor |= mcl::bit::get_bit<5>(cpsr) ? 1 : 0;
// IT state
upper_location_descriptor |= (cpsr >> 0) & 0b11111100'00000000;
upper_location_descriptor |= (cpsr >> 17) & 0b00000011'00000000;

View File

@ -7,8 +7,7 @@
#pragma once
#include <array>
#include "common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::BackendA64 {

View File

@ -17,10 +17,9 @@
#include <algorithm>
#include <vector>
#include <mcl/stdint.hpp>
#include "backend/A64/abi.h"
#include "common/common_types.h"
#include "common/math_util.h"
#include "common/iterator_util.h"
namespace Dynarmic::BackendA64 {

View File

@ -12,7 +12,7 @@
#include "backend/A64/abi.h"
#include "backend/A64/block_of_code.h"
#include "backend/A64/perf_map.h"
#include "common/assert.h"
#include <mcl/assert.hpp>
#ifdef _WIN32
#include <windows.h>

View File

@ -10,11 +10,12 @@
#include <memory>
#include <type_traits>
#include <mcl/stdint.hpp>
#include "backend/A64/callback.h"
#include "backend/A64/constant_pool.h"
#include "backend/A64/jitstate_info.h"
#include "backend/A64/emitter/a64_emitter.h"
#include "common/common_types.h"
namespace Dynarmic::BackendA64 {

View File

@ -8,9 +8,9 @@
#include <boost/icl/interval_map.hpp>
#include <boost/icl/interval_set.hpp>
#include <mcl/stdint.hpp>
#include "backend/A64/block_range_information.h"
#include "common/common_types.h"
namespace Dynarmic::BackendA64 {

View File

@ -9,8 +9,9 @@
#include <functional>
#include <vector>
#include <mcl/stdint.hpp>
#include "backend/A64/emitter/a64_emitter.h"
#include "common/common_types.h"
namespace Dynarmic::BackendA64 {

View File

@ -6,9 +6,10 @@
#include <cstring>
#include <mcl/assert.hpp>
#include "backend/A64/block_of_code.h"
#include "backend/A64/constant_pool.h"
#include "common/assert.h"
namespace Dynarmic::BackendA64 {

View File

@ -8,7 +8,7 @@
#include <map>
#include "common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::BackendA64 {

View File

@ -9,12 +9,12 @@
#include <cstring>
#include <memory>
#include <mp/traits/function_info.h>
#include <mcl/type_traits/function_info.hpp>
#include <mcl/stdint.hpp>
#include <mcl/assert.hpp>
#include <mcl/bit_cast.hpp>
#include "backend/A64/callback.h"
#include "common/assert.h"
#include "common/cast_util.h"
#include "common/common_types.h"
namespace Dynarmic::BackendA64 {
@ -33,25 +33,25 @@ struct ThunkBuilder<R(C::*)(Args...), mfp> {
} // namespace impl
template<auto mfp>
ArgCallback DevirtualizeGeneric(mp::class_type<decltype(mfp)>* this_) {
ArgCallback DevirtualizeGeneric(mcl::class_type<decltype(mfp)> * this_) {
return ArgCallback{&impl::ThunkBuilder<decltype(mfp), mfp>::Thunk, reinterpret_cast<u64>(this_)};
}
template<auto mfp>
ArgCallback DevirtualizeWindows(mp::class_type<decltype(mfp)>* this_) {
ArgCallback DevirtualizeWindows(mcl::class_type<decltype(mfp)>* this_) {
static_assert(sizeof(mfp) == 8);
return ArgCallback{Common::BitCast<u64>(mfp), reinterpret_cast<u64>(this_)};
return ArgCallback{mcl::bit_cast<u64>(mfp), reinterpret_cast<u64>(this_)};
}
template<auto mfp>
ArgCallback DevirtualizeAarch64(mp::class_type<decltype(mfp)>* this_) {
ArgCallback DevirtualizeAarch64(mcl::class_type<decltype(mfp)>* this_) {
struct MemberFunctionPointer {
/// For a non-virtual function, this is a simple function pointer.
/// For a virtual function, it is virtual table offset in bytes.
u64 ptr;
/// Twice the required adjustment to `this`, plus 1 if the member function is virtual.
u64 adj;
} mfp_struct = Common::BitCast<MemberFunctionPointer>(mfp);
} mfp_struct = mcl::bit_cast<MemberFunctionPointer>(mfp);
static_assert(sizeof(MemberFunctionPointer) == 16);
static_assert(sizeof(MemberFunctionPointer) == sizeof(mfp));
@ -59,14 +59,14 @@ ArgCallback DevirtualizeAarch64(mp::class_type<decltype(mfp)>* this_) {
u64 fn_ptr = mfp_struct.ptr;
u64 this_ptr = reinterpret_cast<u64>(this_) + mfp_struct.adj / 2;
if (mfp_struct.adj & 1) {
u64 vtable = Common::BitCastPointee<u64>(this_ptr);
fn_ptr = Common::BitCastPointee<u64>(vtable + fn_ptr);
u64 vtable = mcl::bit_cast_pointee<u64>(this_ptr);
fn_ptr = mcl::bit_cast_pointee<u64>(vtable + fn_ptr);
}
return ArgCallback{fn_ptr, this_ptr};
}
template<auto mfp>
ArgCallback Devirtualize(mp::class_type<decltype(mfp)>* this_) {
ArgCallback Devirtualize(mcl::class_type<decltype(mfp)>* this_) {
#if defined(linux) || defined(__linux) || defined(__linux__)
return DevirtualizeAarch64<mfp>(this_);
#else

View File

@ -7,14 +7,15 @@
#include <unordered_map>
#include <unordered_set>
#include <mcl/assert.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include <mcl/scope_exit.hpp>
#include "backend/A64/block_of_code.h"
#include "backend/A64/emit_a64.h"
#include "backend/A64/hostloc.h"
#include "backend/A64/perf_map.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "common/variant_util.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/microinstruction.h"
@ -129,10 +130,10 @@ void EmitA64::EmitNZCVFromPackedFlags(EmitContext& ctx, IR::Inst* inst) {
if (args[0].IsImmediate()) {
Arm64Gen::ARM64Reg nzcv = DecodeReg(ctx.reg_alloc.ScratchGpr());
u32 value = 0;
value |= Common::Bit<31>(args[0].GetImmediateU32()) ? (1 << 15) : 0;
value |= Common::Bit<30>(args[0].GetImmediateU32()) ? (1 << 14) : 0;
value |= Common::Bit<29>(args[0].GetImmediateU32()) ? (1 << 8) : 0;
value |= Common::Bit<28>(args[0].GetImmediateU32()) ? (1 << 0) : 0;
value |= mcl::bit::get_bit<31>(args[0].GetImmediateU32()) ? (1 << 15) : 0;
value |= mcl::bit::get_bit<30>(args[0].GetImmediateU32()) ? (1 << 14) : 0;
value |= mcl::bit::get_bit<29>(args[0].GetImmediateU32()) ? (1 << 8) : 0;
value |= mcl::bit::get_bit<28>(args[0].GetImmediateU32()) ? (1 << 0) : 0;
code.MOVI2R(nzcv, value);
ctx.reg_alloc.DefineValue(inst, nzcv);
} else {

View File

@ -14,9 +14,10 @@
#include <unordered_set>
#include <vector>
#include <mcl/bitsizeof.hpp>
#include "backend/A64/reg_alloc.h"
#include "backend/A64/emitter/a64_emitter.h"
#include "common/bit_util.h"
#include "common/fp/rounding_mode.h"
#include "frontend/ir/location_descriptor.h"
#include "frontend/ir/terminal.h"
@ -38,7 +39,7 @@ using A64FullVectorWidth = std::integral_constant<size_t, 128>;
// relative to the size of a vector register. e.g. T = u32 would result
// in a std::array<u32, 4>.
template <typename T>
using VectorArray = std::array<T, A64FullVectorWidth::value / Common::BitSize<T>()>;
using VectorArray = std::array<T, A64FullVectorWidth::value / mcl::bitsizeof<T>>;
struct EmitContext {
EmitContext(RegAlloc& reg_alloc, IR::Block& block);

View File

@ -4,10 +4,11 @@
* General Public License version 2 or any later version.
*/
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include "backend/A64/block_of_code.h"
#include "backend/A64/emit_a64.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/microinstruction.h"
#include "frontend/ir/opcodes.h"

View File

@ -8,11 +8,12 @@
#include <type_traits>
#include <utility>
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include "backend/A64/abi.h"
#include "backend/A64/block_of_code.h"
#include "backend/A64/emit_a64.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/fp/fpcr.h"
#include "common/fp/fpsr.h"
#include "common/fp/info.h"

View File

@ -6,11 +6,12 @@
#include <limits>
#include <mcl/assert.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include "backend/A64/block_of_code.h"
#include "backend/A64/emit_a64.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/microinstruction.h"
#include "frontend/ir/opcodes.h"
@ -105,7 +106,7 @@ void EmitA64::EmitSignedSaturation(EmitContext& ctx, IR::Inst* inst) {
const u32 mask = (1u << N) - 1;
const u32 positive_saturated_value = (1u << (N - 1)) - 1;
const u32 negative_saturated_value = 1u << (N - 1);
const u32 sext_negative_satured_value = Common::SignExtend(N, negative_saturated_value);
const u32 sext_negative_satured_value = mcl::bit::sign_extend(N, negative_saturated_value);
const ARM64Reg result = DecodeReg(ctx.reg_alloc.ScratchGpr());
const ARM64Reg reg_a = DecodeReg(ctx.reg_alloc.UseGpr(args[0]));

View File

@ -12,11 +12,12 @@
#include <libkern/OSCacheControl.h>
#endif
#include <mcl/assert.hpp>
#include <mcl/bit_cast.hpp>
#include <mcl/bit/bit_count.hpp>
#include <mcl/bit/bit_field.hpp>
#include "a64_emitter.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/cast_util.h"
#include "common/common_types.h"
#include "common/math_util.h"
namespace Dynarmic::BackendA64::Arm64Gen {
@ -267,11 +268,11 @@ float FPImm8ToFloat(u8 bits) {
const u32 mantissa = (bits & 0xF) << 19;
const u32 f = (sign << 31) | (exp << 23) | mantissa;
return Dynarmic::Common::BitCast<float>(f);
return mcl::bit_cast<float>(f);
}
bool FPImm8FromFloat(float value, u8* imm_out) {
const u32 f = Dynarmic::Common::BitCast<u32>(value);
const u32 f = mcl::bit_cast<u32>(value);
const u32 mantissa4 = (f & 0x7FFFFF) >> 19;
const u32 exponent = (f >> 23) & 0xFF;
const u32 sign = f >> 31;
@ -1857,7 +1858,7 @@ bool ARM64XEmitter::MOVI2R2(ARM64Reg Rd, u64 imm1, u64 imm2) {
}
void ARM64XEmitter::ABI_PushRegisters(u32 registers) {
int num_regs = Common::BitCount(registers);
int num_regs = static_cast<int>(mcl::bit::count_ones(registers));
int stack_size = (num_regs + (num_regs & 1)) * 8;
int it = 0;
@ -1867,7 +1868,7 @@ void ARM64XEmitter::ABI_PushRegisters(u32 registers) {
return;
for (int i = 0; i < 32; ++i) {
if (Common::Bit(i, registers)) {
if (mcl::bit::get_bit(i, registers)) {
gpr[it++] = static_cast<ARM64Reg>(X0 + i);
}
}
@ -1896,7 +1897,7 @@ void ARM64XEmitter::ABI_PushRegisters(u32 registers) {
}
void ARM64XEmitter::ABI_PopRegisters(u32 registers) {
u8 num_regs = static_cast<u8>(Common::BitCount(registers));
u8 num_regs = static_cast<u8>(mcl::bit::count_ones(registers));
int stack_size = (num_regs + (num_regs & 1)) * 8;
int it = 0;
@ -1906,7 +1907,7 @@ void ARM64XEmitter::ABI_PopRegisters(u32 registers) {
return;
for (int i = 0; i < 32; ++i) {
if (Common::Bit(i, registers)) {
if (mcl::bit::get_bit(i, registers)) {
gpr[it++] = static_cast<ARM64Reg>(X0 + i);
}
}
@ -3508,11 +3509,11 @@ void ARM64FloatEmitter::ABI_PushRegisters(u32 registers, ARM64Reg tmp) {
bool bundled_loadstore = false;
for (int i = 0; i < 32; ++i) {
if (!Common::Bit(i, registers))
if (!mcl::bit::get_bit(i, registers))
continue;
int count = 0;
while (++count < 4 && (i + count) < 32 && Common::Bit(i + count, registers)) {
while (++count < 4 && (i + count) < 32 && mcl::bit::get_bit(i + count, registers)) {
}
if (count > 1) {
bundled_loadstore = true;
@ -3521,12 +3522,12 @@ void ARM64FloatEmitter::ABI_PushRegisters(u32 registers, ARM64Reg tmp) {
}
if (bundled_loadstore && tmp != INVALID_REG) {
int num_regs = Common::BitCount(registers);
int num_regs = mcl::bit::count_ones(registers);
m_emit->SUB(SP, SP, num_regs * 16);
m_emit->ADD(tmp, SP, 0);
std::vector<ARM64Reg> island_regs;
for (int i = 0; i < 32; ++i) {
if (!Common::Bit(i, registers))
if (!mcl::bit::get_bit(i, registers))
continue;
int count = 0;
@ -3536,7 +3537,7 @@ void ARM64FloatEmitter::ABI_PushRegisters(u32 registers, ARM64Reg tmp) {
// 2 < 4 && registers[i + 2] true!
// 3 < 4 && registers[i + 3] true!
// 4 < 4 && registers[i + 4] false!
while (++count < 4 && (i + count) < 32 && Common::Bit(i + count, registers)) {
while (++count < 4 && (i + count) < 32 && mcl::bit::get_bit(i + count, registers)) {
}
if (count == 1)
@ -3561,7 +3562,7 @@ void ARM64FloatEmitter::ABI_PushRegisters(u32 registers, ARM64Reg tmp) {
} else {
std::vector<ARM64Reg> pair_regs;
for (int i = 0; i < 32; ++i) {
if (Common::Bit(i, registers)) {
if (mcl::bit::get_bit(i, registers)) {
pair_regs.push_back((ARM64Reg)(Q0 + i));
if (pair_regs.size() == 2) {
STP(128, INDEX_PRE, pair_regs[0], pair_regs[1], SP, -32);
@ -3575,14 +3576,14 @@ void ARM64FloatEmitter::ABI_PushRegisters(u32 registers, ARM64Reg tmp) {
}
void ARM64FloatEmitter::ABI_PopRegisters(u32 registers, ARM64Reg tmp) {
bool bundled_loadstore = false;
int num_regs = Common::BitCount(registers);
int num_regs = mcl::bit::count_ones(registers);
for (int i = 0; i < 32; ++i) {
if (!Common::Bit(i, registers))
if (!mcl::bit::get_bit(i, registers))
continue;
int count = 0;
while (++count < 4 && (i + count) < 32 && Common::Bit(i + count, registers)) {
while (++count < 4 && (i + count) < 32 && mcl::bit::get_bit(i + count, registers)) {
}
if (count > 1) {
bundled_loadstore = true;
@ -3594,11 +3595,11 @@ void ARM64FloatEmitter::ABI_PopRegisters(u32 registers, ARM64Reg tmp) {
// The temporary register is only used to indicate that we can use this code path
std::vector<ARM64Reg> island_regs;
for (int i = 0; i < 32; ++i) {
if (!Common::Bit(i, registers))
if (!mcl::bit::get_bit(i, registers))
continue;
u8 count = 0;
while (++count < 4 && (i + count) < 32 && Common::Bit(i + count, registers)) {
while (++count < 4 && (i + count) < 32 && mcl::bit::get_bit(i + count, registers)) {
}
if (count == 1)
@ -3624,7 +3625,7 @@ void ARM64FloatEmitter::ABI_PopRegisters(u32 registers, ARM64Reg tmp) {
bool odd = num_regs % 2;
std::vector<ARM64Reg> pair_regs;
for (int i = 31; i >= 0; --i) {
if (!Common::Bit(i, registers))
if (!mcl::bit::get_bit(i, registers))
continue;
if (odd) {
@ -3878,7 +3879,7 @@ void ARM64FloatEmitter::MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch, bool
if (negate)
value = -value;
const u32 ival = Dynarmic::Common::BitCast<u32>(value);
const u32 ival = mcl::bit_cast<u32>(value);
m_emit->MOVI2R(scratch, ival);
FMOV(Rd, scratch);
}

View File

@ -8,10 +8,11 @@
#include <cstring>
#include <functional>
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include "arm_common.h"
#include "code_block.h"
#include "common/assert.h"
#include "common/common_types.h"
namespace Dynarmic::BackendA64::Arm64Gen {

View File

@ -2,7 +2,7 @@
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include "common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::BackendA64 {
enum CCFlags {

View File

@ -13,8 +13,8 @@
#include <sys/mman.h>
#endif
#include "common/assert.h"
#include "common/common_types.h"
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
namespace Dynarmic::BackendA64 {
// Everything that needs to generate code should inherit from this.

View File

@ -10,8 +10,9 @@
#include <memory>
#include <functional>
#include <mcl/stdint.hpp>
#include "backend/A64/a32_jitstate.h"
#include "common/common_types.h"
namespace Dynarmic::BackendA64 {

View File

@ -18,12 +18,11 @@
#include <ucontext.h>
#endif
#include <mcl/assert.hpp>
#include "backend/A64/a32_jitstate.h"
#include "backend/A64/block_of_code.h"
#include "backend/A64/exception_handler.h"
#include "common/assert.h"
#include "common/cast_util.h"
#include "common/common_types.h"
namespace Dynarmic::BackendA64 {

View File

@ -5,9 +5,10 @@
*/
#pragma once
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include "backend/A64/emitter/a64_emitter.h"
#include "common/assert.h"
#include "common/common_types.h"
namespace Dynarmic::BackendA64 {

View File

@ -16,8 +16,7 @@
#include <unistd.h>
#include <fmt/format.h>
#include "common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::BackendA64 {

View File

@ -9,7 +9,7 @@
#include <cstddef>
#include <string>
#include "common/cast_util.h"
#include <mcl/bit_cast.hpp>
namespace Dynarmic::BackendA64 {
@ -19,7 +19,7 @@ void PerfMapRegister(const void* start, const void* end, const std::string& frie
template<typename T>
void PerfMapRegister(T start, const void* end, const std::string& friendly_name) {
detail::PerfMapRegister(Common::BitCast<const void*>(start), end, friendly_name);
detail::PerfMapRegister(mcl::bit_cast<const void*>(start), end, friendly_name);
}
void PerfMapClear();

View File

@ -9,10 +9,10 @@
#include <utility>
#include <fmt/ostream.h>
#include <mcl/assert.hpp>
#include "backend/A64/abi.h"
#include "backend/A64/reg_alloc.h"
#include "common/assert.h"
namespace Dynarmic::BackendA64 {

View File

@ -13,10 +13,11 @@
#include <optional>
#include <mcl/stdint.hpp>
#include "backend/A64/block_of_code.h"
#include "backend/A64/hostloc.h"
//#include "backend/A64/oparg.h"
#include "common/common_types.h"
#include "frontend/ir/cond.h"
#include "frontend/ir/microinstruction.h"
#include "frontend/ir/value.h"