Add AArch64 fixups

This commit is contained in:
xperia64 2020-11-22 20:36:19 -05:00
parent 232c2588ab
commit f9d84871fb
3 changed files with 23 additions and 20 deletions

@@ -1081,13 +1081,10 @@ void A32EmitA64::EmitA32WriteMemory64(A32EmitContext& ctx, IR::Inst* inst) {
}
template <typename T, void (A32::UserCallbacks::*fn)(A32::VAddr, T)>
static void ExclusiveWrite(BlockOfCode& code, RegAlloc& reg_alloc, IR::Inst* inst, const A32::UserConfig& config, bool prepend_high_word) {
static void ExclusiveWrite(BlockOfCode& code, RegAlloc& reg_alloc, IR::Inst* inst, const A32::UserConfig& config) {
auto args = reg_alloc.GetArgumentInfo(inst);
if (prepend_high_word) {
reg_alloc.HostCall(nullptr, {}, args[0], args[1], args[2]);
} else {
reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
}
reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
// Use unused HostCall registers
ARM64Reg passed = W9;
ARM64Reg tmp = W10;
@@ -1102,34 +1099,31 @@ static void ExclusiveWrite(BlockOfCode& code, RegAlloc& reg_alloc, IR::Inst* ins
code.TSTI2R(tmp, A32JitState::RESERVATION_GRANULE_MASK, reg_alloc.ScratchGpr());
end.push_back(code.B(CC_NEQ));
code.STR(INDEX_UNSIGNED, WZR, X28, offsetof(A32JitState, exclusive_state));
if (prepend_high_word) {
code.LSL(code.ABI_PARAM4,code.ABI_PARAM4, 32);
code.ORR(code.ABI_PARAM3, code.ABI_PARAM3, code.ABI_PARAM4);
}
Devirtualize<fn>(config.callbacks).EmitCall(code);
code.MOVI2R(passed, 0);
for (FixupBranch e : end) {
code.SetJumpTarget(e);
}
for (FixupBranch e : end) {
code.SetJumpTarget(e);
}
reg_alloc.DefineValue(inst, passed);
}
void A32EmitA64::EmitA32ExclusiveWriteMemory8(A32EmitContext& ctx, IR::Inst* inst) {
ExclusiveWrite<u8, &A32::UserCallbacks::MemoryWrite8>(code, ctx.reg_alloc, inst, config, false);
ExclusiveWrite<u8, &A32::UserCallbacks::MemoryWrite8>(code, ctx.reg_alloc, inst, config);
}
void A32EmitA64::EmitA32ExclusiveWriteMemory16(A32EmitContext& ctx, IR::Inst* inst) {
ExclusiveWrite<u16, &A32::UserCallbacks::MemoryWrite16>(code, ctx.reg_alloc, inst, config, false);
ExclusiveWrite<u16, &A32::UserCallbacks::MemoryWrite16>(code, ctx.reg_alloc, inst, config);
}
void A32EmitA64::EmitA32ExclusiveWriteMemory32(A32EmitContext& ctx, IR::Inst* inst) {
ExclusiveWrite<u32, &A32::UserCallbacks::MemoryWrite32>(code, ctx.reg_alloc, inst, config, false);
ExclusiveWrite<u32, &A32::UserCallbacks::MemoryWrite32>(code, ctx.reg_alloc, inst, config);
}
void A32EmitA64::EmitA32ExclusiveWriteMemory64(A32EmitContext& ctx, IR::Inst* inst) {
ExclusiveWrite<u64, &A32::UserCallbacks::MemoryWrite64>(code, ctx.reg_alloc, inst, config, true);
ExclusiveWrite<u64, &A32::UserCallbacks::MemoryWrite64>(code, ctx.reg_alloc, inst, config);
}
static void EmitCoprocessorException() {

@@ -307,8 +307,17 @@ void Jit::LoadContext(const Context& ctx) {
impl->jit_state.TransferJitState(ctx.impl->jit_state, reset_rsb);
}
std::string Jit::Disassemble(const IR::LocationDescriptor& descriptor) {
return impl->Disassemble(descriptor);
std::string Jit::Disassemble() const {
std::string result;
#ifdef DYNARMIC_USE_LLVM
for (const u32* pos = reinterpret_cast<const u32*>(impl->block_of_code.GetCodeBegin());
reinterpret_cast<const u8*>(pos) < reinterpret_cast<const u8*>(impl->block_of_code.GetCodePtr()); pos += 1) {
fmt::print("0x{:02x} 0x{:02x} ", reinterpret_cast<u64>(pos), *pos);
fmt::print("{}", Common::DisassembleAArch64(*pos, reinterpret_cast<u64>(pos)));
result += Common::DisassembleAArch64(*pos, reinterpret_cast<u64>(pos));
}
#endif
return result;
}
} // namespace Dynarmic::A32

@@ -620,7 +620,7 @@ A32OPC(WriteMemory64, Void, U32,
A32OPC(ExclusiveWriteMemory8, U32, U32, U8 )
A32OPC(ExclusiveWriteMemory16, U32, U32, U16 )
A32OPC(ExclusiveWriteMemory32, U32, U32, U32 )
A32OPC(ExclusiveWriteMemory64, U32, U32, U32, U32 )
A32OPC(ExclusiveWriteMemory64, U32, U32, U64 )
// A64 Memory access
//A64OPC(ClearExclusive, Void, )