Compare commits

..

140 Commits

Author SHA1 Message Date
SachinVin
6dfd42dfc8 backend\A64\exception_handler_posix.cpp: remove unused header 2020-05-30 23:05:06 +05:30
SachinVin
adeb4940dd backend\A64\exception_handler_posix.cpp: Fix typo in FindCodeBlockInfo 2020-05-29 20:19:14 +05:30
SachinVin
26ba798719 tests/A32: remove unused function 2020-05-23 19:55:11 +05:30
SachinVin
94e90aba68 backend/A64: port single stepping fix 2020-05-23 19:55:11 +05:30
SachinVin
47e508dd12 travis: a64: remove docker; don't fuzz against unicorn 2020-05-23 19:55:10 +05:30
SachinVin
d0c69355fb backend/A64: Use ASSERT_FALSE where possible 2020-05-23 19:55:10 +05:30
SachinVin
1b9d22bfee backend\A64\block_of_code.cpp: Remove stray semicolon 2020-05-23 19:55:10 +05:30
SachinVin
aef2d8d317 backend\A64\reg_alloc.cpp: Fix assert 2020-05-23 19:55:09 +05:30
SachinVin
877de72c34 CMakeLists: DYNARMIC_FRONTENDS opt-in for A64 backend 2020-05-23 19:55:09 +05:30
SachinVin
b0230f7def frontend/A32: remove decoder hack vfp instructions 2020-05-23 19:55:09 +05:30
SachinVin
b25b721a6a a64_emitter: CountLeadingZeros intrinsic shortcuts 2020-05-23 19:55:09 +05:30
BreadFish64
73ee4b9480 emit_a64: get rid of useless NOP generation
We don't actually patch anything in those locations besides a jump.
2020-05-23 19:55:08 +05:30
SachinVin
470be4f7dc emit_a64: Do not clear fast_dispatch_table unnecessarily
port 4305c74 - emit_x64: Do not clear fast_dispatch_table unnecessarily
2020-05-23 19:55:08 +05:30
SachinVin
231feee518 backend/A64/block_of_code.cpp: Clean up C style casts 2020-05-23 19:55:08 +05:30
SachinVin
27e21530b3 backend/A64/a32_emit_a64.cpp: EmitA32{Get,Set}Fpscr, set the guest_fpcr to host fpcr 2020-05-23 19:55:07 +05:30
SachinVin
66e7693204 backend/A64: Add Step 2020-05-23 19:55:07 +05:30
SachinVin
a3072d68cb backend/A64/block_of_code: Always specify codeptr to run from 2020-05-23 19:55:06 +05:30
BreadFish64
018b07f186 backend/A64: fix mp 2020-05-23 19:55:06 +05:30
SachinVin
8571f06596 backend/A64: Move SP to FP in GenMemoryAccessors + Minor cleanup and 2020-05-23 19:55:06 +05:30
SachinVin
9c74e334b1 backend/A64: Use X26 for storing remaining cycles. 2020-05-23 19:55:05 +05:30
BreadFish64
b6733a089a backend/A64: add fastmem support
fix crash on game close

fix generic exception handler

reorder hostloc gpr list

use temp register instead of X0 for writes

go back to regular std::partition
2020-05-23 19:55:05 +05:30
BreadFish64
45a758a6f2 merge fastmem 2020-05-23 19:55:05 +05:30
SachinVin
f7fd0cff8f backend\A64\constant_pool.cpp: Correct offset calculation 2020-05-23 19:55:04 +05:30
SachinVin
c99ad2a4f3 backend/A64/a32_jitstate: Upstream changes from x64 backend 2020-05-23 19:55:04 +05:30
SachinVin
968e8cddd3 backend/A64: Add test for q flag being incorrectly set 2020-05-23 19:55:04 +05:30
SachinVin
c7f7a99428 backend/A64/a32_emit_a64.cpp: Use unused HostCall registers 2020-05-23 19:55:03 +05:30
SachinVin
79c7b026ed backend/A64/a32_emit_a64.cpp: Use MOVP2R instead of MOVI2R. 2020-05-23 19:55:03 +05:30
SachinVin
7db182a5c8 backend/A64/abi: Fix FP caller and callee save registers 2020-05-23 19:55:03 +05:30
SachinVin
a7ef959570 a64/block_of_code: use GetWritableCodePtr() instead of const_cast<...>(GetCodePtr()) 2020-05-23 19:55:03 +05:30
SachinVin
534ad728a8 backend/A64/constant_pool: Clean up unused stuff 2020-05-23 19:55:02 +05:30
SachinVin
c8ec8f8945 emit_a64_data_processing.cpp: remove pointless DoNZCV. 2020-05-23 19:55:02 +05:30
SachinVin
6f643b2352 IR + backend/*: add SetCpsrNZCVRaw and change arg1 type of SetCpsrNZCV to IR::NZCV 2020-05-23 19:55:02 +05:30
SachinVin
43d37293b1 backend/A64: Fix ASR impl 2020-05-23 19:55:01 +05:30
SachinVin
e12d635bde a64_emitter: Use Correct alias for ZR and WZR in CMP 2020-05-23 19:55:01 +05:30
SachinVin
8c66a1609e backend/A64: Use CSEL instead of branches for LSL LSR and ASR + minor cleanup 2020-05-23 19:55:01 +05:30
SachinVin
878db6d65d backend/A64: Use correct register size for EmitNot64 2020-05-23 19:55:01 +05:30
SachinVin
f8594f3bb9 tests/A32: Check if Q flag is cleared properly 2020-05-23 19:55:00 +05:30
SachinVin
296bbdd0b0 backend/A64: SignedSaturatedSub and SignedSaturatedAdd 2020-05-23 19:55:00 +05:30
SachinVin
a6c2d1952a backend/A64/emit_a64_saturation.cpp: Implement EmitSignedSaturation and EmitUnsignedSaturation
Implements SSAT SSAT16 USAT USAT16 QASX QSAX UQASX UQSAX
2020-05-23 19:55:00 +05:30
SachinVin
011d62d958 backend/A64: add emit_a64_saturation.cpp 2020-05-23 19:54:59 +05:30
SachinVin
ad59325b45 backend/A64: Fix EmitA32SetCpsr 2020-05-23 19:54:59 +05:30
SachinVin
61ea47ad7b backend/A64/devirtualize: remove unused DevirtualizeItanium 2020-05-23 19:54:59 +05:30
SachinVin
bb39f419e2 backend/A64: refactor to fpscr from mxcsr 2020-05-23 19:54:58 +05:30
SachinVin
47c0632e16 backend/A64: Use ScratchGpr() instead of ABI_SCRATCH1 where possible 2020-05-23 19:54:58 +05:30
SachinVin
60303dbfa8 backend/A64: support for always_little_endian 2020-05-23 19:54:58 +05:30
SachinVin
19cd6f0309 backend/a64: Add hook_hint_instructions option
534eb0f
2020-05-23 19:54:57 +05:30
SachinVin
3d4caa5ee1 backend/A64: cleanup 2020-05-23 19:54:57 +05:30
SachinVin
d027786e4e gitignore: add .vs dir 2020-05-23 19:54:57 +05:30
SachinVin
0c7e261aac Minor style fix 2020-05-23 19:54:57 +05:30
SachinVin
6b167a68e4 backend\A64\emit_a64_packed.cpp: Implement AddSub halving and non halving 2020-05-23 19:54:56 +05:30
SachinVin
a87b13cabf backend\A64: Instructions that got implemented on the way 2020-05-23 19:54:56 +05:30
SachinVin
17e64406aa backend\A64\emit_a64_packed.cpp: Implement Unsigned Sum of Absolute Differences 2020-05-23 19:54:55 +05:30
SachinVin
871617ac3b a64 emitter: Absolute Difference and add across vector instructions 2020-05-23 19:54:55 +05:30
SachinVin
f9ba12a9e6 backend\A64\emit_a64_packed.cpp: Implement Packed Select 2020-05-23 19:54:54 +05:30
SachinVin
607a3c7110 Backend/a64: Fix assert when falling back to interpreter 2020-05-23 19:54:54 +05:30
SachinVin
a5564f588d backend\A64\emit_a64_packed.cpp: Implement Packed Halving Add/Sub instructions 2020-05-23 19:54:53 +05:30
SachinVin
fd01d6fe0a backend\A64\emit_a64_packed.cpp: Implement Packed Saturating instructions 2020-05-23 19:54:53 +05:30
SachinVin
b4fb2569ad backend\A64\emit_a64_packed.cpp: Implement SignedPacked*- ADD and SUB 2020-05-23 19:54:52 +05:30
SachinVin
8f98852249 a64 emitter: Vector Halving and Saturation instructions 2020-05-23 19:54:52 +05:30
SachinVin
9059505a2f backend\A64\emit_a64_packed.cpp: Implement UnsignedPacked*- ADD and SUB...
with a few others in the emitter
2020-05-23 19:54:51 +05:30
SachinVin
5ad5784ef8 a64 emitter: fix Scalar Saturating Instructions 2020-05-23 19:54:51 +05:30
SachinVin
f0eee83098 A64 Emitter: Implement Saturating Add and Sub 2020-05-23 19:54:50 +05:30
SachinVin
ebd185968d backend\A64\emit_a64_data_processing.cpp: Implement Division 2020-05-23 19:54:50 +05:30
SachinVin
def0137021 backend\A64\emit_a64_data_processing.cpp: Implement 64bit CLZ 2020-05-23 19:54:50 +05:30
SachinVin
9f227edfe4 backend\A64\emit_a64_data_processing.cpp: Implement 64bit LSL and ROR Instructions
Also EmitTestBit
2020-05-23 19:54:49 +05:30
SachinVin
bb70cdd28c backend\A64\emit_a64_data_processing.cpp: Implement 64bit Logical Instructions 2020-05-23 19:54:49 +05:30
SachinVin
f851695f51 backend/a64: implement CheckBit 2020-05-23 19:54:49 +05:30
SachinVin
6d25995375 backend/a64: Redesign Const Pool 2020-05-23 19:54:48 +05:30
SachinVin
410c2010e9 backend\A64\emit_a64_floating_point.cpp: Fix include paths 2020-05-23 19:54:48 +05:30
SachinVin
8e3ad2feb5 backend\A64\a32_emit_a64.cpp: Fix Coproc* after rebase 2020-05-23 19:54:48 +05:30
SachinVin
fe49607add backend/a64/opcodes.inc: Coproc instructions 2020-05-23 19:54:47 +05:30
SachinVin
324e3c1fd1 a64 emitter: Fix LDR literal 2020-05-23 19:54:47 +05:30
SachinVin
3f220d94c6 a64 emitter: Move IsInRange* and MaskImm* into anon namespace 2020-05-23 19:54:47 +05:30
SachinVin
410dcf87a5 backend\A64\emit_a64_floating_point.cpp: Implement VADD VSUB VMUL and other stuff 2020-05-23 19:54:46 +05:30
SachinVin
4459188bfc backend\A64\emit_a64_floating_point.cpp: Implement VABS VNEG VCMP and a few others 2020-05-23 19:54:46 +05:30
SachinVin
23dc3cee01 frontend/A32/Decoder: (backend/a64) VMOV 2020-05-23 19:54:45 +05:30
SachinVin
72c8e5e536 backend\A64\emit_a64_floating_point.cpp: Implement VCVT instructions 2020-05-23 19:54:45 +05:30
SachinVin
50301cffbd backend\A64\emit_a64_floating_point.cpp: part 1 2020-05-23 19:54:44 +05:30
SachinVin
62f7b030e1 backend/a64/reg_alloc: Fix EmitMove for FPRs 2020-05-23 19:54:44 +05:30
SachinVin
b92195f2ae A64 emitter: Support for 64bit FMOV 2020-05-23 19:54:44 +05:30
SachinVin
1bd416aefb a64 backend: Load "guest_FPSR" 2020-05-23 19:54:43 +05:30
SachinVin
7661987e04 A64 backend: Add Get/SetExtendedRegister and Get/SetGEFlags 2020-05-23 19:54:43 +05:30
SachinVin
1a59aaec11 tests: Don't compile A64 tests for non-x64 backend 2020-05-23 19:54:43 +05:30
SachinVin
952eb5c83f travis a64: unicorn 2020-05-23 19:54:43 +05:30
SachinVin
1c9ac3284e travis a64 backend 2020-05-23 19:54:42 +05:30
SachinVin
4da93c3130 Frontend/A32: a64 backend; Interpret SEL 2020-05-23 19:54:42 +05:30
SachinVin
8106f2a81b frontend/A32: A64 Backend implemented instructions 2020-05-23 19:54:42 +05:30
SachinVin
db07bfa933 backend\A64\emit_a64_data_processing.cpp: Implement REV and CLZ ops 2020-05-23 19:54:41 +05:30
SachinVin
6835cf34a1 backend\A64\emit_a64_data_processing.cpp: Implement Sext and Zext ops 2020-05-23 19:54:41 +05:30
SachinVin
e3054389a6 backend\A64\emit_a64_data_processing.cpp: Implement Logical ops 2020-05-23 19:54:40 +05:30
SachinVin
d37ec336a4 backend\A64\emit_a64_data_processing.cpp: Implement Arithmetic ops 2020-05-23 19:54:40 +05:30
SachinVin
e086d0df7f backend\A64\emit_a64_data_processing.cpp: Implement Shift and Rotate ops 2020-05-23 19:54:40 +05:30
SachinVin
8781a0f184 backend\A64\emit_a64_data_processing.cpp: Implement ops 2020-05-23 19:54:39 +05:30
SachinVin
a66bcdfc91 backend\A64\emit_a64_data_processing.cpp: Mostly empty file 2020-05-23 19:54:39 +05:30
SachinVin
9df55fc951 backend/a64: Add a32_interface 2020-05-23 19:54:38 +05:30
SachinVin
cb56c74d19 backend/a64: Port a32_emit_a64 2020-05-23 19:54:38 +05:30
SachinVin
4b48391fd3 backend/a64: Port block_of_code and emit_a64 2020-05-23 19:54:38 +05:30
SachinVin
0708019057 backend/a64: Port callback functions 2020-05-23 19:54:37 +05:30
SachinVin
f3bb2e5f92 backend/a64: Port exception handler 2020-05-23 19:54:37 +05:30
SachinVin
0d6b748b2a backend/a64: Port const pool 2020-05-23 19:54:37 +05:30
SachinVin
5c9179e2db backend/a64: Port reg_alloc 2020-05-23 19:54:36 +05:30
SachinVin
a37f9c4cc6 backend/a64: Port ABI functions 2020-05-23 19:54:36 +05:30
SachinVin
ab07872025 backend/a64: Port perfmap 2020-05-23 19:54:36 +05:30
SachinVin
be80e558c9 backend/a64: Port hostloc 2020-05-23 19:54:36 +05:30
SachinVin
9ca0155c19 backend/a64: Devirtualize functions for a64 2020-05-23 19:54:35 +05:30
SachinVin
fbb03a2a1b backend/a64: Port block_range_info 2020-05-23 19:54:35 +05:30
SachinVin
19b7fba235 CMakeModules\DetectArchitecture.cmake: Refactor ARCHITECTURE to DYNARMIC_ARCHITECTURE
Don't rely on super-project's definition of ARCHITECTURE
2020-05-23 19:54:35 +05:30
SachinVin
9bcbdacd2b [HACK] A32/exception_generating: Interpret undefined instructions 2020-05-23 19:54:35 +05:30
SachinVin
c72550f7d9 [HACK] CMakeLists: Do not build A64 tests on AArch64 2020-05-23 19:54:34 +05:30
MerryMage
8fdeb84822 fuzz_thumb: Add [JitA64] tag to supported instructions 2020-05-23 19:54:34 +05:30
SachinVin
4e4f2b8ef0 backend/A64: Port a32_jitstate 2020-05-23 19:54:34 +05:30
MerryMage
8de86b391f code_block: Support Windows and fix munmap check 2020-05-23 19:54:33 +05:30
SachinVin
0a55e1b11e ir_opt: Port a32_merge_interpreter_blocks 2020-05-23 19:54:33 +05:30
SachinVin
f654dbb29b assert: Use __android_log_print on Android 2020-05-23 19:54:33 +05:30
SachinVin
668d20391a CMakeLists: xbyak should only be linked on x64 2020-05-23 19:54:32 +05:30
SachinVin
0ce4fa4480 a64_emitter: Fix ABI push and pop 2020-05-23 19:54:32 +05:30
SachinVin
ddc8b7f932 a64_emitter: More style cleanup 2020-05-23 19:54:32 +05:30
SachinVin
6010c48bd0 a64_emitter: Style cleanup 2020-05-23 19:54:31 +05:30
BreadFish64
b8369d77ac Backend/A64: add jitstate_info.h 2020-05-23 19:54:31 +05:30
BreadFish64
7905eeb94b Backend/A64: Add Dolphin's ARM emitter 2020-05-23 19:54:31 +05:30
BreadFish64
f7664d9161 Add aarch64 CI 2020-05-23 19:54:31 +05:30
Lioncash
659d78c9c4 A32: Implement ASIMD VSWP
A trivial one to implement, this just swaps the contents of two
registers in place.
2020-05-22 19:43:24 +01:00
MerryMage
d0d50c4824 print_info: Use VFP and ASIMD decoders to get dynarmic name for instruction 2020-05-17 22:48:14 +01:00
MerryMage
d0075f4ea6 print_info: Use LLVM to disassemble A32 2020-05-17 22:30:46 +01:00
MerryMage
c59a127e86 opcodes: Switch from std::map to std::array
Optimization.
2020-05-17 17:01:39 +01:00
MerryMage
d0b45f6150 A32: Implement ARMv8 VST{1-4} (multiple) 2020-05-17 17:01:39 +01:00
Lioncash
eb332b3836 asimd_three_same: Unify BitwiseInstructionWithDst with BitwiseInstruction
Now that all bitwise instructions are implemented, we can unify all of
them together using if constexpr.
2020-05-16 20:22:12 +01:00
Lioncash
f42b3ad4a0 A32: Implement ASIMD VBIF (register) 2020-05-16 20:22:12 +01:00
Lioncash
ee9a81dcba A32: Implement ASIMD VBIT (register) 2020-05-16 20:22:12 +01:00
Lioncash
d624059ead A32: Implement ASIMD VBSL (register) 2020-05-16 20:22:12 +01:00
Lioncash
66663cf8e7 asimd_three_same: Collapse all bitwise implementations into a single code path
Less code and results in only writing the parts that matter once.
2020-05-16 20:22:12 +01:00
Lioncash
4b5e3437cf A32: Implement ASIMD VEOR (register) 2020-05-16 20:22:12 +01:00
Lioncash
67b284f6fa A32: Implement ASIMD VORN (register) 2020-05-16 20:22:12 +01:00
Lioncash
1fdd90ca2a A32: Implement ASIMD VORR (register) 2020-05-16 20:22:12 +01:00
Lioncash
9b93a9de46 a32_jitstate: Remove obsoleted debug assert 2020-05-16 20:22:12 +01:00
Lioncash
64fa804dd4 A32: Implement ASIMD VBIC (register) 2020-05-16 20:22:12 +01:00
Lioncash
0441ab81a1 A32: Implement ASIMD VAND (register) 2020-05-16 20:22:12 +01:00
Lioncash
1b25e867ae asimd_load_store_structures: Simplify ToExtRegD()
ExtReg has a supplied operator+, so we can make use of that instead.
2020-05-16 11:27:22 -04:00
MerryMage
2169653c50 a64_emit_x64: Invalid regalloc code for EmitA64ExclusiveReadMemory128
Attempted to allocate args[0] after end of allocation scope
2020-05-16 14:11:23 +01:00
MerryMage
1a0bc5ba91 A32/ASIMD: ARMv8: Implement VLD{1-4} (multiple) 2020-05-16 14:11:23 +01:00
16 changed files with 331 additions and 114 deletions

View File

@@ -127,7 +127,7 @@ if (DYNARMIC_USE_LLVM)
find_package(LLVM REQUIRED CONFIG)
include_directories(${LLVM_INCLUDE_DIRS})
add_definitions(-DDYNARMIC_USE_LLVM ${LLVM_DEFINITIONS})
llvm_map_components_to_libnames(llvm_libs aarch64desc aarch64disassembler x86desc x86disassembler)
llvm_map_components_to_libnames(llvm_libs armdesc armdisassembler aarch64desc aarch64disassembler x86desc x86disassembler)
endif()
if (DYNARMIC_TESTS_USE_UNICORN)

View File

@@ -123,6 +123,8 @@ if ("A32" IN_LIST DYNARMIC_FRONTENDS)
frontend/A32/location_descriptor.h
frontend/A32/PSR.h
frontend/A32/translate/impl/asimd_load_store_structures.cpp
frontend/A32/translate/impl/asimd_three_same.cpp
frontend/A32/translate/impl/asimd_two_regs_misc.cpp
frontend/A32/translate/impl/barrier.cpp
frontend/A32/translate/impl/branch.cpp
frontend/A32/translate/impl/coprocessor.cpp
@@ -378,7 +380,7 @@ elseif(ARCHITECTURE_Aarch64)
)
endif()
if (ANDROID)
if (UNIX)
target_sources(dynarmic PRIVATE backend/A64/exception_handler_posix.cpp)
else()
target_sources(dynarmic PRIVATE backend/A64/exception_handler_generic.cpp)

View File

@@ -21,8 +21,6 @@
#include "common/cast_util.h"
#include "common/common_types.h"
#include "jni.h"
namespace Dynarmic::BackendA64 {
namespace {
@@ -45,7 +43,7 @@ public:
private:
auto FindCodeBlockInfo(CodePtr PC) {
return std::find_if(code_block_infos.begin(), code_block_infos.end(),
[&](const CodeBlockInfo& x) { return x.block->GetRegion() <= PC && x.block->GetRegion() + x.block->GetRegionSize(); });
[&](const CodeBlockInfo& x) { return x.block->GetRegion() <= PC && x.block->GetRegion() + x.block->GetRegionSize() > PC; });
}
std::vector<CodeBlockInfo> code_block_infos;
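For context on the one-line fix above: in the original lambda the second operand of && lacked a comparison against PC, so it evaluated a non-null pointer (always true) and FindCodeBlockInfo matched the first block whose region started at or below PC. The corrected predicate restricts matches to the half-open range [region, region + size). A minimal sketch of the same check written as a named helper, reusing the accessors from the lambda (illustrative only, not part of the diff):

// Equivalent of the fixed predicate: does PC fall inside this block's code region?
bool Contains(const CodeBlockInfo& x, CodePtr PC) {
    const auto begin = x.block->GetRegion();
    const auto end = begin + x.block->GetRegionSize();  // one past the last byte of the region
    return begin <= PC && PC < end;                     // half-open range check
}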

View File

@@ -46,7 +46,6 @@ namespace Dynarmic::Backend::X64 {
*/
u32 A32JitState::Cpsr() const {
DEBUG_ASSERT((cpsr_nzcv & ~NZCV::x64_mask) == 0);
DEBUG_ASSERT((cpsr_q & ~1) == 0);
DEBUG_ASSERT((cpsr_jaifm & ~0x010001DF) == 0);

View File

@@ -54,6 +54,29 @@ std::string DisassembleX64(const void* begin, const void* end) {
return result;
}
std::string DisassembleAArch32([[maybe_unused]] u32 instruction, [[maybe_unused]] u64 pc) {
std::string result;
#ifdef DYNARMIC_USE_LLVM
LLVMInitializeARMTargetInfo();
LLVMInitializeARMTargetMC();
LLVMInitializeARMDisassembler();
LLVMDisasmContextRef llvm_ctx = LLVMCreateDisasm("armv8-arm", nullptr, 0, nullptr, nullptr);
LLVMSetDisasmOptions(llvm_ctx, LLVMDisassembler_Option_AsmPrinterVariant);
char buffer[80];
size_t inst_size = LLVMDisasmInstruction(llvm_ctx, (u8*)&instruction, sizeof(instruction), pc, buffer, sizeof(buffer));
result = inst_size > 0 ? buffer : "<invalid instruction>";
result += '\n';
LLVMDisasmDispose(llvm_ctx);
#else
result += fmt::format("(disassembly disabled)\n");
#endif
return result;
}
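A short usage sketch for the new DisassembleAArch32 helper (hypothetical call, not part of the diff); the exact text depends on LLVM's printer, and the #else branch yields "(disassembly disabled)" when DYNARMIC_USE_LLVM is off:

#include <fmt/format.h>
#include "common/llvm_disassemble.h"

int main() {
    // 0xE0810002 is the A32 ARM encoding of "add r0, r1, r2".
    fmt::print("{}", Dynarmic::Common::DisassembleAArch32(0xE0810002, 0));
    // prints roughly "add r0, r1, r2" on an LLVM-enabled build, "(disassembly disabled)" otherwise
}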
std::string DisassembleAArch64([[maybe_unused]] u32 instruction, [[maybe_unused]] u64 pc) {
std::string result;

View File

@@ -12,6 +12,7 @@
namespace Dynarmic::Common {
std::string DisassembleX64(const void* pos, const void* end);
std::string DisassembleAArch32(u32 instruction, u64 pc = 0);
std::string DisassembleAArch64(u32 instruction, u64 pc = 0);
} // namespace Dynarmic::Common

View File

@@ -2,14 +2,14 @@
//INST(asimd_VHADD, "VHADD", "1111001U0-CC--------0000---0----") // ASIMD
//INST(asimd_VQADD, "VQADD", "1111001U0-CC--------0000---1----") // ASIMD
//INST(asimd_VRHADD, "VRHADD", "1111001U0-CC--------0001---0----") // ASIMD
//INST(asimd_VAND_reg, "VAND (register)", "111100100-00--------0001---1----") // ASIMD
//INST(asimd_VBIC_reg, "VBIC (register)", "111100100-01--------0001---1----") // ASIMD
//INST(asimd_VORR_reg, "VORR (register)", "111100100-10--------0001---1----") // ASIMD
//INST(asimd_VORN_reg, "VORN (register)", "111100100-11--------0001---1----") // ASIMD
//INST(asimd_VEOR_reg, "VEOR (register)", "111100110-00--------0001---1----") // ASIMD
//INST(asimd_VBSL, "VBSL", "111100110-01--------0001---1----") // ASIMD
//INST(asimd_VBIT, "VBIT", "111100110-10--------0001---1----") // ASIMD
//INST(asimd_VBIF, "VBIF", "111100110-11--------0001---1----") // ASIMD
INST(asimd_VAND_reg, "VAND (register)", "111100100D00nnnndddd0001NQM1mmmm") // ASIMD
INST(asimd_VBIC_reg, "VBIC (register)", "111100100D01nnnndddd0001NQM1mmmm") // ASIMD
INST(asimd_VORR_reg, "VORR (register)", "111100100D10nnnndddd0001NQM1mmmm") // ASIMD
INST(asimd_VORN_reg, "VORN (register)", "111100100D11nnnndddd0001NQM1mmmm") // ASIMD
INST(asimd_VEOR_reg, "VEOR (register)", "111100110D00nnnndddd0001NQM1mmmm") // ASIMD
INST(asimd_VBSL, "VBSL", "111100110D01nnnndddd0001NQM1mmmm") // ASIMD
INST(asimd_VBIT, "VBIT", "111100110D10nnnndddd0001NQM1mmmm") // ASIMD
INST(asimd_VBIF, "VBIF", "111100110D11nnnndddd0001NQM1mmmm") // ASIMD
//INST(asimd_VHADD, "VHADD", "1111001U0-CC--------0010---0----") // ASIMD
//INST(asimd_VQSUB, "VQSUB", "1111001U0-CC--------0010---1----") // ASIMD
//INST(asimd_VCGT_reg, "VCGT (register)", "1111001U0-CC--------0011---0----") // ASIMD
@@ -94,7 +94,7 @@
//INST(asimd_VCLT_zero, "VCLT (zero)", "111100111-11--01----0x100x-0----") // ASIMD
//INST(asimd_VABS, "VABS", "111100111-11--01----0x110x-0----") // ASIMD
//INST(asimd_VNEG, "VNEG", "111100111-11--01----0x111x-0----") // ASIMD
//INST(asimd_VSWP, "VSWP", "111100111-11--10----00000x-0----") // ASIMD
INST(asimd_VSWP, "VSWP", "111100111D110010dddd00000QM0mmmm") // ASIMD
//INST(asimd_VTRN, "VTRN", "111100111-11--10----00001x-0----") // ASIMD
//INST(asimd_VUZP, "VUZP", "111100111-11--10----00010x-0----") // ASIMD
//INST(asimd_VZIP, "VZIP", "111100111-11--10----00011x-0----") // ASIMD
@@ -121,7 +121,7 @@
//INST(asimd_VMOV_imm, "VMOV (immediate)", "1111001a1-000bcd----11100-11efgh") // ASIMD
// Advanced SIMD load/store structures
//INST(v8_VST_multiple, "VST{1-4} (multiple)", "111101000D00nnnnddddxxxxzzaammmm") // v8
INST(v8_VST_multiple, "VST{1-4} (multiple)", "111101000D00nnnnddddxxxxzzaammmm") // v8
INST(v8_VLD_multiple, "VLD{1-4} (multiple)", "111101000D10nnnnddddxxxxzzaammmm") // v8
INST(arm_UDF, "UNALLOCATED", "111101000--0--------1011--------") // v8
INST(arm_UDF, "UNALLOCATED", "111101000--0--------11----------") // v8

View File

@@ -5,114 +5,136 @@
#include "frontend/A32/translate/impl/translate_arm.h"
#include <optional>
#include <tuple>
#include "common/bit_util.h"
namespace Dynarmic::A32 {
static ExtReg ToExtRegD(size_t base, bool bit) {
return static_cast<ExtReg>(static_cast<size_t>(ExtReg::D0) + base + (bit ? 16 : 0));
namespace {
ExtReg ToExtReg(size_t base, bool bit) {
return ExtReg::D0 + (base + (bit ? 16 : 0));
}
bool ArmTranslatorVisitor::v8_VLD_multiple(bool D, Reg n, size_t Vd, Imm<4> type, size_t size, size_t align, Reg m) {
size_t nelem, regs, inc;
std::optional<std::tuple<size_t, size_t, size_t>> DecodeType(Imm<4> type, size_t size, size_t align) {
switch (type.ZeroExtend()) {
case 0b0111: // VLD1 A1
nelem = 1;
regs = 1;
inc = 0;
case 0b0111: // VST1 A1 / VLD1 A1
if (Common::Bit<1>(align)) {
return UndefinedInstruction();
return std::nullopt;
}
break;
case 0b1010: // VLD1 A2
nelem = 1;
regs = 2;
inc = 0;
return std::tuple<size_t, size_t, size_t>{1, 1, 0};
case 0b1010: // VST1 A2 / VLD1 A2
if (align == 0b11) {
return UndefinedInstruction();
return std::nullopt;
}
break;
case 0b0110: // VLD1 A3
nelem = 1;
regs = 3;
inc = 0;
return std::tuple<size_t, size_t, size_t>{1, 2, 0};
case 0b0110: // VST1 A3 / VLD1 A3
if (Common::Bit<1>(align)) {
return UndefinedInstruction();
return std::nullopt;
}
break;
case 0b0010: // VLD1 A4
nelem = 1;
regs = 4;
inc = 0;
break;
case 0b1000: // VLD2 A1
nelem = 2;
regs = 1;
inc = 1;
return std::tuple<size_t, size_t, size_t>{1, 3, 0};
case 0b0010: // VST1 A4 / VLD1 A4
return std::tuple<size_t, size_t, size_t>{1, 4, 0};
case 0b1000: // VST2 A1 / VLD2 A1
if (size == 0b11 || align == 0b11) {
return UndefinedInstruction();
return std::nullopt;
}
break;
case 0b1001: // VLD2 A1
nelem = 2;
regs = 1;
inc = 2;
return std::tuple<size_t, size_t, size_t>{2, 1, 1};
case 0b1001: // VST2 A1 / VLD2 A1
if (size == 0b11 || align == 0b11) {
return UndefinedInstruction();
return std::nullopt;
}
break;
case 0b0011: // VLD2 A2
nelem = 2;
regs = 2;
inc = 2;
return std::tuple<size_t, size_t, size_t>{2, 1, 2};
case 0b0011: // VST2 A2 / VLD2 A2
if (size == 0b11) {
return UndefinedInstruction();
return std::nullopt;
}
break;
case 0b0100: // VLD3
nelem = 3;
regs = 1;
inc = 1;
return std::tuple<size_t, size_t, size_t>{2, 2, 2};
case 0b0100: // VST3 / VLD3
if (size == 0b11 || Common::Bit<1>(align)) {
return UndefinedInstruction();
return std::nullopt;
}
break;
case 0b0101: // VLD3
nelem = 3;
regs = 1;
inc = 2;
return std::tuple<size_t, size_t, size_t>{3, 1, 1};
case 0b0101: // VST3 / VLD3
if (size == 0b11 || Common::Bit<1>(align)) {
return UndefinedInstruction();
return std::nullopt;
}
break;
case 0b0000: // VLD4
nelem = 4;
regs = 1;
inc = 1;
return std::tuple<size_t, size_t, size_t>{3, 1, 2};
case 0b0000: // VST4 / VLD4
if (size == 0b11) {
return UndefinedInstruction();
return std::nullopt;
}
break;
case 0b0001: // VLD4
nelem = 4;
regs = 1;
inc = 2;
return std::tuple<size_t, size_t, size_t>{4, 1, 1};
case 0b0001: // VST4 / VLD4
if (size == 0b11) {
return UndefinedInstruction();
return std::nullopt;
}
break;
default:
ASSERT_FALSE("Decode error");
return std::tuple<size_t, size_t, size_t>{4, 1, 2};
}
ASSERT_FALSE("Decode error");
}
} // anonymous namespace
const ExtReg d = ToExtRegD(Vd, D);
bool ArmTranslatorVisitor::v8_VST_multiple(bool D, Reg n, size_t Vd, Imm<4> type, size_t size, size_t align, Reg m) {
const auto decoded_type = DecodeType(type, size, align);
if (!decoded_type) {
return UndefinedInstruction();
}
const auto [nelem, regs, inc] = *decoded_type;
const ExtReg d = ToExtReg(Vd, D);
const size_t d_last = RegNumber(d) + inc * (nelem - 1);
if (n == Reg::R15 || d_last + regs > 32) {
return UnpredictableInstruction();
}
[[maybe_unused]] const size_t alignment = align == 0 ? 1 : 4 << align;
const size_t ebytes = 1 << size;
const size_t ebytes = static_cast<size_t>(1) << size;
const size_t elements = 8 / ebytes;
const bool wback = m != Reg::R15;
const bool register_index = m != Reg::R15 && m != Reg::R13;
IR::U32 address = ir.GetRegister(n);
for (size_t r = 0; r < regs; r++) {
for (size_t e = 0; e < elements; e++) {
for (size_t i = 0; i < nelem; i++) {
const ExtReg ext_reg = d + i * inc + r;
const IR::U64 shifted_element = ir.LogicalShiftRight(ir.GetExtendedRegister(ext_reg), ir.Imm8(static_cast<u8>(e * ebytes * 8)));
const IR::UAny element = ir.LeastSignificant(8 * ebytes, shifted_element);
ir.WriteMemory(8 * ebytes, address, element);
address = ir.Add(address, ir.Imm32(static_cast<u32>(ebytes)));
}
}
}
if (wback) {
if (register_index) {
ir.SetRegister(n, ir.Add(ir.GetRegister(n), ir.GetRegister(m)));
} else {
ir.SetRegister(n, ir.Add(ir.GetRegister(n), ir.Imm32(static_cast<u32>(8 * nelem * regs))));
}
}
return true;
}
bool ArmTranslatorVisitor::v8_VLD_multiple(bool D, Reg n, size_t Vd, Imm<4> type, size_t size, size_t align, Reg m) {
const auto decoded_type = DecodeType(type, size, align);
if (!decoded_type) {
return UndefinedInstruction();
}
const auto [nelem, regs, inc] = *decoded_type;
const ExtReg d = ToExtReg(Vd, D);
const size_t d_last = RegNumber(d) + inc * (nelem - 1);
if (n == Reg::R15 || d_last + regs > 32) {
return UnpredictableInstruction();
}
[[maybe_unused]] const size_t alignment = align == 0 ? 1 : 4 << align;
const size_t ebytes = static_cast<size_t>(1) << size;
const size_t elements = 8 / ebytes;
const bool wback = m != Reg::R15;
@@ -131,10 +153,10 @@ bool ArmTranslatorVisitor::v8_VLD_multiple(bool D, Reg n, size_t Vd, Imm<4> type
for (size_t i = 0; i < nelem; i++) {
const ExtReg ext_reg = d + i * inc + r;
const IR::U64 element = ir.ZeroExtendToLong(ir.ReadMemory(ebytes * 8, address));
const IR::U64 shifted_element = ir.LogicalShiftLeft(element, ir.Imm8(e * ebytes * 8));
const IR::U64 shifted_element = ir.LogicalShiftLeft(element, ir.Imm8(static_cast<u8>(e * ebytes * 8)));
ir.SetExtendedRegister(ext_reg, ir.Or(ir.GetExtendedRegister(ext_reg), shifted_element));
address = ir.Add(address, ir.Imm32(ebytes));
address = ir.Add(address, ir.Imm32(static_cast<u32>(ebytes)));
}
}
}
@@ -143,7 +165,7 @@ bool ArmTranslatorVisitor::v8_VLD_multiple(bool D, Reg n, size_t Vd, Imm<4> type
if (register_index) {
ir.SetRegister(n, ir.Add(ir.GetRegister(n), ir.GetRegister(m)));
} else {
ir.SetRegister(n, ir.Add(ir.GetRegister(n), ir.Imm32(8 * nelem * regs)));
ir.SetRegister(n, ir.Add(ir.GetRegister(n), ir.Imm32(static_cast<u32>(8 * nelem * regs))));
}
}

View File

@@ -0,0 +1,93 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "common/bit_util.h"
#include "frontend/A32/translate/impl/translate_arm.h"
namespace Dynarmic::A32 {
namespace {
ExtReg ToExtReg(size_t base, bool bit) {
return ExtReg::D0 + (base + (bit ? 16 : 0));
}
template <bool WithDst, typename Callable>
bool BitwiseInstruction(ArmTranslatorVisitor& v, bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm, Callable fn) {
if (Q && (Common::Bit<0>(Vd) || Common::Bit<0>(Vn) || Common::Bit<0>(Vm))) {
return v.UndefinedInstruction();
}
const auto d = ToExtReg(Vd, D);
const auto m = ToExtReg(Vm, M);
const auto n = ToExtReg(Vn, N);
const size_t regs = Q ? 2 : 1;
for (size_t i = 0; i < regs; i++) {
if constexpr (WithDst) {
const IR::U32U64 reg_d = v.ir.GetExtendedRegister(d + i);
const IR::U32U64 reg_m = v.ir.GetExtendedRegister(m + i);
const IR::U32U64 reg_n = v.ir.GetExtendedRegister(n + i);
const IR::U32U64 result = fn(reg_d, reg_n, reg_m);
v.ir.SetExtendedRegister(d + i, result);
} else {
const IR::U32U64 reg_m = v.ir.GetExtendedRegister(m + i);
const IR::U32U64 reg_n = v.ir.GetExtendedRegister(n + i);
const IR::U32U64 result = fn(reg_n, reg_m);
v.ir.SetExtendedRegister(d + i, result);
}
}
return true;
}
} // Anonymous namespace
bool ArmTranslatorVisitor::asimd_VAND_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
return BitwiseInstruction<false>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_n, const auto& reg_m) {
return ir.And(reg_n, reg_m);
});
}
bool ArmTranslatorVisitor::asimd_VBIC_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
return BitwiseInstruction<false>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_n, const auto& reg_m) {
return ir.And(reg_n, ir.Not(reg_m));
});
}
bool ArmTranslatorVisitor::asimd_VORR_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
return BitwiseInstruction<false>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_n, const auto& reg_m) {
return ir.Or(reg_n, reg_m);
});
}
bool ArmTranslatorVisitor::asimd_VORN_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
return BitwiseInstruction<false>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_n, const auto& reg_m) {
return ir.Or(reg_n, ir.Not(reg_m));
});
}
bool ArmTranslatorVisitor::asimd_VEOR_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
return BitwiseInstruction<false>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_n, const auto& reg_m) {
return ir.Eor(reg_n, reg_m);
});
}
bool ArmTranslatorVisitor::asimd_VBSL(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
return BitwiseInstruction<true>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_d, const auto& reg_n, const auto& reg_m) {
return ir.Or(ir.And(reg_n, reg_d), ir.And(reg_m, ir.Not(reg_d)));
});
}
bool ArmTranslatorVisitor::asimd_VBIT(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
return BitwiseInstruction<true>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_d, const auto& reg_n, const auto& reg_m) {
return ir.Or(ir.And(reg_n, reg_m), ir.And(reg_d, ir.Not(reg_m)));
});
}
bool ArmTranslatorVisitor::asimd_VBIF(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
return BitwiseInstruction<true>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_d, const auto& reg_n, const auto& reg_m) {
return ir.Or(ir.And(reg_d, reg_m), ir.And(reg_n, ir.Not(reg_m)));
});
}
} // namespace Dynarmic::A32

View File

@@ -0,0 +1,43 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "common/bit_util.h"
#include "frontend/A32/translate/impl/translate_arm.h"
namespace Dynarmic::A32 {
namespace {
ExtReg ToExtRegD(size_t base, bool bit) {
return ExtReg::D0 + (base + (bit ? 16 : 0));
}
} // Anonymous namespace
bool ArmTranslatorVisitor::asimd_VSWP(bool D, size_t Vd, bool Q, bool M, size_t Vm) {
if (Q && (Common::Bit<0>(Vd) || Common::Bit<0>(Vm))) {
return UndefinedInstruction();
}
// Swapping the same register results in the same contents.
const auto d = ToExtRegD(Vd, D);
const auto m = ToExtRegD(Vm, M);
if (d == m) {
return true;
}
const size_t regs = Q ? 2 : 1;
for (size_t i = 0; i < regs; i++) {
const auto d_index = d + i;
const auto m_index = m + i;
const auto reg_d = ir.GetExtendedRegister(d_index);
const auto reg_m = ir.GetExtendedRegister(m_index);
ir.SetExtendedRegister(m_index, reg_d);
ir.SetExtendedRegister(d_index, reg_m);
}
return true;
}
} // namespace Dynarmic::A32
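A worked example of the register mapping above (values chosen purely for illustration): with Q = 1, D = 0, Vd = 0b0010, M = 0 and Vm = 0b0110, ToExtRegD yields d = D2 and m = D6; the Q check passes because both indices are even, and the two loop iterations swap D2<->D6 and D3<->D7, i.e. the full quadwords Q1 and Q3.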

View File

@@ -429,7 +429,21 @@ struct ArmTranslatorVisitor final {
bool vfp_VLDM_a1(Cond cond, bool p, bool u, bool D, bool w, Reg n, size_t Vd, Imm<8> imm8);
bool vfp_VLDM_a2(Cond cond, bool p, bool u, bool D, bool w, Reg n, size_t Vd, Imm<8> imm8);
// Advanced SIMD three register variants
bool asimd_VAND_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
bool asimd_VBIC_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
bool asimd_VORR_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
bool asimd_VORN_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
bool asimd_VEOR_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
bool asimd_VBSL(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
bool asimd_VBIT(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
bool asimd_VBIF(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
// Advanced SIMD two register, miscellaneous
bool asimd_VSWP(bool D, size_t Vd, bool Q, bool M, size_t Vm);
// Advanced SIMD load/store structures
bool v8_VST_multiple(bool D, Reg n, size_t Vd, Imm<4> type, size_t sz, size_t align, Reg m);
bool v8_VLD_multiple(bool D, Reg n, size_t Vd, Imm<4> type, size_t sz, size_t align, Reg m);
};

View File

@@ -41,14 +41,26 @@ U128 IREmitter::Pack2x64To1x128(const U64& lo, const U64& hi) {
return Inst<U128>(Opcode::Pack2x64To1x128, lo, hi);
}
U32 IREmitter::LeastSignificantWord(const U64& value) {
return Inst<U32>(Opcode::LeastSignificantWord, value);
UAny IREmitter::LeastSignificant(size_t bitsize, const U32U64& value) {
switch (bitsize) {
case 8:
return LeastSignificantByte(value);
case 16:
return LeastSignificantHalf(value);
case 32:
if (value.GetType() == Type::U32) {
return value;
}
return LeastSignificantWord(value);
case 64:
ASSERT(value.GetType() == Type::U64);
return value;
}
ASSERT_FALSE("Invalid bitsize");
}
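A brief usage sketch for the new LeastSignificant overload, mirroring how v8_VST_multiple earlier in this diff narrows a shifted 64-bit register value to the element width (a fragment, with names as in that function):

// ebytes is the element size in bytes (1, 2, 4 or 8), so 8 * ebytes picks the matching narrowing op.
const IR::U64 shifted = ir.LogicalShiftRight(ir.GetExtendedRegister(ext_reg), ir.Imm8(static_cast<u8>(e * ebytes * 8)));
const IR::UAny element = ir.LeastSignificant(8 * ebytes, shifted);  // U8/U16/U32, or the U64 itself when ebytes == 8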
ResultAndCarry<U32> IREmitter::MostSignificantWord(const U64& value) {
const auto result = Inst<U32>(Opcode::MostSignificantWord, value);
const auto carry_out = Inst<U1>(Opcode::GetCarryFromOp, result);
return {result, carry_out};
U32 IREmitter::LeastSignificantWord(const U64& value) {
return Inst<U32>(Opcode::LeastSignificantWord, value);
}
U16 IREmitter::LeastSignificantHalf(U32U64 value) {
@@ -65,6 +77,12 @@ U8 IREmitter::LeastSignificantByte(U32U64 value) {
return Inst<U8>(Opcode::LeastSignificantByte, value);
}
ResultAndCarry<U32> IREmitter::MostSignificantWord(const U64& value) {
const auto result = Inst<U32>(Opcode::MostSignificantWord, value);
const auto carry_out = Inst<U1>(Opcode::GetCarryFromOp, result);
return {result, carry_out};
}
U1 IREmitter::MostSignificantBit(const U32& value) {
return Inst<U1>(Opcode::MostSignificantBit, value);
}

View File

@@ -87,10 +87,11 @@ public:
U64 Pack2x32To1x64(const U32& lo, const U32& hi);
U128 Pack2x64To1x128(const U64& lo, const U64& hi);
UAny LeastSignificant(size_t bitsize, const U32U64& value);
U32 LeastSignificantWord(const U64& value);
ResultAndCarry<U32> MostSignificantWord(const U64& value);
U16 LeastSignificantHalf(U32U64 value);
U8 LeastSignificantByte(U32U64 value);
ResultAndCarry<U32> MostSignificantWord(const U64& value);
U1 MostSignificantBit(const U32& value);
U1 IsZero(const U32& value);
U1 IsZero(const U64& value);

View File

@@ -4,7 +4,6 @@
*/
#include <array>
#include <map>
#include <ostream>
#include <string>
#include <vector>
@@ -44,34 +43,32 @@ constexpr Type NZCV = Type::NZCVFlags;
constexpr Type Cond = Type::Cond;
constexpr Type Table = Type::Table;
static const std::map<Opcode, Meta> opcode_info {{
#define OPCODE(name, type, ...) { Opcode::name, { #name, type, { __VA_ARGS__ } } },
#define A32OPC(name, type, ...) { Opcode::A32##name, { #name, type, { __VA_ARGS__ } } },
#define A64OPC(name, type, ...) { Opcode::A64##name, { #name, type, { __VA_ARGS__ } } },
static const std::array opcode_info {
#define OPCODE(name, type, ...) Meta{#name, type, {__VA_ARGS__}},
#define A32OPC(name, type, ...) Meta{#name, type, {__VA_ARGS__}},
#define A64OPC(name, type, ...) Meta{#name, type, {__VA_ARGS__}},
#include "opcodes.inc"
#undef OPCODE
#undef A32OPC
#undef A64OPC
}};
};
} // namespace OpcodeInfo
Type GetTypeOf(Opcode op) {
return OpcodeInfo::opcode_info.at(op).type;
return OpcodeInfo::opcode_info.at(static_cast<size_t>(op)).type;
}
size_t GetNumArgsOf(Opcode op) {
return OpcodeInfo::opcode_info.at(op).arg_types.size();
return OpcodeInfo::opcode_info.at(static_cast<size_t>(op)).arg_types.size();
}
Type GetArgTypeOf(Opcode op, size_t arg_index) {
return OpcodeInfo::opcode_info.at(op).arg_types.at(arg_index);
return OpcodeInfo::opcode_info.at(static_cast<size_t>(op)).arg_types.at(arg_index);
}
std::string GetNameOf(Opcode op) {
if (OpcodeInfo::opcode_info.count(op) == 0)
return fmt::format("Unknown Opcode {}", static_cast<Opcode>(op));
return OpcodeInfo::opcode_info.at(op).name;
return OpcodeInfo::opcode_info.at(static_cast<size_t>(op)).name;
}
std::ostream& operator<<(std::ostream& o, Opcode opcode) {

View File

@@ -95,9 +95,9 @@ OPCODE(NZCVFromPackedFlags, NZCV, U32
OPCODE(Pack2x32To1x64, U64, U32, U32 )
OPCODE(Pack2x64To1x128, U128, U64, U64 )
OPCODE(LeastSignificantWord, U32, U64 )
OPCODE(MostSignificantWord, U32, U64 )
OPCODE(LeastSignificantHalf, U16, U32 )
OPCODE(LeastSignificantByte, U8, U32 )
OPCODE(MostSignificantWord, U32, U64 )
OPCODE(MostSignificantBit, U1, U32 )
OPCODE(IsZero32, U1, U32 )
OPCODE(IsZero64, U1, U64 )

View File

@@ -18,6 +18,8 @@
#include "common/common_types.h"
#include "common/llvm_disassemble.h"
#include "frontend/A32/decoder/arm.h"
#include "frontend/A32/decoder/asimd.h"
#include "frontend/A32/decoder/vfp.h"
#include "frontend/A32/location_descriptor.h"
#include "frontend/A32/translate/impl/translate_arm.h"
#include "frontend/A32/translate/translate.h"
@@ -34,7 +36,11 @@
using namespace Dynarmic;
const char* GetNameOfA32Instruction(u32 instruction) {
if (auto decoder = A32::DecodeArm<A32::ArmTranslatorVisitor>(instruction)) {
if (auto vfp_decoder = A32::DecodeVFP<A32::ArmTranslatorVisitor>(instruction)) {
return vfp_decoder->get().GetName();
} else if (auto asimd_decoder = A32::DecodeASIMD<A32::ArmTranslatorVisitor>(instruction)) {
return asimd_decoder->get().GetName();
} else if (auto decoder = A32::DecodeArm<A32::ArmTranslatorVisitor>(instruction)) {
return decoder->get().GetName();
}
return "<null>";
@@ -48,7 +54,7 @@ const char* GetNameOfA64Instruction(u32 instruction) {
}
void PrintA32Instruction(u32 instruction) {
fmt::print("{:08x} {}\n", instruction, A32::DisassembleArm(instruction));
fmt::print("{:08x} {}\n", instruction, Common::DisassembleAArch32(instruction));
fmt::print("Name: {}\n", GetNameOfA32Instruction(instruction));
const A32::LocationDescriptor location{0, {}, {}};