diff --git a/src/backend/A64/a32_emit_a64.cpp b/src/backend/A64/a32_emit_a64.cpp index 11bd547d..0284066c 100644 --- a/src/backend/A64/a32_emit_a64.cpp +++ b/src/backend/A64/a32_emit_a64.cpp @@ -52,7 +52,7 @@ static size_t MJitStateExtReg(A32::ExtReg reg) { size_t index = static_cast(reg) - static_cast(A32::ExtReg::D0); return offsetof(A32JitState, ExtReg) + sizeof(u64) * index; } - ASSERT_MSG(false, "Should never happen."); + ASSERT_FALSE("Should never happen."); } A32EmitContext::A32EmitContext(RegAlloc& reg_alloc, IR::Block& block) : EmitContext(reg_alloc, block) {} @@ -129,7 +129,7 @@ A32EmitA64::BlockDescriptor A32EmitA64::Emit(IR::Block& block) { #undef A64OPC default: - ASSERT_MSG(false, "Invalid opcode: {}", inst->GetOpcode()); + ASSERT_FALSE("Invalid opcode: {}", inst->GetOpcode()); break; } @@ -850,7 +850,7 @@ void A32EmitA64::ReadMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr c code.LDR(result, result, vaddr); break; default: - ASSERT_MSG(false, "Invalid bit_size"); + ASSERT_FALSE("Invalid bit_size"); break; } end = code.B(); @@ -876,7 +876,7 @@ void A32EmitA64::ReadMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr c code.LDR(result, X27, vaddr); break; default: - ASSERT_MSG(false, "Invalid bit_size"); + ASSERT_FALSE("Invalid bit_size"); break; } @@ -964,7 +964,7 @@ void A32EmitA64::WriteMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr code.STR(value, addr, vaddr); break; default: - ASSERT_MSG(false, "Invalid bit_size"); + ASSERT_FALSE("Invalid bit_size"); break; } end = code.B(); @@ -988,7 +988,7 @@ void A32EmitA64::WriteMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr code.STR(value, X27, vaddr); break; default: - ASSERT_MSG(false, "Invalid bit_size"); + ASSERT_FALSE("Invalid bit_size"); break; } @@ -1116,7 +1116,7 @@ void A32EmitA64::EmitA32ExclusiveWriteMemory64(A32EmitContext& ctx, IR::Inst* in } static void EmitCoprocessorException() { - ASSERT_MSG(false, "Should raise coproc exception here"); + ASSERT_FALSE("Should raise coproc exception here"); } static void CallCoprocCallback(BlockOfCode& code, RegAlloc& reg_alloc, A32::Jit* jit_interface, A32::Coprocessor::Callback callback, @@ -1194,7 +1194,7 @@ void A32EmitA64::EmitA32CoprocSendOneWord(A32EmitContext& ctx, IR::Inst* inst) { return; } default: - ASSERT_MSG(false, "Unreachable"); + UNREACHABLE(); } } @@ -1236,7 +1236,7 @@ void A32EmitA64::EmitA32CoprocSendTwoWords(A32EmitContext& ctx, IR::Inst* inst) return; } default: - ASSERT_MSG(false, "Unreachable"); + UNREACHABLE(); } } @@ -1277,7 +1277,7 @@ void A32EmitA64::EmitA32CoprocGetOneWord(A32EmitContext& ctx, IR::Inst* inst) { return; } default: - ASSERT_MSG(false, "Unreachable"); + UNREACHABLE(); } } @@ -1320,7 +1320,7 @@ void A32EmitA64::EmitA32CoprocGetTwoWords(A32EmitContext& ctx, IR::Inst* inst) { return; } default: - ASSERT_MSG(false, "Unreachable"); + UNREACHABLE(); } } diff --git a/src/backend/A64/emit_a64.cpp b/src/backend/A64/emit_a64.cpp index 604ab060..96da14f0 100644 --- a/src/backend/A64/emit_a64.cpp +++ b/src/backend/A64/emit_a64.cpp @@ -94,23 +94,23 @@ void EmitA64::EmitPushRSB(EmitContext& ctx, IR::Inst* inst) { } void EmitA64::EmitGetCarryFromOp(EmitContext&, IR::Inst*) { - ASSERT_MSG(false, "should never happen"); + ASSERT_FALSE("should never happen"); } void EmitA64::EmitGetOverflowFromOp(EmitContext&, IR::Inst*) { - ASSERT_MSG(false, "should never happen"); + ASSERT_FALSE("should never happen"); } void EmitA64::EmitGetGEFromOp(EmitContext&, IR::Inst*) { - ASSERT_MSG(false, "should never happen"); + ASSERT_FALSE("should never happen"); } void EmitA64::EmitGetUpperFromOp(EmitContext&, IR::Inst*) { - ASSERT_MSG(false, "should never happen"); + ASSERT_FALSE("should never happen"); } void EmitA64::EmitGetLowerFromOp(EmitContext&, IR::Inst*) { - ASSERT_MSG(false, "should never happen"); + ASSERT_FALSE("should never happen"); } void EmitA64::EmitGetNZCVFromOp(EmitContext& ctx, IR::Inst* inst) { diff --git a/src/backend/A64/reg_alloc.cpp b/src/backend/A64/reg_alloc.cpp index 0efb989c..353eecac 100644 --- a/src/backend/A64/reg_alloc.cpp +++ b/src/backend/A64/reg_alloc.cpp @@ -29,7 +29,7 @@ static u64 ImmediateToU64(const IR::Value& imm) { case IR::Type::U64: return u64(imm.GetU64()); default: - ASSERT_MSG(false, "This should never happen."); + ASSERT_FALSE("This should never happen."); } } @@ -48,10 +48,10 @@ static size_t GetBitWidth(IR::Type type) { case IR::Type::Cond: case IR::Type::Void: case IR::Type::Table: - ASSERT_MSG(false, "Type {} cannot be represented at runtime", type); + ASSERT_FALSE("Type {} cannot be represented at runtime", type); return 0; case IR::Type::Opaque: - ASSERT_MSG(false, "Not a concrete type"); + ASSERT_FALSE("Not a concrete type"); return 0; case IR::Type::U1: return 8; @@ -567,7 +567,7 @@ HostLoc RegAlloc::FindFreeSpill() const { return loc; } - ASSERT_MSG(false, "All spill locations are full"); + ASSERT_FALSE("All spill locations are full"); } HostLocInfo& RegAlloc::LocInfo(HostLoc loc) { @@ -630,7 +630,7 @@ void RegAlloc::EmitMove(size_t bit_width, HostLoc to, HostLoc from) { code.STR(Arm64Gen::INDEX_UNSIGNED, DecodeReg(HostLocToReg64(from)), Arm64Gen::X28, spill_to_addr(to)); } } else { - ASSERT_MSG(false, "Invalid RegAlloc::EmitMove"); + ASSERT_FALSE("Invalid RegAlloc::EmitMove"); } } @@ -641,9 +641,9 @@ void RegAlloc::EmitExchange(HostLoc a, HostLoc b) { code.EOR(HostLocToReg64(b), HostLocToReg64(a), HostLocToReg64(b)); code.EOR(HostLocToReg64(a), HostLocToReg64(a), HostLocToReg64(b)); } else if (HostLocIsFPR(a) && HostLocIsFPR(b)) { - ASSERT_MSG(false, "Check your code: Exchanging XMM registers is unnecessary"); + ASSERT_FALSE("Check your code: Exchanging XMM registers is unnecessary"); } else { - ASSERT_MSG(false, "Invalid RegAlloc::EmitExchange"); + ASSERT_FALSE("Invalid RegAlloc::EmitExchange"); } }