backend/A64: Use ASSERT_FALSE where possible

SachinVin 2020-05-16 23:34:32 +05:30
parent 1b9d22bfee
commit d0c69355fb
3 changed files with 23 additions and 23 deletions
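
For context: ASSERT_FALSE and UNREACHABLE are unconditional-failure assertions, so swapping them in for ASSERT_MSG(false, ...) drops the redundant false condition at every call site. Below is a minimal sketch of how such helpers could be defined; TerminateWithMessage is a hypothetical name, fmt-style formatting is assumed (the messages in this diff use "{}" placeholders), and the project's actual assert header may differ in detail.

#include <cstdio>
#include <cstdlib>
#include <string>
#include <fmt/format.h>

// Hypothetical approximation of the assertion helpers this commit switches to.
[[noreturn]] inline void TerminateWithMessage(const std::string& msg) {
    fmt::print(stderr, "assertion failed: {}\n", msg);
    std::abort();
}

// Conditional assert with an fmt-style formatted message.
#define ASSERT_MSG(expr, ...)                                  \
    do {                                                       \
        if (!(expr))                                           \
            TerminateWithMessage(fmt::format(__VA_ARGS__));    \
    } while (false)

// Unconditional failure: what ASSERT_MSG(false, ...) expresses, minus the dummy condition.
#define ASSERT_FALSE(...) TerminateWithMessage(fmt::format(__VA_ARGS__))

// Marks switch default cases and code paths that must never execute.
#define UNREACHABLE() ASSERT_FALSE("Unreachable code!")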

@@ -52,7 +52,7 @@ static size_t MJitStateExtReg(A32::ExtReg reg) {
size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::D0);
return offsetof(A32JitState, ExtReg) + sizeof(u64) * index;
}
ASSERT_MSG(false, "Should never happen.");
ASSERT_FALSE("Should never happen.");
}
A32EmitContext::A32EmitContext(RegAlloc& reg_alloc, IR::Block& block) : EmitContext(reg_alloc, block) {}
@@ -129,7 +129,7 @@ A32EmitA64::BlockDescriptor A32EmitA64::Emit(IR::Block& block) {
#undef A64OPC
default:
ASSERT_MSG(false, "Invalid opcode: {}", inst->GetOpcode());
ASSERT_FALSE("Invalid opcode: {}", inst->GetOpcode());
break;
}
@@ -850,7 +850,7 @@ void A32EmitA64::ReadMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr c
code.LDR(result, result, vaddr);
break;
default:
ASSERT_MSG(false, "Invalid bit_size");
ASSERT_FALSE("Invalid bit_size");
break;
}
end = code.B();
@@ -876,7 +876,7 @@ void A32EmitA64::ReadMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr c
code.LDR(result, X27, vaddr);
break;
default:
ASSERT_MSG(false, "Invalid bit_size");
ASSERT_FALSE("Invalid bit_size");
break;
}
@@ -964,7 +964,7 @@ void A32EmitA64::WriteMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr
code.STR(value, addr, vaddr);
break;
default:
ASSERT_MSG(false, "Invalid bit_size");
ASSERT_FALSE("Invalid bit_size");
break;
}
end = code.B();
@@ -988,7 +988,7 @@ void A32EmitA64::WriteMemory(A32EmitContext& ctx, IR::Inst* inst, const CodePtr
code.STR(value, X27, vaddr);
break;
default:
ASSERT_MSG(false, "Invalid bit_size");
ASSERT_FALSE("Invalid bit_size");
break;
}
@@ -1116,7 +1116,7 @@ void A32EmitA64::EmitA32ExclusiveWriteMemory64(A32EmitContext& ctx, IR::Inst* in
}
static void EmitCoprocessorException() {
ASSERT_MSG(false, "Should raise coproc exception here");
ASSERT_FALSE("Should raise coproc exception here");
}
static void CallCoprocCallback(BlockOfCode& code, RegAlloc& reg_alloc, A32::Jit* jit_interface, A32::Coprocessor::Callback callback,
@@ -1194,7 +1194,7 @@ void A32EmitA64::EmitA32CoprocSendOneWord(A32EmitContext& ctx, IR::Inst* inst) {
return;
}
default:
ASSERT_MSG(false, "Unreachable");
UNREACHABLE();
}
}
@@ -1236,7 +1236,7 @@ void A32EmitA64::EmitA32CoprocSendTwoWords(A32EmitContext& ctx, IR::Inst* inst)
return;
}
default:
ASSERT_MSG(false, "Unreachable");
UNREACHABLE();
}
}
@@ -1277,7 +1277,7 @@ void A32EmitA64::EmitA32CoprocGetOneWord(A32EmitContext& ctx, IR::Inst* inst) {
return;
}
default:
ASSERT_MSG(false, "Unreachable");
UNREACHABLE();
}
}
@@ -1320,7 +1320,7 @@ void A32EmitA64::EmitA32CoprocGetTwoWords(A32EmitContext& ctx, IR::Inst* inst) {
return;
}
default:
ASSERT_MSG(false, "Unreachable");
UNREACHABLE();
}
}

@@ -94,23 +94,23 @@ void EmitA64::EmitPushRSB(EmitContext& ctx, IR::Inst* inst) {
}
void EmitA64::EmitGetCarryFromOp(EmitContext&, IR::Inst*) {
ASSERT_MSG(false, "should never happen");
ASSERT_FALSE("should never happen");
}
void EmitA64::EmitGetOverflowFromOp(EmitContext&, IR::Inst*) {
ASSERT_MSG(false, "should never happen");
ASSERT_FALSE("should never happen");
}
void EmitA64::EmitGetGEFromOp(EmitContext&, IR::Inst*) {
ASSERT_MSG(false, "should never happen");
ASSERT_FALSE("should never happen");
}
void EmitA64::EmitGetUpperFromOp(EmitContext&, IR::Inst*) {
ASSERT_MSG(false, "should never happen");
ASSERT_FALSE("should never happen");
}
void EmitA64::EmitGetLowerFromOp(EmitContext&, IR::Inst*) {
ASSERT_MSG(false, "should never happen");
ASSERT_FALSE("should never happen");
}
void EmitA64::EmitGetNZCVFromOp(EmitContext& ctx, IR::Inst* inst) {

@@ -29,7 +29,7 @@ static u64 ImmediateToU64(const IR::Value& imm) {
case IR::Type::U64:
return u64(imm.GetU64());
default:
ASSERT_MSG(false, "This should never happen.");
ASSERT_FALSE("This should never happen.");
}
}
@@ -48,10 +48,10 @@ static size_t GetBitWidth(IR::Type type) {
case IR::Type::Cond:
case IR::Type::Void:
case IR::Type::Table:
ASSERT_MSG(false, "Type {} cannot be represented at runtime", type);
ASSERT_FALSE("Type {} cannot be represented at runtime", type);
return 0;
case IR::Type::Opaque:
ASSERT_MSG(false, "Not a concrete type");
ASSERT_FALSE("Not a concrete type");
return 0;
case IR::Type::U1:
return 8;
@@ -567,7 +567,7 @@ HostLoc RegAlloc::FindFreeSpill() const {
return loc;
}
ASSERT_MSG(false, "All spill locations are full");
ASSERT_FALSE("All spill locations are full");
}
HostLocInfo& RegAlloc::LocInfo(HostLoc loc) {
@@ -630,7 +630,7 @@ void RegAlloc::EmitMove(size_t bit_width, HostLoc to, HostLoc from) {
code.STR(Arm64Gen::INDEX_UNSIGNED, DecodeReg(HostLocToReg64(from)), Arm64Gen::X28, spill_to_addr(to));
}
} else {
ASSERT_MSG(false, "Invalid RegAlloc::EmitMove");
ASSERT_FALSE("Invalid RegAlloc::EmitMove");
}
}
@@ -641,9 +641,9 @@ void RegAlloc::EmitExchange(HostLoc a, HostLoc b) {
code.EOR(HostLocToReg64(b), HostLocToReg64(a), HostLocToReg64(b));
code.EOR(HostLocToReg64(a), HostLocToReg64(a), HostLocToReg64(b));
} else if (HostLocIsFPR(a) && HostLocIsFPR(b)) {
ASSERT_MSG(false, "Check your code: Exchanging XMM registers is unnecessary");
ASSERT_FALSE("Check your code: Exchanging XMM registers is unnecessary");
} else {
ASSERT_MSG(false, "Invalid RegAlloc::EmitExchange");
ASSERT_FALSE("Invalid RegAlloc::EmitExchange");
}
}