/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include <array>
#include <limits>
#include <tuple>
#include <type_traits>
#include <utility>

#include <mp/metavalue/lift_value.hpp>
#include <mp/traits/function_info.hpp>
#include <mp/traits/integer_of_size.hpp>
#include <mp/typelist/cartesian_product.hpp>
#include <mp/typelist/lift_sequence.hpp>
#include <mp/typelist/list.hpp>
#include <mp/typelist/lower_to_tuple.hpp>

#include "backend/x64/abi.h"
#include "backend/x64/block_of_code.h"
#include "backend/x64/emit_x64.h"
#include "common/assert.h"
#include "common/fp/fpcr.h"
#include "common/fp/info.h"
#include "common/fp/op.h"
#include "common/fp/util.h"
#include "common/lut_from_list.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/microinstruction.h"

namespace Dynarmic::Backend::X64 {

using namespace Xbyak::util;

namespace {

// FCODE selects the single- or double-precision form of a mnemonic based on
// fsize; ICODE does the same for the dword/qword integer forms.
#define FCODE(NAME)                  \
    [&code](auto... args){           \
        if constexpr (fsize == 32) { \
            code.NAME##s(args...);   \
        } else {                     \
            code.NAME##d(args...);   \
        }                            \
    }

#define ICODE(NAME)                  \
    [&code](auto... args){           \
        if constexpr (fsize == 32) { \
            code.NAME##d(args...);   \
        } else {                     \
            code.NAME##q(args...);   \
        }                            \
    }

template<typename Lambda>
void MaybeStandardFPSCRValue(BlockOfCode& code, EmitContext& ctx, bool fpcr_controlled, Lambda lambda) {
    const bool switch_mxcsr = ctx.FPCR(fpcr_controlled) != ctx.FPCR();

    if (switch_mxcsr) {
        code.EnterStandardASIMD();
        lambda();
        code.LeaveStandardASIMD();
    } else {
        lambda();
    }
}

template<size_t fsize, template<typename> class Indexer, size_t narg>
struct NaNHandler {
public:
    using FPT = mp::unsigned_integer_of_size<fsize>;

    using function_type = void(*)(std::array<VectorArray<FPT>, narg>&, FP::FPCR);

    static function_type GetDefault() {
        return GetDefaultImpl(std::make_index_sequence<narg - 1>{});
    }

private:
    template<size_t... argi>
    static function_type GetDefaultImpl(std::index_sequence<argi...>) {
        const auto result = [](std::array<VectorArray<FPT>, narg>& values, FP::FPCR) {
            VectorArray<FPT>& result = values[0];
            for (size_t elementi = 0; elementi < result.size(); ++elementi) {
                const auto current_values = Indexer<FPT>{}(elementi, values[argi + 1]...);
                if (auto r = FP::ProcessNaNs(std::get<argi>(current_values)...)) {
                    result[elementi] = *r;
                } else if (FP::IsNaN(result[elementi])) {
                    result[elementi] = FP::FPInfo<FPT>::DefaultNaN();
                }
            }
        };

        return static_cast<function_type>(result);
    }
};

template<size_t fsize, size_t nargs, typename NaNHandler>
void HandleNaNs(BlockOfCode& code, EmitContext& ctx, bool fpcr_controlled, std::array<Xbyak::Xmm, nargs + 1> xmms, const Xbyak::Xmm& nan_mask, NaNHandler nan_handler) {
    static_assert(fsize == 32 || fsize == 64, "fsize must be either 32 or 64");

    if (code.HasSSE41()) {
        code.ptest(nan_mask, nan_mask);
    } else {
        const Xbyak::Reg32 bitmask = ctx.reg_alloc.ScratchGpr().cvt32();
        code.movmskps(bitmask, nan_mask);
        code.cmp(bitmask, 0);
    }

    Xbyak::Label end;
    Xbyak::Label nan;

    code.jnz(nan, code.T_NEAR);
    code.L(end);

    code.SwitchToFarCode();
    code.L(nan);

    const Xbyak::Xmm result = xmms[0];

    code.sub(rsp, 8);
    ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));

    const size_t stack_space = xmms.size() * 16;
    code.sub(rsp, stack_space + ABI_SHADOW_SPACE);
    for (size_t i = 0; i < xmms.size(); ++i) {
        code.movaps(xword[rsp + ABI_SHADOW_SPACE + i * 16], xmms[i]);
    }
    code.lea(code.ABI_PARAM1, ptr[rsp + ABI_SHADOW_SPACE + 0 * 16]);
    code.mov(code.ABI_PARAM2, ctx.FPCR(fpcr_controlled).Value());

    code.CallFunction(nan_handler);

    code.movaps(result, xword[rsp + ABI_SHADOW_SPACE + 0 * 16]);
    code.add(rsp, stack_space + ABI_SHADOW_SPACE);
    ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
    code.add(rsp, 8);

    code.jmp(end, code.T_NEAR);
    code.SwitchToNearCode();
}

template<size_t fsize>
Xbyak::Address GetVectorOf(BlockOfCode& code, u64 value) {
    if constexpr (fsize == 32) {
        return code.MConst(xword, (value << 32) | value, (value << 32) | value);
    } else {
        return code.MConst(xword, value, value);
    }
}
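// Compile-time variant of the above: broadcasts a constant that is already
// known when the template is instantiated.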
template<size_t fsize, u64 value>
Xbyak::Address GetVectorOf(BlockOfCode& code) {
    if constexpr (fsize == 32) {
        return code.MConst(xword, (value << 32) | value, (value << 32) | value);
    } else {
        return code.MConst(xword, value, value);
    }
}

template<size_t fsize>
Xbyak::Address GetNaNVector(BlockOfCode& code) {
    using FPT = mp::unsigned_integer_of_size<fsize>;
    return GetVectorOf<fsize, FP::FPInfo<FPT>::DefaultNaN()>(code);
}

template<size_t fsize>
Xbyak::Address GetNegativeZeroVector(BlockOfCode& code) {
    using FPT = mp::unsigned_integer_of_size<fsize>;
    return GetVectorOf<fsize, FP::FPInfo<FPT>::Zero(true)>(code);
}

template<size_t fsize>
Xbyak::Address GetSmallestNormalVector(BlockOfCode& code) {
    using FPT = mp::unsigned_integer_of_size<fsize>;
    constexpr FPT smallest_normal_number = FP::FPValue<FPT, false, FP::FPInfo<FPT>::exponent_min, 1>();
    return GetVectorOf<fsize, smallest_normal_number>(code);
}

template<size_t fsize, bool sign, int exponent, mp::unsigned_integer_of_size<fsize> value>
Xbyak::Address GetVectorOf(BlockOfCode& code) {
    using FPT = mp::unsigned_integer_of_size<fsize>;
    return GetVectorOf<fsize, FP::FPValue<FPT, sign, exponent, value>()>(code);
}

template<size_t fsize>
void ForceToDefaultNaN(BlockOfCode& code, FP::FPCR fpcr, Xbyak::Xmm result) {
    if (fpcr.DN()) {
        const Xbyak::Xmm nan_mask = xmm0;
        if (code.HasAVX()) {
            FCODE(vcmpunordp)(nan_mask, result, result);
            FCODE(blendvp)(result, GetNaNVector<fsize>(code));
        } else {
            code.movaps(nan_mask, result);
            FCODE(cmpordp)(nan_mask, nan_mask);
            code.andps(result, nan_mask);
            code.andnps(nan_mask, GetNaNVector<fsize>(code));
            code.orps(result, nan_mask);
        }
    }
}

template<size_t fsize>
void ZeroIfNaN(BlockOfCode& code, Xbyak::Xmm result) {
    const Xbyak::Xmm nan_mask = xmm0;
    if (code.HasAVX()) {
        FCODE(vcmpordp)(nan_mask, result, result);
        FCODE(vandp)(result, result, nan_mask);
    } else {
        code.movaps(nan_mask, result);
        FCODE(cmpordp)(nan_mask, nan_mask);
        code.andps(result, nan_mask);
    }
}

template<size_t fsize>
void DenormalsAreZero(BlockOfCode& code, FP::FPCR fpcr, std::initializer_list<Xbyak::Xmm> to_daz, Xbyak::Xmm tmp) {
    if (fpcr.FZ()) {
        // Add a zero whose sign is the identity for the current rounding mode;
        // with flush-to-zero active this flushes denormals while leaving other
        // values unchanged.
        if (fpcr.RMode() != FP::RoundingMode::TowardsMinusInfinity) {
            code.movaps(tmp, GetNegativeZeroVector<fsize>(code));
        } else {
            code.xorps(tmp, tmp);
        }
        for (const Xbyak::Xmm& xmm : to_daz) {
            FCODE(addp)(xmm, tmp);
        }
    }
}

template<typename T>
struct DefaultIndexer {
    std::tuple<T> operator()(size_t i, const VectorArray<T>& a) {
        return std::make_tuple(a[i]);
    }

    std::tuple<T, T> operator()(size_t i, const VectorArray<T>& a, const VectorArray<T>& b) {
        return std::make_tuple(a[i], b[i]);
    }

    std::tuple<T, T, T> operator()(size_t i, const VectorArray<T>& a, const VectorArray<T>& b, const VectorArray<T>& c) {
        return std::make_tuple(a[i], b[i], c[i]);
    }
};

template<typename T>
struct PairedIndexer {
    std::tuple<T, T> operator()(size_t i, const VectorArray<T>& a, const VectorArray<T>& b) {
        constexpr size_t halfway = std::tuple_size_v<VectorArray<T>> / 2;
        const size_t which_array = i / halfway;
        i %= halfway;
        switch (which_array) {
        case 0:
            return std::make_tuple(a[2 * i], a[2 * i + 1]);
        case 1:
            return std::make_tuple(b[2 * i], b[2 * i + 1]);
        }
        UNREACHABLE();
    }
};

template<typename T>
struct PairedLowerIndexer {
    std::tuple<T, T> operator()(size_t i, const VectorArray<T>& a, const VectorArray<T>& b) {
        constexpr size_t array_size = std::tuple_size_v<VectorArray<T>>;
        if constexpr (array_size == 4) {
            switch (i) {
            case 0:
                return std::make_tuple(a[0], a[1]);
            case 1:
                return std::make_tuple(b[0], b[1]);
            default:
                return std::make_tuple(0, 0);
            }
        } else if constexpr (array_size == 2) {
            if (i == 0) {
                return std::make_tuple(a[0], b[0]);
            }
            return std::make_tuple(0, 0);
        } else {
            UNREACHABLE();
        }
    }
};
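// Emits a unary elementwise operation. With FPCR.DN set, NaN results are
// canonicalized to the default NaN inline; otherwise a NaN mask is computed
// and HandleNaNs calls back into the (far-code) nan_handler for fixup.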
template<size_t fsize, template<typename> class Indexer, size_t fpcr_controlled_arg_index = 1, typename Function>
void EmitTwoOpVectorOperation(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Function fn, typename NaNHandler<fsize, Indexer, 2>::function_type nan_handler = NaNHandler<fsize, Indexer, 2>::GetDefault()) {
    static_assert(fsize == 32 || fsize == 64, "fsize must be either 32 or 64");

    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[fpcr_controlled_arg_index].GetImmediateU1();

    if (ctx.FPCR(fpcr_controlled).DN()) {
        Xbyak::Xmm result;

        if constexpr (std::is_member_function_pointer_v<Function>) {
            result = ctx.reg_alloc.UseScratchXmm(args[0]);
            MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
                (code.*fn)(result);
            });
        } else {
            const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseXmm(args[0]);
            result = ctx.reg_alloc.ScratchXmm();
            MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
                fn(result, xmm_a);
            });
        }

        ForceToDefaultNaN<fsize>(code, ctx.FPCR(fpcr_controlled), result);

        ctx.reg_alloc.DefineValue(inst, result);
        return;
    }

    const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
    const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseXmm(args[0]);
    const Xbyak::Xmm nan_mask = ctx.reg_alloc.ScratchXmm();

    if constexpr (std::is_member_function_pointer_v<Function>) {
        code.movaps(result, xmm_a);
        (code.*fn)(result);
    } else {
        fn(result, xmm_a);
    }

    if (code.HasAVX()) {
        FCODE(vcmpunordp)(nan_mask, result, result);
    } else {
        code.movaps(nan_mask, result);
        FCODE(cmpunordp)(nan_mask, nan_mask);
    }

    HandleNaNs<fsize, 1>(code, ctx, fpcr_controlled, {result, xmm_a}, nan_mask, nan_handler);

    ctx.reg_alloc.DefineValue(inst, result);
}

template<size_t fsize, template<typename> class Indexer, typename Function>
void EmitThreeOpVectorOperation(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Function fn, typename NaNHandler<fsize, Indexer, 3>::function_type nan_handler = NaNHandler<fsize, Indexer, 3>::GetDefault()) {
    static_assert(fsize == 32 || fsize == 64, "fsize must be either 32 or 64");

    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[2].GetImmediateU1();

    if (ctx.FPCR(fpcr_controlled).DN()) {
        const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
        const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);

        if constexpr (std::is_member_function_pointer_v<Function>) {
            MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
                (code.*fn)(xmm_a, xmm_b);
            });
        } else {
            MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
                fn(xmm_a, xmm_b);
            });
        }

        ForceToDefaultNaN<fsize>(code, ctx.FPCR(fpcr_controlled), xmm_a);

        ctx.reg_alloc.DefineValue(inst, xmm_a);
        return;
    }

    const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
    const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseXmm(args[0]);
    const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
    const Xbyak::Xmm nan_mask = ctx.reg_alloc.ScratchXmm();

    code.movaps(nan_mask, xmm_b);
    code.movaps(result, xmm_a);
    FCODE(cmpunordp)(nan_mask, xmm_a);
    if constexpr (std::is_member_function_pointer_v<Function>) {
        (code.*fn)(result, xmm_b);
    } else {
        fn(result, xmm_b);
    }
    FCODE(cmpunordp)(nan_mask, result);

    HandleNaNs<fsize, 2>(code, ctx, fpcr_controlled, {result, xmm_a, xmm_b}, nan_mask, nan_handler);

    ctx.reg_alloc.DefineValue(inst, result);
}
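// The *Fallback helpers call out to host C++ for element types or behaviour
// with no efficient x64 sequence. Operands are spilled to 16-byte stack
// slots: ABI_PARAM1 points at the result slot, the following params at the
// operand slots, then the FPCR value and a pointer to the guest FPSR
// cumulative exception flags are passed.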
template<size_t fpcr_controlled_arg_index = 1, typename Lambda>
void EmitTwoOpFallback(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Lambda lambda) {
    const auto fn = static_cast<mp::equivalent_function_type<Lambda>*>(lambda);

    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[fpcr_controlled_arg_index].GetImmediateU1();
    const Xbyak::Xmm arg1 = ctx.reg_alloc.UseXmm(args[0]);
    const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
    ctx.reg_alloc.EndOfAllocScope();
    ctx.reg_alloc.HostCall(nullptr);

    constexpr u32 stack_space = 2 * 16;
    code.sub(rsp, stack_space + ABI_SHADOW_SPACE);
    code.lea(code.ABI_PARAM1, ptr[rsp + ABI_SHADOW_SPACE + 0 * 16]);
    code.lea(code.ABI_PARAM2, ptr[rsp + ABI_SHADOW_SPACE + 1 * 16]);
    code.mov(code.ABI_PARAM3.cvt32(), ctx.FPCR(fpcr_controlled).Value());
    code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);

    code.movaps(xword[code.ABI_PARAM2], arg1);
    code.CallFunction(fn);
    code.movaps(result, xword[rsp + ABI_SHADOW_SPACE + 0 * 16]);

    code.add(rsp, stack_space + ABI_SHADOW_SPACE);

    ctx.reg_alloc.DefineValue(inst, result);
}

template<typename Lambda>
void EmitThreeOpFallbackWithoutRegAlloc(BlockOfCode& code, EmitContext& ctx, Xbyak::Xmm result, Xbyak::Xmm arg1, Xbyak::Xmm arg2, Lambda lambda, bool fpcr_controlled) {
    const auto fn = static_cast<mp::equivalent_function_type<Lambda>*>(lambda);

    const u32 fpcr = ctx.FPCR(fpcr_controlled).Value();

#ifdef _WIN32
    constexpr u32 stack_space = 4 * 16;
    code.sub(rsp, stack_space + ABI_SHADOW_SPACE);
    code.lea(code.ABI_PARAM1, ptr[rsp + ABI_SHADOW_SPACE + 1 * 16]);
    code.lea(code.ABI_PARAM2, ptr[rsp + ABI_SHADOW_SPACE + 2 * 16]);
    code.lea(code.ABI_PARAM3, ptr[rsp + ABI_SHADOW_SPACE + 3 * 16]);
    code.mov(code.ABI_PARAM4.cvt32(), fpcr);
    code.lea(rax, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
    code.mov(qword[rsp + ABI_SHADOW_SPACE + 0], rax);
#else
    constexpr u32 stack_space = 3 * 16;
    code.sub(rsp, stack_space + ABI_SHADOW_SPACE);
    code.lea(code.ABI_PARAM1, ptr[rsp + ABI_SHADOW_SPACE + 0 * 16]);
    code.lea(code.ABI_PARAM2, ptr[rsp + ABI_SHADOW_SPACE + 1 * 16]);
    code.lea(code.ABI_PARAM3, ptr[rsp + ABI_SHADOW_SPACE + 2 * 16]);
    code.mov(code.ABI_PARAM4.cvt32(), fpcr);
    code.lea(code.ABI_PARAM5, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
#endif

    code.movaps(xword[code.ABI_PARAM2], arg1);
    code.movaps(xword[code.ABI_PARAM3], arg2);
    code.CallFunction(fn);

#ifdef _WIN32
    code.movaps(result, xword[rsp + ABI_SHADOW_SPACE + 1 * 16]);
#else
    code.movaps(result, xword[rsp + ABI_SHADOW_SPACE + 0 * 16]);
#endif

    code.add(rsp, stack_space + ABI_SHADOW_SPACE);
}

template<typename Lambda>
void EmitThreeOpFallback(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Lambda lambda) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm arg1 = ctx.reg_alloc.UseXmm(args[0]);
    const Xbyak::Xmm arg2 = ctx.reg_alloc.UseXmm(args[1]);
    const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
    ctx.reg_alloc.EndOfAllocScope();
    ctx.reg_alloc.HostCall(nullptr);

    const bool fpcr_controlled = args[2].GetImmediateU1();

    EmitThreeOpFallbackWithoutRegAlloc(code, ctx, result, arg1, arg2, lambda, fpcr_controlled);

    ctx.reg_alloc.DefineValue(inst, result);
}
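// The four-operand variant needs six parameters. The Microsoft x64 ABI only
// passes four in registers, so the FPCR value and the FPSR pointer spill to
// the stack there; the System V ABI can use ABI_PARAM5/ABI_PARAM6 directly.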
template<typename Lambda>
void EmitFourOpFallbackWithoutRegAlloc(BlockOfCode& code, EmitContext& ctx, Xbyak::Xmm result, Xbyak::Xmm arg1, Xbyak::Xmm arg2, Xbyak::Xmm arg3, Lambda lambda, bool fpcr_controlled) {
    const auto fn = static_cast<mp::equivalent_function_type<Lambda>*>(lambda);

#ifdef _WIN32
    constexpr u32 stack_space = 5 * 16;
    code.sub(rsp, stack_space + ABI_SHADOW_SPACE);
    code.lea(code.ABI_PARAM1, ptr[rsp + ABI_SHADOW_SPACE + 1 * 16]);
    code.lea(code.ABI_PARAM2, ptr[rsp + ABI_SHADOW_SPACE + 2 * 16]);
    code.lea(code.ABI_PARAM3, ptr[rsp + ABI_SHADOW_SPACE + 3 * 16]);
    code.lea(code.ABI_PARAM4, ptr[rsp + ABI_SHADOW_SPACE + 4 * 16]);
    code.mov(qword[rsp + ABI_SHADOW_SPACE + 0], ctx.FPCR(fpcr_controlled).Value());
    code.lea(rax, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
    code.mov(qword[rsp + ABI_SHADOW_SPACE + 8], rax);
#else
    constexpr u32 stack_space = 4 * 16;
    code.sub(rsp, stack_space + ABI_SHADOW_SPACE);
    code.lea(code.ABI_PARAM1, ptr[rsp + ABI_SHADOW_SPACE + 0 * 16]);
    code.lea(code.ABI_PARAM2, ptr[rsp + ABI_SHADOW_SPACE + 1 * 16]);
    code.lea(code.ABI_PARAM3, ptr[rsp + ABI_SHADOW_SPACE + 2 * 16]);
    code.lea(code.ABI_PARAM4, ptr[rsp + ABI_SHADOW_SPACE + 3 * 16]);
    code.mov(code.ABI_PARAM5.cvt32(), ctx.FPCR(fpcr_controlled).Value());
    code.lea(code.ABI_PARAM6, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
#endif

    code.movaps(xword[code.ABI_PARAM2], arg1);
    code.movaps(xword[code.ABI_PARAM3], arg2);
    code.movaps(xword[code.ABI_PARAM4], arg3);
    code.CallFunction(fn);

#ifdef _WIN32
    code.movaps(result, xword[rsp + ABI_SHADOW_SPACE + 1 * 16]);
#else
    code.movaps(result, xword[rsp + ABI_SHADOW_SPACE + 0 * 16]);
#endif

    code.add(rsp, stack_space + ABI_SHADOW_SPACE);
}

template<typename Lambda>
void EmitFourOpFallback(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Lambda lambda) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[3].GetImmediateU1();
    const Xbyak::Xmm arg1 = ctx.reg_alloc.UseXmm(args[0]);
    const Xbyak::Xmm arg2 = ctx.reg_alloc.UseXmm(args[1]);
    const Xbyak::Xmm arg3 = ctx.reg_alloc.UseXmm(args[2]);
    const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
    ctx.reg_alloc.EndOfAllocScope();
    ctx.reg_alloc.HostCall(nullptr);

    EmitFourOpFallbackWithoutRegAlloc(code, ctx, result, arg1, arg2, arg3, lambda, fpcr_controlled);

    ctx.reg_alloc.DefineValue(inst, result);
}

} // anonymous namespace

void EmitX64::EmitFPVectorAbs16(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
    const Xbyak::Address mask = code.MConst(xword, 0x7FFF7FFF7FFF7FFF, 0x7FFF7FFF7FFF7FFF);

    code.pand(a, mask);

    ctx.reg_alloc.DefineValue(inst, a);
}

void EmitX64::EmitFPVectorAbs32(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
    const Xbyak::Address mask = code.MConst(xword, 0x7FFFFFFF7FFFFFFF, 0x7FFFFFFF7FFFFFFF);

    code.andps(a, mask);

    ctx.reg_alloc.DefineValue(inst, a);
}

void EmitX64::EmitFPVectorAbs64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
    const Xbyak::Address mask = code.MConst(xword, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF);

    code.andpd(a, mask);

    ctx.reg_alloc.DefineValue(inst, a);
}

void EmitX64::EmitFPVectorAdd32(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<32, DefaultIndexer>(code, ctx, inst, &Xbyak::CodeGenerator::addps);
}

void EmitX64::EmitFPVectorAdd64(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<64, DefaultIndexer>(code, ctx, inst, &Xbyak::CodeGenerator::addpd);
}

void EmitX64::EmitFPVectorDiv32(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<32, DefaultIndexer>(code, ctx, inst, &Xbyak::CodeGenerator::divps);
}

void EmitX64::EmitFPVectorDiv64(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<64, DefaultIndexer>(code, ctx, inst, &Xbyak::CodeGenerator::divpd);
}

void EmitX64::EmitFPVectorEqual16(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpFallback(code, ctx, inst, [](VectorArray<u16>& result, const VectorArray<u16>& op1, const VectorArray<u16>& op2, FP::FPCR fpcr, FP::FPSR& fpsr) {
        for (size_t i = 0; i < result.size(); i++) {
            result[i] = FP::FPCompareEQ(op1[i], op2[i], fpcr, fpsr) ? 0xFFFF : 0;
        }
    });
}
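// 32- and 64-bit equality can use the native SSE compares directly; only
// flush-to-zero of denormal inputs needs handling beforehand.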
void EmitX64::EmitFPVectorEqual32(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[2].GetImmediateU1();
    const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
    const Xbyak::Xmm b = ctx.FPCR(fpcr_controlled).FZ() ? ctx.reg_alloc.UseScratchXmm(args[1]) : ctx.reg_alloc.UseXmm(args[1]);

    MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
        DenormalsAreZero<32>(code, ctx.FPCR(fpcr_controlled), {a, b}, xmm0);
        code.cmpeqps(a, b);
    });

    ctx.reg_alloc.DefineValue(inst, a);
}

void EmitX64::EmitFPVectorEqual64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[2].GetImmediateU1();
    const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
    const Xbyak::Xmm b = ctx.FPCR(fpcr_controlled).FZ() ? ctx.reg_alloc.UseScratchXmm(args[1]) : ctx.reg_alloc.UseXmm(args[1]);

    MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
        DenormalsAreZero<64>(code, ctx.FPCR(fpcr_controlled), {a, b}, xmm0);
        code.cmpeqpd(a, b);
    });

    ctx.reg_alloc.DefineValue(inst, a);
}

void EmitX64::EmitFPVectorFromSignedFixed32(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm xmm = ctx.reg_alloc.UseScratchXmm(args[0]);
    const int fbits = args[1].GetImmediateU8();
    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
    const bool fpcr_controlled = args[3].GetImmediateU1();
    ASSERT(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());

    MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
        code.cvtdq2ps(xmm, xmm);

        if (fbits != 0) {
            // Scale by 2^-fbits: multiply by a float with exponent (127 - fbits).
            code.mulps(xmm, GetVectorOf<32>(code, static_cast<u32>(127 - fbits) << 23));
        }
    });

    ctx.reg_alloc.DefineValue(inst, xmm);
}

void EmitX64::EmitFPVectorFromSignedFixed64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm xmm = ctx.reg_alloc.UseScratchXmm(args[0]);
    const int fbits = args[1].GetImmediateU8();
    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
    const bool fpcr_controlled = args[3].GetImmediateU1();
    ASSERT(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());

    MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
        if (code.HasAVX512_Skylake()) {
            code.vcvtqq2pd(xmm, xmm);
        } else if (code.HasSSE41()) {
            const Xbyak::Xmm xmm_tmp = ctx.reg_alloc.ScratchXmm();
            const Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();

            // First quadword
            code.movq(tmp, xmm);
            code.cvtsi2sd(xmm, tmp);

            // Second quadword
            code.pextrq(tmp, xmm, 1);
            code.cvtsi2sd(xmm_tmp, tmp);

            // Combine
            code.unpcklpd(xmm, xmm_tmp);
        } else {
            const Xbyak::Xmm high_xmm = ctx.reg_alloc.ScratchXmm();
            const Xbyak::Xmm xmm_tmp = ctx.reg_alloc.ScratchXmm();
            const Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();

            // First quadword
            code.movhlps(high_xmm, xmm);
            code.movq(tmp, xmm);
            code.cvtsi2sd(xmm, tmp);

            // Second quadword
            code.movq(tmp, high_xmm);
            code.cvtsi2sd(xmm_tmp, tmp);

            // Combine
            code.unpcklpd(xmm, xmm_tmp);
        }

        if (fbits != 0) {
            // Scale by 2^-fbits: multiply by a double with exponent (1023 - fbits).
            code.mulpd(xmm, GetVectorOf<64>(code, static_cast<u64>(1023 - fbits) << 52));
        }
    });

    ctx.reg_alloc.DefineValue(inst, xmm);
}
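// The magic-number path below reconstructs a u32 exactly from two halves:
// the low 16 bits are blended into the mantissa of 2^23 (0x4B000000) and the
// high 16 bits into the mantissa of 2^39 (0x53000000), giving the exact
// floats (2^23 + lo) and (2^39 + hi * 2^16). Adding -(2^39 + 2^23)
// (0xD3000080) and then the low part yields lo + hi * 2^16 == x, with the
// only rounding happening in the final addition.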
void EmitX64::EmitFPVectorFromUnsignedFixed32(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm xmm = ctx.reg_alloc.UseScratchXmm(args[0]);
    const int fbits = args[1].GetImmediateU8();
    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
    const bool fpcr_controlled = args[3].GetImmediateU1();
    ASSERT(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());

    MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
        if (code.HasAVX512_Skylake()) {
            code.vcvtudq2ps(xmm, xmm);
        } else {
            const Xbyak::Address mem_4B000000 = code.MConst(xword, 0x4B0000004B000000, 0x4B0000004B000000);
            const Xbyak::Address mem_53000000 = code.MConst(xword, 0x5300000053000000, 0x5300000053000000);
            const Xbyak::Address mem_D3000080 = code.MConst(xword, 0xD3000080D3000080, 0xD3000080D3000080);

            const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();

            if (code.HasAVX()) {
                code.vpblendw(tmp, xmm, mem_4B000000, 0b10101010);
                code.vpsrld(xmm, xmm, 16);
                code.vpblendw(xmm, xmm, mem_53000000, 0b10101010);
                code.vaddps(xmm, xmm, mem_D3000080);
                code.vaddps(xmm, tmp, xmm);
            } else {
                const Xbyak::Address mem_0xFFFF = code.MConst(xword, 0x0000FFFF0000FFFF, 0x0000FFFF0000FFFF);

                code.movdqa(tmp, mem_0xFFFF);

                code.pand(tmp, xmm);
                code.por(tmp, mem_4B000000);
                code.psrld(xmm, 16);
                code.por(xmm, mem_53000000);
                code.addps(xmm, mem_D3000080);
                code.addps(xmm, tmp);
            }
        }

        if (fbits != 0) {
            code.mulps(xmm, GetVectorOf<32>(code, static_cast<u32>(127 - fbits) << 23));
        }

        if (ctx.FPCR(fpcr_controlled).RMode() == FP::RoundingMode::TowardsMinusInfinity) {
            code.pand(xmm, code.MConst(xword, 0x7FFFFFFF7FFFFFFF, 0x7FFFFFFF7FFFFFFF));
        }
    });

    ctx.reg_alloc.DefineValue(inst, xmm);
}

void EmitX64::EmitFPVectorFromUnsignedFixed64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm xmm = ctx.reg_alloc.UseScratchXmm(args[0]);
    const int fbits = args[1].GetImmediateU8();
    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
    const bool fpcr_controlled = args[3].GetImmediateU1();
    ASSERT(rounding_mode == ctx.FPCR(fpcr_controlled).RMode());

    MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
        if (code.HasAVX512_Skylake()) {
            code.vcvtuqq2pd(xmm, xmm);
        } else {
            // u64 -> f64 via the standard 2^52/2^84 bias trick: each 32-bit
            // half is placed in the mantissa of a biased double, the bias is
            // subtracted exactly, and the two halves are summed.
            const Xbyak::Address unpack = code.MConst(xword, 0x4530000043300000, 0);
            const Xbyak::Address subtrahend = code.MConst(xword, 0x4330000000000000, 0x4530000000000000);

            const Xbyak::Xmm unpack_reg = ctx.reg_alloc.ScratchXmm();
            const Xbyak::Xmm subtrahend_reg = ctx.reg_alloc.ScratchXmm();
            const Xbyak::Xmm tmp1 = ctx.reg_alloc.ScratchXmm();

            if (code.HasAVX()) {
                code.vmovapd(unpack_reg, unpack);
                code.vmovapd(subtrahend_reg, subtrahend);

                code.vunpcklps(tmp1, xmm, unpack_reg);
                code.vsubpd(tmp1, tmp1, subtrahend_reg);

                code.vpermilps(xmm, xmm, 0b01001110);

                code.vunpcklps(xmm, xmm, unpack_reg);
                code.vsubpd(xmm, xmm, subtrahend_reg);

                code.vhaddpd(xmm, tmp1, xmm);
            } else {
                const Xbyak::Xmm tmp2 = ctx.reg_alloc.ScratchXmm();

                code.movapd(unpack_reg, unpack);
                code.movapd(subtrahend_reg, subtrahend);

                code.pshufd(tmp1, xmm, 0b01001110);

                code.punpckldq(xmm, unpack_reg);
                code.subpd(xmm, subtrahend_reg);
                code.pshufd(tmp2, xmm, 0b01001110);
                code.addpd(xmm, tmp2);

                code.punpckldq(tmp1, unpack_reg);
                code.subpd(tmp1, subtrahend_reg);

                code.pshufd(unpack_reg, tmp1, 0b01001110);
                code.addpd(unpack_reg, tmp1);

                code.unpcklpd(xmm, unpack_reg);
            }
        }

        if (fbits != 0) {
            code.mulpd(xmm, GetVectorOf<64>(code, static_cast<u64>(1023 - fbits) << 52));
        }

        if (ctx.FPCR(fpcr_controlled).RMode() == FP::RoundingMode::TowardsMinusInfinity) {
            code.pand(xmm, code.MConst(xword, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF));
        }
    });

    ctx.reg_alloc.DefineValue(inst, xmm);
}
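// Greater/GreaterEqual swap operands so the x64 "less-than" predicates can be
// used: ARM "a > b" is emitted as cmpltps(b, a), leaving the mask in b.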
void EmitX64::EmitFPVectorGreater32(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[2].GetImmediateU1();
    const Xbyak::Xmm a = ctx.FPCR(fpcr_controlled).FZ() ? ctx.reg_alloc.UseScratchXmm(args[0]) : ctx.reg_alloc.UseXmm(args[0]);
    const Xbyak::Xmm b = ctx.reg_alloc.UseScratchXmm(args[1]);

    MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
        DenormalsAreZero<32>(code, ctx.FPCR(fpcr_controlled), {a, b}, xmm0);
        code.cmpltps(b, a);
    });

    ctx.reg_alloc.DefineValue(inst, b);
}

void EmitX64::EmitFPVectorGreater64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[2].GetImmediateU1();
    const Xbyak::Xmm a = ctx.FPCR(fpcr_controlled).FZ() ? ctx.reg_alloc.UseScratchXmm(args[0]) : ctx.reg_alloc.UseXmm(args[0]);
    const Xbyak::Xmm b = ctx.reg_alloc.UseScratchXmm(args[1]);

    MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
        DenormalsAreZero<64>(code, ctx.FPCR(fpcr_controlled), {a, b}, xmm0);
        code.cmpltpd(b, a);
    });

    ctx.reg_alloc.DefineValue(inst, b);
}

void EmitX64::EmitFPVectorGreaterEqual32(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[2].GetImmediateU1();
    const Xbyak::Xmm a = ctx.FPCR(fpcr_controlled).FZ() ? ctx.reg_alloc.UseScratchXmm(args[0]) : ctx.reg_alloc.UseXmm(args[0]);
    const Xbyak::Xmm b = ctx.reg_alloc.UseScratchXmm(args[1]);

    MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
        DenormalsAreZero<32>(code, ctx.FPCR(fpcr_controlled), {a, b}, xmm0);
        code.cmpleps(b, a);
    });

    ctx.reg_alloc.DefineValue(inst, b);
}

void EmitX64::EmitFPVectorGreaterEqual64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[2].GetImmediateU1();
    const Xbyak::Xmm a = ctx.FPCR(fpcr_controlled).FZ() ? ctx.reg_alloc.UseScratchXmm(args[0]) : ctx.reg_alloc.UseXmm(args[0]);
    const Xbyak::Xmm b = ctx.reg_alloc.UseScratchXmm(args[1]);

    MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
        DenormalsAreZero<64>(code, ctx.FPCR(fpcr_controlled), {a, b}, xmm0);
        code.cmplepd(b, a);
    });

    ctx.reg_alloc.DefineValue(inst, b);
}
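// Min/Max must follow ARM semantics for differently-signed zeros and NaN
// propagation, which differ from x64 minps/maxps (x64 returns the second
// operand on equality and on NaN). The mask/blend sequences below correct
// for both cases.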
template<size_t fsize, bool is_max>
static void EmitFPVectorMinMax(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
    const bool fpcr_controlled = inst->GetArg(2).GetU1();

    if (ctx.FPCR(fpcr_controlled).DN()) {
        auto args = ctx.reg_alloc.GetArgumentInfo(inst);

        const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
        const Xbyak::Xmm xmm_b = ctx.FPCR(fpcr_controlled).FZ() ? ctx.reg_alloc.UseScratchXmm(args[1]) : ctx.reg_alloc.UseXmm(args[1]);

        const Xbyak::Xmm mask = xmm0;
        const Xbyak::Xmm eq = ctx.reg_alloc.ScratchXmm();
        const Xbyak::Xmm nan_mask = ctx.reg_alloc.ScratchXmm();

        MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
            DenormalsAreZero<fsize>(code, ctx.FPCR(fpcr_controlled), {result, xmm_b}, mask);

            if (code.HasAVX()) {
                FCODE(vcmpeqp)(mask, result, xmm_b);
                FCODE(vcmpunordp)(nan_mask, result, xmm_b);
                if constexpr (is_max) {
                    FCODE(vandp)(eq, result, xmm_b);
                    FCODE(vmaxp)(result, result, xmm_b);
                } else {
                    FCODE(vorp)(eq, result, xmm_b);
                    FCODE(vminp)(result, result, xmm_b);
                }
                FCODE(blendvp)(result, eq);
                FCODE(vblendvp)(result, result, GetNaNVector<fsize>(code), nan_mask);
            } else {
                code.movaps(mask, result);
                code.movaps(eq, result);
                code.movaps(nan_mask, result);
                FCODE(cmpneqp)(mask, xmm_b);
                FCODE(cmpordp)(nan_mask, xmm_b);

                if constexpr (is_max) {
                    code.andps(eq, xmm_b);
                    FCODE(maxp)(result, xmm_b);
                } else {
                    code.orps(eq, xmm_b);
                    FCODE(minp)(result, xmm_b);
                }

                code.andps(result, mask);
                code.andnps(mask, eq);
                code.orps(result, mask);

                code.andps(result, nan_mask);
                code.andnps(nan_mask, GetNaNVector<fsize>(code));
                code.orps(result, nan_mask);
            }
        });

        ctx.reg_alloc.DefineValue(inst, result);
        return;
    }

    EmitThreeOpVectorOperation<fsize, DefaultIndexer>(code, ctx, inst, [&](const Xbyak::Xmm& result, Xbyak::Xmm xmm_b){
        const Xbyak::Xmm mask = xmm0;
        const Xbyak::Xmm eq = ctx.reg_alloc.ScratchXmm();

        if (ctx.FPCR(fpcr_controlled).FZ()) {
            const Xbyak::Xmm prev_xmm_b = xmm_b;
            xmm_b = ctx.reg_alloc.ScratchXmm();
            code.movaps(xmm_b, prev_xmm_b);
            DenormalsAreZero<fsize>(code, ctx.FPCR(fpcr_controlled), {result, xmm_b}, mask);
        }

        // What we are doing here is handling the case when the inputs are differently signed zeros.
        // x86-64 treats differently signed zeros as equal while ARM does not.
        // Thus if we AND together things that x86-64 thinks are equal we'll get the positive zero.
        if (code.HasAVX()) {
            FCODE(vcmpeqp)(mask, result, xmm_b);
            if constexpr (is_max) {
                FCODE(vandp)(eq, result, xmm_b);
                FCODE(vmaxp)(result, result, xmm_b);
            } else {
                FCODE(vorp)(eq, result, xmm_b);
                FCODE(vminp)(result, result, xmm_b);
            }
            FCODE(blendvp)(result, eq);
        } else {
            code.movaps(mask, result);
            code.movaps(eq, result);
            FCODE(cmpneqp)(mask, xmm_b);

            if constexpr (is_max) {
                code.andps(eq, xmm_b);
                FCODE(maxp)(result, xmm_b);
            } else {
                code.orps(eq, xmm_b);
                FCODE(minp)(result, xmm_b);
            }

            code.andps(result, mask);
            code.andnps(mask, eq);
            code.orps(result, mask);
        }
    });
}

void EmitX64::EmitFPVectorMax32(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorMinMax<32, true>(code, ctx, inst);
}

void EmitX64::EmitFPVectorMax64(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorMinMax<64, true>(code, ctx, inst);
}

void EmitX64::EmitFPVectorMin32(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorMinMax<32, false>(code, ctx, inst);
}

void EmitX64::EmitFPVectorMin64(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorMinMax<64, false>(code, ctx, inst);
}

void EmitX64::EmitFPVectorMul32(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<32, DefaultIndexer>(code, ctx, inst, &Xbyak::CodeGenerator::mulps);
}

void EmitX64::EmitFPVectorMul64(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<64, DefaultIndexer>(code, ctx, inst, &Xbyak::CodeGenerator::mulpd);
}

template<size_t fsize>
void EmitFPVectorMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
    using FPT = mp::unsigned_integer_of_size<fsize>;

    const auto fallback_fn = [](VectorArray<FPT>& result, const VectorArray<FPT>& addend, const VectorArray<FPT>& op1, const VectorArray<FPT>& op2, FP::FPCR fpcr, FP::FPSR& fpsr) {
        for (size_t i = 0; i < result.size(); i++) {
            result[i] = FP::FPMulAdd(addend[i], op1[i], op2[i], fpcr, fpsr);
        }
    };

    if constexpr (fsize != 16) {
        if (code.HasFMA() && code.HasAVX()) {
            auto args = ctx.reg_alloc.GetArgumentInfo(inst);
            const bool fpcr_controlled = args[3].GetImmediateU1();

            const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
            const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseXmm(args[0]);
            const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
            const Xbyak::Xmm xmm_c = ctx.reg_alloc.UseXmm(args[2]);
            const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();

            Xbyak::Label end, fallback;

            MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
                code.movaps(result, xmm_a);
                FCODE(vfmadd231p)(result, xmm_b, xmm_c);

                // cmpeq_uq is true for equal-or-unordered: this catches NaN
                // results as well as results whose magnitude equals the
                // smallest normal, both of which need exact softfloat handling.
                code.movaps(tmp, GetNegativeZeroVector<fsize>(code));
                code.andnps(tmp, result);
                FCODE(vcmpeq_uqp)(tmp, tmp, GetSmallestNormalVector<fsize>(code));
                code.vptest(tmp, tmp);
                code.jnz(fallback, code.T_NEAR);
                code.L(end);
            });

            code.SwitchToFarCode();
            code.L(fallback);
            code.sub(rsp, 8);
            ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
            EmitFourOpFallbackWithoutRegAlloc(code, ctx, result, xmm_a, xmm_b, xmm_c, fallback_fn, fpcr_controlled);
            ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
            code.add(rsp, 8);
            code.jmp(end, code.T_NEAR);
            code.SwitchToNearCode();

            ctx.reg_alloc.DefineValue(inst, result);
            return;
        }
    }

    EmitFourOpFallback(code, ctx, inst, fallback_fn);
}

void EmitX64::EmitFPVectorMulAdd16(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorMulAdd<16>(code, ctx, inst);
}

void EmitX64::EmitFPVectorMulAdd32(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorMulAdd<32>(code, ctx, inst);
}

void EmitX64::EmitFPVectorMulAdd64(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorMulAdd<64>(code, ctx, inst);
}
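// FMULX: as FMUL, except that (0 * infinity) returns ±2.0 instead of a NaN.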
template<size_t fsize>
static void EmitFPVectorMulX(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
    using FPT = mp::unsigned_integer_of_size<fsize>;

    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const bool fpcr_controlled = args[2].GetImmediateU1();

    if (ctx.FPCR(fpcr_controlled).DN() && code.HasAVX()) {
        const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
        const Xbyak::Xmm operand = ctx.reg_alloc.UseXmm(args[1]);
        const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
        const Xbyak::Xmm twos = ctx.reg_alloc.ScratchXmm();

        MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
            FCODE(vcmpunordp)(xmm0, result, operand);
            FCODE(vxorp)(twos, result, operand);
            FCODE(mulp)(result, operand);
            FCODE(andp)(twos, GetNegativeZeroVector<fsize>(code));
            FCODE(vcmpunordp)(tmp, result, result);
            FCODE(blendvp)(result, GetNaNVector<fsize>(code));
            FCODE(orp)(twos, GetVectorOf<fsize, false, 0, 2>(code));
            FCODE(andnp)(xmm0, tmp);
            FCODE(blendvp)(result, twos);
        });

        ctx.reg_alloc.DefineValue(inst, result);
        return;
    }

    const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
    const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseXmm(args[0]);
    const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
    const Xbyak::Xmm nan_mask = ctx.reg_alloc.ScratchXmm();

    code.movaps(nan_mask, xmm_b);
    code.movaps(result, xmm_a);
    FCODE(cmpunordp)(nan_mask, xmm_a);
    FCODE(mulp)(result, xmm_b);
    FCODE(cmpunordp)(nan_mask, result);

    const auto nan_handler = Common::FptrCast(
        [](std::array<VectorArray<FPT>, 3>& values, FP::FPCR fpcr) {
            VectorArray<FPT>& result = values[0];
            for (size_t elementi = 0; elementi < result.size(); ++elementi) {
                if (auto r = FP::ProcessNaNs(values[1][elementi], values[2][elementi])) {
                    result[elementi] = fpcr.DN() ? FP::FPInfo<FPT>::DefaultNaN() : *r;
                } else if (FP::IsNaN(result[elementi])) {
                    const FPT sign = (values[1][elementi] ^ values[2][elementi]) & FP::FPInfo<FPT>::sign_mask;
                    result[elementi] = sign | FP::FPValue<FPT, false, 0, 2>();
                }
            }
        });

    HandleNaNs<fsize, 2>(code, ctx, fpcr_controlled, {result, xmm_a, xmm_b}, nan_mask, nan_handler);

    ctx.reg_alloc.DefineValue(inst, result);
}

void EmitX64::EmitFPVectorMulX32(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorMulX<32>(code, ctx, inst);
}

void EmitX64::EmitFPVectorMulX64(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorMulX<64>(code, ctx, inst);
}

void EmitX64::EmitFPVectorNeg16(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
    const Xbyak::Address mask = code.MConst(xword, 0x8000800080008000, 0x8000800080008000);

    code.pxor(a, mask);

    ctx.reg_alloc.DefineValue(inst, a);
}

void EmitX64::EmitFPVectorNeg32(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
    const Xbyak::Address mask = code.MConst(xword, 0x8000000080000000, 0x8000000080000000);

    code.pxor(a, mask);

    ctx.reg_alloc.DefineValue(inst, a);
}

void EmitX64::EmitFPVectorNeg64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
    const Xbyak::Address mask = code.MConst(xword, 0x8000000000000000, 0x8000000000000000);

    code.pxor(a, mask);

    ctx.reg_alloc.DefineValue(inst, a);
}

void EmitX64::EmitFPVectorPairedAdd32(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<32, PairedIndexer>(code, ctx, inst, &Xbyak::CodeGenerator::haddps);
}

void EmitX64::EmitFPVectorPairedAdd64(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<64, PairedIndexer>(code, ctx, inst, &Xbyak::CodeGenerator::haddpd);
}
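// Paired-add-lower sums adjacent pairs from the low halves of both operands
// and zeroes the upper half of the result, hence the punpcklqdq with a
// zeroed register before the horizontal add.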
void EmitX64::EmitFPVectorPairedAddLower32(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<32, PairedLowerIndexer>(code, ctx, inst, [&](Xbyak::Xmm result, Xbyak::Xmm xmm_b) {
        const Xbyak::Xmm zero = ctx.reg_alloc.ScratchXmm();
        code.xorps(zero, zero);
        code.punpcklqdq(result, xmm_b);
        code.haddps(result, zero);
    });
}

void EmitX64::EmitFPVectorPairedAddLower64(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<64, PairedLowerIndexer>(code, ctx, inst, [&](Xbyak::Xmm result, Xbyak::Xmm xmm_b) {
        const Xbyak::Xmm zero = ctx.reg_alloc.ScratchXmm();
        code.xorps(zero, zero);
        code.punpcklqdq(result, xmm_b);
        code.haddpd(result, zero);
    });
}

template<typename FPT>
static void EmitRecipEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
    EmitTwoOpFallback(code, ctx, inst, [](VectorArray<FPT>& result, const VectorArray<FPT>& operand, FP::FPCR fpcr, FP::FPSR& fpsr) {
        for (size_t i = 0; i < result.size(); i++) {
            result[i] = FP::FPRecipEstimate(operand[i], fpcr, fpsr);
        }
    });
}

void EmitX64::EmitFPVectorRecipEstimate16(EmitContext& ctx, IR::Inst* inst) {
    EmitRecipEstimate<u16>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRecipEstimate32(EmitContext& ctx, IR::Inst* inst) {
    EmitRecipEstimate<u32>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRecipEstimate64(EmitContext& ctx, IR::Inst* inst) {
    EmitRecipEstimate<u64>(code, ctx, inst);
}

template<size_t fsize>
static void EmitRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
    using FPT = mp::unsigned_integer_of_size<fsize>;

    const auto fallback_fn = [](VectorArray<FPT>& result, const VectorArray<FPT>& op1, const VectorArray<FPT>& op2, FP::FPCR fpcr, FP::FPSR& fpsr) {
        for (size_t i = 0; i < result.size(); i++) {
            result[i] = FP::FPRecipStepFused(op1[i], op2[i], fpcr, fpsr);
        }
    };

    if constexpr (fsize != 16) {
        if (code.HasFMA() && code.HasAVX()) {
            auto args = ctx.reg_alloc.GetArgumentInfo(inst);
            const bool fpcr_controlled = args[2].GetImmediateU1();

            const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
            const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
            const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
            const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();

            Xbyak::Label end, fallback;

            MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
                // FRECPS: compute 2 - op1 * op2 with a single fused negate-multiply-add.
                code.movaps(result, GetVectorOf<fsize, false, 0, 2>(code));
                FCODE(vfnmadd231p)(result, operand1, operand2);

                FCODE(vcmpunordp)(tmp, result, result);
                code.vptest(tmp, tmp);
                code.jnz(fallback, code.T_NEAR);
                code.L(end);
            });

            code.SwitchToFarCode();
            code.L(fallback);
            code.sub(rsp, 8);
            ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
            EmitThreeOpFallbackWithoutRegAlloc(code, ctx, result, operand1, operand2, fallback_fn, fpcr_controlled);
            ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
            code.add(rsp, 8);
            code.jmp(end, code.T_NEAR);
            code.SwitchToNearCode();

            ctx.reg_alloc.DefineValue(inst, result);
            return;
        }
    }

    EmitThreeOpFallback(code, ctx, inst, fallback_fn);
}

void EmitX64::EmitFPVectorRecipStepFused16(EmitContext& ctx, IR::Inst* inst) {
    EmitRecipStepFused<16>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRecipStepFused32(EmitContext& ctx, IR::Inst* inst) {
    EmitRecipStepFused<32>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRecipStepFused64(EmitContext& ctx, IR::Inst* inst) {
    EmitRecipStepFused<64>(code, ctx, inst);
}
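// Round-to-integral maps directly onto SSE4.1 roundps/roundpd for the modes
// that instruction encodes; ToNearest_TieAwayFromZero and exact mode (which
// must report FPSR inexact) have no encoding and use the lookup-table
// fallback built below.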
template<size_t fsize>
void EmitFPVectorRoundInt(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
    using FPT = mp::unsigned_integer_of_size<fsize>;

    const auto rounding = static_cast<FP::RoundingMode>(inst->GetArg(1).GetU8());
    const bool exact = inst->GetArg(2).GetU1();

    if constexpr (fsize != 16) {
        if (code.HasSSE41() && rounding != FP::RoundingMode::ToNearest_TieAwayFromZero && !exact) {
            const u8 round_imm = [&]() -> u8 {
                switch (rounding) {
                case FP::RoundingMode::ToNearest_TieEven:
                    return 0b00;
                case FP::RoundingMode::TowardsPlusInfinity:
                    return 0b10;
                case FP::RoundingMode::TowardsMinusInfinity:
                    return 0b01;
                case FP::RoundingMode::TowardsZero:
                    return 0b11;
                default:
                    UNREACHABLE();
                }
            }();

            EmitTwoOpVectorOperation<fsize, DefaultIndexer, 3>(code, ctx, inst, [&](const Xbyak::Xmm& result, const Xbyak::Xmm& xmm_a){
                FCODE(roundp)(result, xmm_a, round_imm);
            });

            return;
        }
    }

    using rounding_list = mp::list<
        mp::lift_value<FP::RoundingMode::ToNearest_TieEven>,
        mp::lift_value<FP::RoundingMode::TowardsPlusInfinity>,
        mp::lift_value<FP::RoundingMode::TowardsMinusInfinity>,
        mp::lift_value<FP::RoundingMode::TowardsZero>,
        mp::lift_value<FP::RoundingMode::ToNearest_TieAwayFromZero>>;
    using exact_list = mp::list<std::true_type, std::false_type>;

    static const auto lut = Common::GenerateLookupTableFromList(
        [](auto arg) {
            return std::pair{
                mp::lower_to_tuple_v<decltype(arg)>,
                Common::FptrCast(
                    [](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
                        constexpr auto t = mp::lower_to_tuple_v<decltype(arg)>;
                        constexpr FP::RoundingMode rounding_mode = std::get<0>(t);
                        constexpr bool exact = std::get<1>(t);

                        for (size_t i = 0; i < output.size(); ++i) {
                            output[i] = static_cast<FPT>(FP::FPRoundInt(input[i], fpcr, rounding_mode, exact, fpsr));
                        }
                    })};
        },
        mp::cartesian_product<rounding_list, exact_list>{});

    EmitTwoOpFallback<3>(code, ctx, inst, lut.at(std::make_tuple(rounding, exact)));
}

void EmitX64::EmitFPVectorRoundInt16(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorRoundInt<16>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRoundInt32(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorRoundInt<32>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRoundInt64(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorRoundInt<64>(code, ctx, inst);
}

template<typename FPT>
static void EmitRSqrtEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
    EmitTwoOpFallback(code, ctx, inst, [](VectorArray<FPT>& result, const VectorArray<FPT>& operand, FP::FPCR fpcr, FP::FPSR& fpsr) {
        for (size_t i = 0; i < result.size(); i++) {
            result[i] = FP::FPRSqrtEstimate(operand[i], fpcr, fpsr);
        }
    });
}

void EmitX64::EmitFPVectorRSqrtEstimate16(EmitContext& ctx, IR::Inst* inst) {
    EmitRSqrtEstimate<u16>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRSqrtEstimate32(EmitContext& ctx, IR::Inst* inst) {
    EmitRSqrtEstimate<u32>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRSqrtEstimate64(EmitContext& ctx, IR::Inst* inst) {
    EmitRSqrtEstimate<u64>(code, ctx, inst);
}
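// FRSQRTS: computes (3 - op1 * op2) / 2. The fused negate-multiply-add gives
// 3 - op1*op2; an exponent-mask compare then detects the out-of-range
// intermediates that need softfloat treatment, and the final multiply by 0.5
// is exact.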
template<size_t fsize>
static void EmitRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
    using FPT = mp::unsigned_integer_of_size<fsize>;

    const auto fallback_fn = [](VectorArray<FPT>& result, const VectorArray<FPT>& op1, const VectorArray<FPT>& op2, FP::FPCR fpcr, FP::FPSR& fpsr) {
        for (size_t i = 0; i < result.size(); i++) {
            result[i] = FP::FPRSqrtStepFused(op1[i], op2[i], fpcr, fpsr);
        }
    };

    if constexpr (fsize != 16) {
        if (code.HasFMA() && code.HasAVX()) {
            auto args = ctx.reg_alloc.GetArgumentInfo(inst);
            const bool fpcr_controlled = args[2].GetImmediateU1();

            const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
            const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
            const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
            const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
            const Xbyak::Xmm mask = ctx.reg_alloc.ScratchXmm();

            Xbyak::Label end, fallback;

            MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
                code.vmovaps(result, GetVectorOf<fsize, false, 0, 3>(code));
                FCODE(vfnmadd231p)(result, operand1, operand2);

                // An explanation for this is given in EmitFPRSqrtStepFused.
                code.vmovaps(mask, GetVectorOf<fsize, (fsize == 32 ? 0x7f000000 : 0x7fe0000000000000)>(code));
                FCODE(vandp)(tmp, result, mask);
                ICODE(vpcmpeq)(tmp, tmp, mask);
                code.ptest(tmp, tmp);
                code.jnz(fallback, code.T_NEAR);

                FCODE(vmulp)(result, result, GetVectorOf<fsize, false, -1, 1>(code));
                code.L(end);
            });

            code.SwitchToFarCode();
            code.L(fallback);
            code.sub(rsp, 8);
            ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
            EmitThreeOpFallbackWithoutRegAlloc(code, ctx, result, operand1, operand2, fallback_fn, fpcr_controlled);
            ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
            code.add(rsp, 8);
            code.jmp(end, code.T_NEAR);
            code.SwitchToNearCode();

            ctx.reg_alloc.DefineValue(inst, result);
            return;
        }
    }

    EmitThreeOpFallback(code, ctx, inst, fallback_fn);
}

void EmitX64::EmitFPVectorRSqrtStepFused16(EmitContext& ctx, IR::Inst* inst) {
    EmitRSqrtStepFused<16>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRSqrtStepFused32(EmitContext& ctx, IR::Inst* inst) {
    EmitRSqrtStepFused<32>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRSqrtStepFused64(EmitContext& ctx, IR::Inst* inst) {
    EmitRSqrtStepFused<64>(code, ctx, inst);
}

void EmitX64::EmitFPVectorSqrt32(EmitContext& ctx, IR::Inst* inst) {
    EmitTwoOpVectorOperation<32, DefaultIndexer>(code, ctx, inst, [this](const Xbyak::Xmm& result, const Xbyak::Xmm& operand) {
        code.sqrtps(result, operand);
    });
}

void EmitX64::EmitFPVectorSqrt64(EmitContext& ctx, IR::Inst* inst) {
    EmitTwoOpVectorOperation<64, DefaultIndexer>(code, ctx, inst, [this](const Xbyak::Xmm& result, const Xbyak::Xmm& operand) {
        code.sqrtpd(result, operand);
    });
}

void EmitX64::EmitFPVectorSub32(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<32, DefaultIndexer>(code, ctx, inst, &Xbyak::CodeGenerator::subps);
}

void EmitX64::EmitFPVectorSub64(EmitContext& ctx, IR::Inst* inst) {
    EmitThreeOpVectorOperation<64, DefaultIndexer>(code, ctx, inst, &Xbyak::CodeGenerator::subpd);
}
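// Float -> fixed-point. x64 cvtt* returns the "integer indefinite" value
// (0x80000000...) on overflow and NaN rather than saturating as ARM requires,
// so the emitted code zeroes NaNs and clamps explicitly against the exact
// float bounds of the target integer range.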
template<size_t fsize, bool unsigned_>
void EmitFPVectorToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
    using FPT = mp::unsigned_integer_of_size<fsize>;

    const size_t fbits = inst->GetArg(1).GetU8();
    const auto rounding = static_cast<FP::RoundingMode>(inst->GetArg(2).GetU8());
    [[maybe_unused]] const bool fpcr_controlled = inst->GetArg(3).GetU1();

    // TODO: AVX512 implementation

    if constexpr (fsize != 16) {
        if (code.HasSSE41() && rounding != FP::RoundingMode::ToNearest_TieAwayFromZero) {
            auto args = ctx.reg_alloc.GetArgumentInfo(inst);
            const Xbyak::Xmm src = ctx.reg_alloc.UseScratchXmm(args[0]);

            MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
                const int round_imm = [&]{
                    switch (rounding) {
                    case FP::RoundingMode::ToNearest_TieEven:
                    default:
                        return 0b00;
                    case FP::RoundingMode::TowardsPlusInfinity:
                        return 0b10;
                    case FP::RoundingMode::TowardsMinusInfinity:
                        return 0b01;
                    case FP::RoundingMode::TowardsZero:
                        return 0b11;
                    }
                }();

                const auto perform_conversion = [&code, &ctx](const Xbyak::Xmm& src) {
                    // MSVC doesn't allow us to use a [&] capture, so we have to do this instead.
                    (void)ctx;

                    if constexpr (fsize == 32) {
                        code.cvttps2dq(src, src);
                    } else {
                        const Xbyak::Reg64 hi = ctx.reg_alloc.ScratchGpr();
                        const Xbyak::Reg64 lo = ctx.reg_alloc.ScratchGpr();

                        code.cvttsd2si(lo, src);
                        code.punpckhqdq(src, src);
                        code.cvttsd2si(hi, src);
                        code.movq(src, lo);
                        code.pinsrq(src, hi, 1);

                        ctx.reg_alloc.Release(hi);
                        ctx.reg_alloc.Release(lo);
                    }
                };

                if (fbits != 0) {
                    const u64 scale_factor = fsize == 32
                                               ? static_cast<u64>(fbits + 127) << 23
                                               : static_cast<u64>(fbits + 1023) << 52;
                    FCODE(mulp)(src, GetVectorOf<fsize>(code, scale_factor));
                }

                FCODE(roundp)(src, src, static_cast<u8>(round_imm));
                ZeroIfNaN<fsize>(code, src);

                constexpr u64 float_upper_limit_signed = fsize == 32 ? 0x4f000000 : 0x43e0000000000000;
                [[maybe_unused]] constexpr u64 float_upper_limit_unsigned = fsize == 32 ? 0x4f800000 : 0x43f0000000000000;

                if constexpr (unsigned_) {
                    // Zero is minimum
                    code.xorps(xmm0, xmm0);
                    FCODE(cmplep)(xmm0, src);
                    FCODE(andp)(src, xmm0);

                    // Will we exceed unsigned range?
                    const Xbyak::Xmm exceed_unsigned = ctx.reg_alloc.ScratchXmm();
                    code.movaps(exceed_unsigned, GetVectorOf<fsize, float_upper_limit_unsigned>(code));
                    FCODE(cmplep)(exceed_unsigned, src);

                    // Will we exceed signed range?
                    const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
                    code.movaps(tmp, GetVectorOf<fsize, float_upper_limit_signed>(code));
                    code.movaps(xmm0, tmp);
                    FCODE(cmplep)(xmm0, src);
                    FCODE(andp)(tmp, xmm0);
                    FCODE(subp)(src, tmp);
                    perform_conversion(src);
                    ICODE(psll)(xmm0, static_cast<u8>(fsize - 1));
                    FCODE(orp)(src, xmm0);

                    // Saturate to max
                    FCODE(orp)(src, exceed_unsigned);
                } else {
                    constexpr u64 integer_max = static_cast<FPT>(std::numeric_limits<std::conditional_t<unsigned_, FPT, std::make_signed_t<FPT>>>::max());

                    code.movaps(xmm0, GetVectorOf<fsize, float_upper_limit_signed>(code));
                    FCODE(cmplep)(xmm0, src);
                    perform_conversion(src);
                    FCODE(blendvp)(src, GetVectorOf<fsize, integer_max>(code));
                }
            });

            ctx.reg_alloc.DefineValue(inst, src);
            return;
        }
    }

    using fbits_list = mp::lift_sequence<std::make_index_sequence<fsize + 1>>;
    using rounding_list = mp::list<
        mp::lift_value<FP::RoundingMode::ToNearest_TieEven>,
        mp::lift_value<FP::RoundingMode::TowardsPlusInfinity>,
        mp::lift_value<FP::RoundingMode::TowardsMinusInfinity>,
        mp::lift_value<FP::RoundingMode::TowardsZero>,
        mp::lift_value<FP::RoundingMode::ToNearest_TieAwayFromZero>>;

    static const auto lut = Common::GenerateLookupTableFromList(
        [](auto arg) {
            return std::pair{
                mp::lower_to_tuple_v<decltype(arg)>,
                Common::FptrCast(
                    [](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
                        constexpr auto t = mp::lower_to_tuple_v<decltype(arg)>;
                        constexpr size_t fbits = std::get<0>(t);
                        constexpr FP::RoundingMode rounding_mode = std::get<1>(t);

                        for (size_t i = 0; i < output.size(); ++i) {
                            output[i] = static_cast<FPT>(FP::FPToFixed(fsize, input[i], fbits, unsigned_, fpcr, rounding_mode, fpsr));
                        }
                    })};
        },
        mp::cartesian_product<fbits_list, rounding_list>{});

    EmitTwoOpFallback<3>(code, ctx, inst, lut.at(std::make_tuple(fbits, rounding)));
}

void EmitX64::EmitFPVectorToSignedFixed16(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorToFixed<16, false>(code, ctx, inst);
}

void EmitX64::EmitFPVectorToSignedFixed32(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorToFixed<32, false>(code, ctx, inst);
}

void EmitX64::EmitFPVectorToSignedFixed64(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorToFixed<64, false>(code, ctx, inst);
}

void EmitX64::EmitFPVectorToUnsignedFixed16(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorToFixed<16, true>(code, ctx, inst);
}

void EmitX64::EmitFPVectorToUnsignedFixed32(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorToFixed<32, true>(code, ctx, inst);
}

void EmitX64::EmitFPVectorToUnsignedFixed64(EmitContext& ctx, IR::Inst* inst) {
    EmitFPVectorToFixed<64, true>(code, ctx, inst);
}

} // namespace Dynarmic::Backend::X64