From 6da041111172b7ce6070f566ac8c3b033e48c180 Mon Sep 17 00:00:00 2001
From: Lioncash
Date: Sat, 13 Apr 2019 18:52:36 -0400
Subject: [PATCH] frontend/ir_emitter: Add half-precision opcode for FPRecipStepFused

---
 src/backend/x64/emit_x64_floating_point.cpp | 60 +++++++++++----------
 src/frontend/ir/ir_emitter.cpp              | 15 ++++--
 src/frontend/ir/ir_emitter.h                |  2 +-
 src/frontend/ir/microinstruction.cpp        |  1 +
 src/frontend/ir/opcodes.inc                 |  1 +
 5 files changed, 48 insertions(+), 31 deletions(-)

diff --git a/src/backend/x64/emit_x64_floating_point.cpp b/src/backend/x64/emit_x64_floating_point.cpp
index 2321b77b..43135eab 100644
--- a/src/backend/x64/emit_x64_floating_point.cpp
+++ b/src/backend/x64/emit_x64_floating_point.cpp
@@ -787,40 +787,42 @@ template<size_t fsize>
 static void EmitFPRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
     using FPT = mp::unsigned_integer_of_size<fsize>;
 
-    if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA)) {
-        auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    if constexpr (fsize != 16) {
+        if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA)) {
+            auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
-        Xbyak::Label end, fallback;
+            Xbyak::Label end, fallback;
 
-        const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
-        const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
-        const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+            const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
+            const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
+            const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
 
-        code.movaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 2>()));
-        FCODE(vfnmadd231s)(result, operand1, operand2);
-        FCODE(ucomis)(result, result);
-        code.jp(fallback, code.T_NEAR);
-        code.L(end);
+            code.movaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 2>()));
+            FCODE(vfnmadd231s)(result, operand1, operand2);
+            FCODE(ucomis)(result, result);
+            code.jp(fallback, code.T_NEAR);
+            code.L(end);
 
-        code.SwitchToFarCode();
-        code.L(fallback);
+            code.SwitchToFarCode();
+            code.L(fallback);
 
-        code.sub(rsp, 8);
-        ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-        code.movq(code.ABI_PARAM1, operand1);
-        code.movq(code.ABI_PARAM2, operand2);
-        code.mov(code.ABI_PARAM3.cvt32(), ctx.FPCR().Value());
-        code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
-        code.CallFunction(&FP::FPRecipStepFused<FPT>);
-        code.movq(result, code.ABI_RETURN);
-        ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-        code.add(rsp, 8);
+            code.sub(rsp, 8);
+            ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
+            code.movq(code.ABI_PARAM1, operand1);
+            code.movq(code.ABI_PARAM2, operand2);
+            code.mov(code.ABI_PARAM3.cvt32(), ctx.FPCR().Value());
+            code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
+            code.CallFunction(&FP::FPRecipStepFused<FPT>);
+            code.movq(result, code.ABI_RETURN);
+            ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
+            code.add(rsp, 8);
 
-        code.jmp(end, code.T_NEAR);
-        code.SwitchToNearCode();
+            code.jmp(end, code.T_NEAR);
+            code.SwitchToNearCode();
 
-        ctx.reg_alloc.DefineValue(inst, result);
-        return;
+            ctx.reg_alloc.DefineValue(inst, result);
+            return;
+        }
     }
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
@@ -830,6 +832,10 @@ static void EmitFPRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
     code.CallFunction(&FP::FPRecipStepFused<FPT>);
 }
 
+void EmitX64::EmitFPRecipStepFused16(EmitContext& ctx, IR::Inst* inst) {
+    EmitFPRecipStepFused<16>(code, ctx, inst);
+}
+
 void EmitX64::EmitFPRecipStepFused32(EmitContext& ctx, IR::Inst* inst) {
     EmitFPRecipStepFused<32>(code, ctx, inst);
 }
diff --git a/src/frontend/ir/ir_emitter.cpp b/src/frontend/ir/ir_emitter.cpp
index 27527f6c..42032457 100644
--- a/src/frontend/ir/ir_emitter.cpp
+++ b/src/frontend/ir/ir_emitter.cpp
@@ -1943,11 +1943,20 @@ U16U32U64 IREmitter::FPRecipExponent(const U16U32U64& a) {
     }
 }
 
-U32U64 IREmitter::FPRecipStepFused(const U32U64& a, const U32U64& b) {
-    if (a.GetType() == Type::U32) {
+U16U32U64 IREmitter::FPRecipStepFused(const U16U32U64& a, const U16U32U64& b) {
+    ASSERT(a.GetType() == b.GetType());
+
+    switch (a.GetType()) {
+    case Type::U16:
+        return Inst<U16>(Opcode::FPRecipStepFused16, a, b);
+    case Type::U32:
         return Inst<U32>(Opcode::FPRecipStepFused32, a, b);
+    case Type::U64:
+        return Inst<U64>(Opcode::FPRecipStepFused64, a, b);
+    default:
+        UNREACHABLE();
+        return U16U32U64{};
     }
-    return Inst<U64>(Opcode::FPRecipStepFused64, a, b);
 }
 
 U16U32U64 IREmitter::FPRoundInt(const U16U32U64& a, FP::RoundingMode rounding, bool exact) {
diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h
index 09935cf6..5f6d32fc 100644
--- a/src/frontend/ir/ir_emitter.h
+++ b/src/frontend/ir/ir_emitter.h
@@ -307,7 +307,7 @@ public:
     U16U32U64 FPNeg(const U16U32U64& a);
     U32U64 FPRecipEstimate(const U32U64& a);
     U16U32U64 FPRecipExponent(const U16U32U64& a);
-    U32U64 FPRecipStepFused(const U32U64& a, const U32U64& b);
+    U16U32U64 FPRecipStepFused(const U16U32U64& a, const U16U32U64& b);
     U16U32U64 FPRoundInt(const U16U32U64& a, FP::RoundingMode rounding, bool exact);
     U32U64 FPRSqrtEstimate(const U32U64& a);
     U32U64 FPRSqrtStepFused(const U32U64& a, const U32U64& b);
diff --git a/src/frontend/ir/microinstruction.cpp b/src/frontend/ir/microinstruction.cpp
index a7c3386e..d0ad05b8 100644
--- a/src/frontend/ir/microinstruction.cpp
+++ b/src/frontend/ir/microinstruction.cpp
@@ -277,6 +277,7 @@ bool Inst::ReadsFromAndWritesToFPSRCumulativeExceptionBits() const {
     case Opcode::FPRecipExponent16:
     case Opcode::FPRecipExponent32:
     case Opcode::FPRecipExponent64:
+    case Opcode::FPRecipStepFused16:
     case Opcode::FPRecipStepFused32:
     case Opcode::FPRecipStepFused64:
     case Opcode::FPRoundInt16:
diff --git a/src/frontend/ir/opcodes.inc b/src/frontend/ir/opcodes.inc
index c7a4c227..d266190b 100644
--- a/src/frontend/ir/opcodes.inc
+++ b/src/frontend/ir/opcodes.inc
@@ -496,6 +496,7 @@ OPCODE(FPRecipEstimate64,                                   U64,            U64
 OPCODE(FPRecipExponent16,                                   U16,            U16                                             )
 OPCODE(FPRecipExponent32,                                   U32,            U32                                             )
 OPCODE(FPRecipExponent64,                                   U64,            U64                                             )
+OPCODE(FPRecipStepFused16,                                  U16,            U16,            U16                             )
 OPCODE(FPRecipStepFused32,                                  U32,            U32,            U32                             )
 OPCODE(FPRecipStepFused64,                                  U64,            U64,            U64                             )
 OPCODE(FPRoundInt16,                                        U16,            U16,            U8,             U1              )
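
For reference, below is a minimal, self-contained sketch of the opcode dispatch this
patch adds to IREmitter::FPRecipStepFused. The Type/Opcode enums and the
SelectFPRecipStepFused helper are illustrative stand-ins, not dynarmic's actual IR
types; only the selection logic mirrors the patched code.

#include <cassert>
#include <cstdio>

// Illustrative stand-ins for IR::Type and IR::Opcode (hypothetical, not
// dynarmic's real definitions).
enum class Type { U16, U32, U64 };
enum class Opcode { FPRecipStepFused16, FPRecipStepFused32, FPRecipStepFused64 };

// Mirrors the switch added to IREmitter::FPRecipStepFused: both operands must
// share a width, and each width maps to its own sized opcode.
Opcode SelectFPRecipStepFused(Type a, Type b) {
    assert(a == b);  // the patched emitter ASSERTs that operand types match
    switch (a) {
    case Type::U16: return Opcode::FPRecipStepFused16;
    case Type::U32: return Opcode::FPRecipStepFused32;
    case Type::U64: return Opcode::FPRecipStepFused64;
    }
    assert(false);   // corresponds to the emitter's UNREACHABLE() default
    return Opcode::FPRecipStepFused64;
}

int main() {
    // With this patch, a half-precision operand pair resolves to the new
    // 16-bit opcode instead of being unrepresentable in the IR.
    const bool picks_fp16 =
        SelectFPRecipStepFused(Type::U16, Type::U16) == Opcode::FPRecipStepFused16;
    std::printf("half-precision dispatch selected: %s\n", picks_fp16 ? "yes" : "no");
}

Note that on the backend side the 16-bit case never takes the FMA fast path: the
`if constexpr (fsize != 16)` guard compiles it out, so EmitFPRecipStepFused16 always
goes through the software fallback (code.CallFunction on FP::FPRecipStepFused<FPT>).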