From d32d6fe598b8ad3153021ee3f241df0580ea0321 Mon Sep 17 00:00:00 2001
From: MerryMage <MerryMage@users.noreply.github.com>
Date: Sun, 24 Mar 2019 11:42:22 +0000
Subject: [PATCH] emit_x64_floating_point: F16C implementation of
 FPHalfToSingle and FPHalfToDouble

---
 src/backend/x64/emit_x64_floating_point.cpp | 33 +++++++++++++++++++++
 1 file changed, 33 insertions(+)
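
Note (illustration only, not part of the applied change): the two widening paths below rely on
the fact that every half-precision value is exactly representable in single precision (11
significand bits fit into 24) and every single in a double, so converting in two steps cannot
round. A minimal host-side sanity check of that property, assuming an F16C-capable CPU, a
compiler flag such as -mf16c, and the scalar F16C intrinsics _cvtsh_ss/_cvtss_sh from
<immintrin.h>:

    #include <cstdint>
    #include <cstdio>
    #include <immintrin.h>

    int main() {
        for (std::uint32_t bits = 0; bits < 0x10000; ++bits) {
            const std::uint16_t h = static_cast<std::uint16_t>(bits);
            // Skip NaNs: the conversion quiets signaling NaNs, so their bit patterns need not round-trip.
            if ((h & 0x7C00) == 0x7C00 && (h & 0x03FF) != 0) {
                continue;
            }

            const float f = _cvtsh_ss(h);             // half -> single: exact, no rounding possible
            const double d = static_cast<double>(f);  // single -> double: exact, no rounding possible
            const std::uint16_t back = _cvtss_sh(static_cast<float>(d), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
            if (back != h) {
                std::printf("round-trip mismatch at 0x%04X\n", static_cast<unsigned>(h));
                return 1;
            }
        }
        std::printf("every non-NaN half value widens and round-trips exactly\n");
        return 0;
    }

This exactness is also why the F16C paths below never consult rounding_mode: no rounding
decision is ever taken when widening.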

diff --git a/src/backend/x64/emit_x64_floating_point.cpp b/src/backend/x64/emit_x64_floating_point.cpp
index 4bbdedbd..e4d97e86 100644
--- a/src/backend/x64/emit_x64_floating_point.cpp
+++ b/src/backend/x64/emit_x64_floating_point.cpp
@@ -1062,6 +1062,21 @@ void EmitX64::EmitFPHalfToDouble(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const auto rounding_mode = static_cast<FP::RoundingMode>(args[1].GetImmediateU8());
 
+    if (code.DoesCpuSupport(Xbyak::util::Cpu::tF16C) && !ctx.FPCR().AHP() && !ctx.FPCR().FZ16()) {
+        const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+        const Xbyak::Xmm value = ctx.reg_alloc.UseXmm(args[0]);
+
+        // Converting in two steps here is acceptable as each step expands precision, so no rounding occurs.
+        code.vcvtph2ps(result, value);
+        code.vcvtps2pd(result, result);
+        if (ctx.FPCR().DN()) {
+            ForceToDefaultNaN<64>(code, result);
+        }
+
+        ctx.reg_alloc.DefineValue(inst, result);
+        return;
+    }
+
     ctx.reg_alloc.HostCall(inst, args[0]);
     code.mov(code.ABI_PARAM2.cvt32(), ctx.FPCR().Value());
     code.mov(code.ABI_PARAM3.cvt32(), static_cast<u32>(rounding_mode));
@@ -1073,6 +1088,19 @@ void EmitX64::EmitFPHalfToSingle(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const auto rounding_mode = static_cast<FP::RoundingMode>(args[1].GetImmediateU8());
 
+    if (code.DoesCpuSupport(Xbyak::util::Cpu::tF16C) && !ctx.FPCR().AHP() && !ctx.FPCR().FZ16()) {
+        const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+        const Xbyak::Xmm value = ctx.reg_alloc.UseXmm(args[0]);
+
+        code.vcvtph2ps(result, value);
+        if (ctx.FPCR().DN()) {
+            ForceToDefaultNaN<32>(code, result);
+        }
+
+        ctx.reg_alloc.DefineValue(inst, result);
+        return;
+    }
+
     ctx.reg_alloc.HostCall(inst, args[0]);
     code.mov(code.ABI_PARAM2.cvt32(), ctx.FPCR().Value());
     code.mov(code.ABI_PARAM3.cvt32(), static_cast<u32>(rounding_mode));
@@ -1106,6 +1134,8 @@ void EmitX64::EmitFPSingleToHalf(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const auto rounding_mode = static_cast<FP::RoundingMode>(args[1].GetImmediateU8());
 
+    // TODO: F16C implementation requires ForceToDefaultNaN<16> to be implemented.
+
     ctx.reg_alloc.HostCall(inst, args[0]);
     code.mov(code.ABI_PARAM2.cvt32(), ctx.FPCR().Value());
     code.mov(code.ABI_PARAM3.cvt32(), static_cast<u32>(rounding_mode));
@@ -1117,6 +1147,9 @@ void EmitX64::EmitFPDoubleToHalf(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const auto rounding_mode = static_cast<FP::RoundingMode>(args[1].GetImmediateU8());
 
+    // NOTE: Do not convert via an intermediate single-precision value here; rounding twice is inaccurate (double rounding).
+    //       To be correctly rounded, the first conversion would need to be "round-to-odd", which x64 doesn't support.
+
     ctx.reg_alloc.HostCall(inst, args[0]);
     code.mov(code.ABI_PARAM2.cvt32(), ctx.FPCR().Value());
     code.mov(code.ABI_PARAM3.cvt32(), static_cast<u32>(rounding_mode));
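
Appendix (illustration only, not part of the applied change): a minimal sketch of the
double-rounding hazard described by the NOTE in EmitFPDoubleToHalf, assuming an F16C-capable
host, -mf16c, and C++17 hexadecimal float literals. A double just above the midpoint of two
adjacent half values must round up, but rounding it to single first lands exactly on that
midpoint, and the ties-to-even rule of the second rounding then rounds down:

    #include <cstdint>
    #include <cstdio>
    #include <immintrin.h>

    int main() {
        // Adjacent half-precision values: 1.0 (0x3C00) and 1.0 + 2^-10 (0x3C01).
        // d lies just above their midpoint 1.0 + 2^-11, so the correctly rounded
        // double -> half result is 0x3C01.
        const double d = 1.0 + 0x1p-11 + 0x1p-52;

        // Two-step conversion: double -> single rounds d to exactly 1.0 + 2^-11 (the midpoint)...
        const float f = static_cast<float>(d);
        // ...and single -> half then resolves the tie to even, giving 0x3C00 instead of 0x3C01.
        const std::uint16_t h = _cvtss_sh(f, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

        std::printf("two-step result: 0x%04X, correctly rounded result: 0x3C01\n", static_cast<unsigned>(h));

        // Round-to-odd in the first step would instead give 1.0 + 2^-11 + 2^-23, which is
        // no longer a tie and rounds up correctly; x64 provides no such rounding mode.
        return 0;
    }

This is why FPDoubleToHalf keeps the soft-float host call rather than narrowing via an
intermediate single-precision value.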