backend\A64\emit_a64_floating_point.cpp: Implement VADD, VSUB, VMUL and other VFP arithmetic instructions
commit 410dcf87a5
parent 4459188bfc
@@ -352,6 +352,29 @@ void EmitA64::EmitFPNeg64(EmitContext& ctx, IR::Inst* inst) {
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
+void EmitA64::EmitFPAdd32(EmitContext& ctx, IR::Inst* inst) {
+    FPThreeOp<32, void(Arm64Gen::ARM64FloatEmitter::*)(ARM64Reg, ARM64Reg, ARM64Reg)>(code, ctx, inst, &Arm64Gen::ARM64FloatEmitter::FADD);
+}
+
+void EmitA64::EmitFPAdd64(EmitContext& ctx, IR::Inst* inst) {
+    FPThreeOp<64, void(Arm64Gen::ARM64FloatEmitter::*)(ARM64Reg, ARM64Reg, ARM64Reg)>(code, ctx, inst, &Arm64Gen::ARM64FloatEmitter::FADD);
+}
+
+void EmitA64::EmitFPDiv32(EmitContext& ctx, IR::Inst* inst) {
+    FPThreeOp<32, void(Arm64Gen::ARM64FloatEmitter::*)(ARM64Reg, ARM64Reg, ARM64Reg)>(code, ctx, inst, &Arm64Gen::ARM64FloatEmitter::FDIV);
+}
+
+void EmitA64::EmitFPDiv64(EmitContext& ctx, IR::Inst* inst) {
+    FPThreeOp<64, void(Arm64Gen::ARM64FloatEmitter::*)(ARM64Reg, ARM64Reg, ARM64Reg)>(code, ctx, inst, &Arm64Gen::ARM64FloatEmitter::FDIV);
+}
+
+void EmitA64::EmitFPMul32(EmitContext& ctx, IR::Inst* inst) {
+    FPThreeOp<32, void(Arm64Gen::ARM64FloatEmitter::*)(ARM64Reg, ARM64Reg, ARM64Reg)>(code, ctx, inst, &Arm64Gen::ARM64FloatEmitter::FMUL);
+}
+
+void EmitA64::EmitFPMul64(EmitContext& ctx, IR::Inst* inst) {
+    FPThreeOp<64, void(Arm64Gen::ARM64FloatEmitter::*)(ARM64Reg, ARM64Reg, ARM64Reg)>(code, ctx, inst, &Arm64Gen::ARM64FloatEmitter::FMUL);
+}
 void EmitA64::EmitFPSqrt32(EmitContext& ctx, IR::Inst* inst) {
     FPTwoOp<32>(code, ctx, inst, &Arm64Gen::ARM64FloatEmitter::FSQRT);
 }
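All six emitters above funnel into a shared FPThreeOp helper that binds the two IR operands to host FP registers and invokes the given ARM64FloatEmitter member function pointer (FADD/FDIV/FMUL). The helper's definition is outside this hunk; the following is a minimal sketch of the shape it plausibly takes, assuming dynarmic-style register-allocator calls (GetArgumentInfo, UseScratchFpr, UseFpr, DefineValue) and Dolphin-style Arm64Gen register views (EncodeRegToSingle/EncodeRegToDouble). Every name here is an assumption, not the commit's actual code.

// Sketch only: result = fn(result, operand), at single or double precision.
template<size_t size, typename FnT>
static void FPThreeOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, FnT fn) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);           // assumed RegAlloc API
    ARM64Reg result = ctx.reg_alloc.UseScratchFpr(args[0]);    // operand 1, reused as destination
    ARM64Reg operand = ctx.reg_alloc.UseFpr(args[1]);          // operand 2
    // Select the S (32-bit) or D (64-bit) view of each vector register.
    result = size == 32 ? EncodeRegToSingle(result) : EncodeRegToDouble(result);
    operand = size == 32 ? EncodeRegToSingle(operand) : EncodeRegToDouble(operand);
    (code.fp_emitter.*fn)(result, result, operand);            // e.g. FADD Sd, Sn, Sm
    ctx.reg_alloc.DefineValue(inst, result);
}

FPTwoOp, used by the FSQRT emitters, would be the same shape with a single source operand.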
@@ -360,6 +383,14 @@ void EmitA64::EmitFPSqrt64(EmitContext& ctx, IR::Inst* inst) {
     FPTwoOp<64>(code, ctx, inst, &Arm64Gen::ARM64FloatEmitter::FSQRT);
 }
 
+void EmitA64::EmitFPSub32(EmitContext& ctx, IR::Inst* inst) {
+    FPThreeOp<32, void(Arm64Gen::ARM64FloatEmitter::*)(ARM64Reg, ARM64Reg, ARM64Reg)>(code, ctx, inst, &Arm64Gen::ARM64FloatEmitter::FSUB);
+}
+
+void EmitA64::EmitFPSub64(EmitContext& ctx, IR::Inst* inst) {
+    FPThreeOp<64, void(Arm64Gen::ARM64FloatEmitter::*)(ARM64Reg, ARM64Reg, ARM64Reg)>(code, ctx, inst, &Arm64Gen::ARM64FloatEmitter::FSUB);
+}
+
 static ARM64Reg SetFpscrNzcvFromFlags(BlockOfCode& code, EmitContext& ctx) {
     ARM64Reg nzcv = ctx.reg_alloc.ScratchGpr();
     // Fpsr's nzcv is copied across integer nzcv
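The context lines above cut SetFpscrNzcvFromFlags off after its first statement. Judging from its comment, it captures the host's integer NZCV flags (set by a preceding FCMP) into a GPR so they can be written to the guest's FPSCR; a hedged completion, assuming Dolphin's Arm64Gen exposes MRS(reg, FIELD_NZCV):

// Hypothetical completion of the truncated function above.
static ARM64Reg SetFpscrNzcvFromFlags(BlockOfCode& code, EmitContext& ctx) {
    ARM64Reg nzcv = ctx.reg_alloc.ScratchGpr();
    // Fpsr's nzcv is copied across integer nzcv
    code.MRS(nzcv, FIELD_NZCV);   // host flags land in bits 31:28 of nzcv
    return nzcv;
}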
@@ -465,14 +465,14 @@ OPCODE(CountLeadingZeros32, U32, U32
 
 // Floating-point operations
 //OPCODE(FPAbs16,        U16, U16           )
-//OPCODE(FPAdd32,        U32, U32, U32      )
-//OPCODE(FPAdd64,        U64, U64, U64      )
-//OPCODE(FPDiv32,        U32, U32, U32      )
-//OPCODE(FPDiv64,        U64, U64, U64      )
 OPCODE(FPAbs32,          U32, U32           )
 OPCODE(FPAbs64,          U64, U64           )
+OPCODE(FPAdd32,          U32, U32, U32      )
+OPCODE(FPAdd64,          U64, U64, U64      )
 OPCODE(FPCompare32,      NZCV, U32, U32, U1 )
 OPCODE(FPCompare64,      NZCV, U64, U64, U1 )
+OPCODE(FPDiv32,          U32, U32, U32      )
+OPCODE(FPDiv64,          U64, U64, U64      )
 //OPCODE(FPMax32,        U32, U32, U32      )
 //OPCODE(FPMax64,        U64, U64, U64      )
 //OPCODE(FPMaxNumeric32, U32, U32, U32      )
@@ -481,8 +481,8 @@ OPCODE(FPCompare64, NZCV, U64,
 //OPCODE(FPMin64,        U64, U64, U64      )
 //OPCODE(FPMinNumeric32, U32, U32, U32      )
 //OPCODE(FPMinNumeric64, U64, U64, U64      )
-//OPCODE(FPMul32,        U32, U32, U32      )
-//OPCODE(FPMul64,        U64, U64, U64      )
+OPCODE(FPMul32,          U32, U32, U32      )
+OPCODE(FPMul64,          U64, U64, U64      )
 //OPCODE(FPMulAdd16,     U16, U16, U16, U16 )
 //OPCODE(FPMulAdd32,     U32, U32, U32, U32 )
 //OPCODE(FPMulAdd64,     U64, U64, U64, U64 )
@@ -509,10 +509,10 @@ OPCODE(FPNeg64, U64, U64
 //OPCODE(FPRSqrtStepFused16, U16, U16, U16  )
 //OPCODE(FPRSqrtStepFused32, U32, U32, U32  )
 //OPCODE(FPRSqrtStepFused64, U64, U64, U64  )
-//OPCODE(FPSub32,        U32, U32, U32      )
-//OPCODE(FPSub64,        U64, U64, U64      )
 OPCODE(FPSqrt32,         U32, U32           )
 OPCODE(FPSqrt64,         U64, U64           )
+OPCODE(FPSub32,          U32, U32, U32      )
+OPCODE(FPSub64,          U64, U64, U64      )
 
 // Floating-point conversions
 OPCODE(FPHalfToDouble,   U64, U16, U8       )
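Each OPCODE row lists the IR opcode's name, its result type, and its argument types; uncommenting a row declares the opcode, so the frontend may emit it and every backend must handle it. Tables like this are normally consumed as X-macros: each includer defines OPCODE to extract whichever columns it needs, then includes the file. A self-contained illustration of that pattern (the real project re-includes the .inc file; the inline list and macro names here are hypothetical):

// X-macro pattern: one list, many expansions.
#define OPCODE_LIST(OPCODE)          \
    OPCODE(FPAdd32, U32, U32, U32)   \
    OPCODE(FPSub32, U32, U32, U32)

#define MAKE_ENUM(name, type, ...) name,
enum class Opcode {
    OPCODE_LIST(MAKE_ENUM)   // expands to: FPAdd32, FPSub32,
};
#undef MAKE_ENUM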
@@ -1,15 +1,15 @@
 // cccc1110________----101-__-0----
 
 // Floating-point three-register data processing instructions
-//INST(vfp_VMLA, "VMLA", "cccc11100D00nnnndddd101zN0M0mmmm") // VFPv2
-//INST(vfp_VMLS, "VMLS", "cccc11100D00nnnndddd101zN1M0mmmm") // VFPv2
-//INST(vfp_VNMLS, "VNMLS", "cccc11100D01nnnndddd101zN0M0mmmm") // VFPv2
-//INST(vfp_VNMLA, "VNMLA", "cccc11100D01nnnndddd101zN1M0mmmm") // VFPv2
-//INST(vfp_VMUL, "VMUL", "cccc11100D10nnnndddd101zN0M0mmmm") // VFPv2
-//INST(vfp_VNMUL, "VNMUL", "cccc11100D10nnnndddd101zN1M0mmmm") // VFPv2
-//INST(vfp_VADD, "VADD", "cccc11100D11nnnndddd101zN0M0mmmm") // VFPv2
-//INST(vfp_VSUB, "VSUB", "cccc11100D11nnnndddd101zN1M0mmmm") // VFPv2
-//INST(vfp_VDIV, "VDIV", "cccc11101D00nnnndddd101zN0M0mmmm") // VFPv2
+INST(vfp_VMLA, "VMLA", "cccc11100D00nnnndddd101zN0M0mmmm") // VFPv2
+INST(vfp_VMLS, "VMLS", "cccc11100D00nnnndddd101zN1M0mmmm") // VFPv2
+INST(vfp_VNMLS, "VNMLS", "cccc11100D01nnnndddd101zN0M0mmmm") // VFPv2
+INST(vfp_VNMLA, "VNMLA", "cccc11100D01nnnndddd101zN1M0mmmm") // VFPv2
+INST(vfp_VMUL, "VMUL", "cccc11100D10nnnndddd101zN0M0mmmm") // VFPv2
+INST(vfp_VNMUL, "VNMUL", "cccc11100D10nnnndddd101zN1M0mmmm") // VFPv2
+INST(vfp_VADD, "VADD", "cccc11100D11nnnndddd101zN0M0mmmm") // VFPv2
+INST(vfp_VSUB, "VSUB", "cccc11100D11nnnndddd101zN1M0mmmm") // VFPv2
+INST(vfp_VDIV, "VDIV", "cccc11101D00nnnndddd101zN0M0mmmm") // VFPv2
 
 // Floating-point move instructions
 INST(vfp_VMOV_u32_f64, "VMOV (core to f64)", "cccc11100000ddddtttt1011D0010000") // VFPv2
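In each INST row the third argument is a 32-bit encoding template: literal 0 and 1 bits must match exactly, while letters mark operand fields (cccc = ARM condition, nnnn/dddd/mmmm = register fields, D/N/M = the VFP registers' extra high bit, z = precision: 0 for .F32, 1 for .F64). A decoder can turn each template into a mask/expected pair and test fetched words against it; a small self-contained sketch of that idea (BitPattern is a hypothetical helper, not the project's decoder):

#include <cstdint>
#include <string_view>

// '0'/'1' are fixed bits; any letter is an operand field and matches anything.
struct BitPattern {
    std::uint32_t mask = 0, expect = 0;
    constexpr explicit BitPattern(std::string_view s) {
        for (char c : s) {
            mask <<= 1;
            expect <<= 1;
            if (c == '0' || c == '1') {
                mask |= 1;              // this bit position is significant
                expect |= (c == '1');   // and must hold this value
            }
        }
    }
    constexpr bool Matches(std::uint32_t inst) const { return (inst & mask) == expect; }
};

// The VADD template from the table above.
constexpr BitPattern vadd{"cccc11100D11nnnndddd101zN0M0mmmm"};
static_assert(vadd.Matches(0xEE300A00), "VADD.F32 s0, s0, s0 with cond=AL");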