backend\A64\emit_a64_data_processing.cpp: Implement 64-bit LSL and ROR instructions

Also EmitTestBit
This commit is contained in:
SachinVin 2019-07-19 20:09:15 +05:30
parent bb70cdd28c
commit 9f227edfe4
2 changed files with 57 additions and 66 deletions

View File

@ -103,15 +103,14 @@ void EmitA64::EmitIsZero64(EmitContext& ctx, IR::Inst* inst) {
ctx.reg_alloc.DefineValue(inst, result);
}
//void EmitA64::EmitTestBit(EmitContext& ctx, IR::Inst* inst) {
// auto args = ctx.reg_alloc.GetArgumentInfo(inst);
// Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
// ASSERT(args[1].IsImmediate());
// // TODO: Flag optimization
// code.bt(result, args[1].GetImmediateU8());
// code.setc(result.cvt8());
// ctx.reg_alloc.DefineValue(inst, result);
//}
void EmitA64::EmitTestBit(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    // The bit index must be a compile-time immediate for this emitter.
    ASSERT(args[1].IsImmediate());

    ARM64Reg operand = ctx.reg_alloc.UseScratchGpr(args[0]);

    // TODO: Flag optimization
    // UBFX extracts a 1-bit field at the requested index into bit 0,
    // zeroing all other bits — i.e. the register becomes 0 or 1.
    code.UBFX(operand, operand, args[1].GetImmediateU8(), 1);

    ctx.reg_alloc.DefineValue(inst, operand);
}
static void EmitConditionalSelect(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, int bitsize) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
@ -293,38 +292,31 @@ void EmitA64::EmitLogicalShiftLeft32(EmitContext& ctx, IR::Inst* inst) {
}
}
//void EmitA64::EmitLogicalShiftLeft64(EmitContext& ctx, IR::Inst* inst) {
// auto args = ctx.reg_alloc.GetArgumentInfo(inst);
// auto& operand_arg = args[0];
// auto& shift_arg = args[1];
//
// if (shift_arg.IsImmediate()) {
// Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(operand_arg);
// u8 shift = shift_arg.GetImmediateU8();
//
// if (shift < 64) {
// code.shl(result, shift);
// } else {
// code.xor_(result.cvt32(), result.cvt32());
// }
//
// ctx.reg_alloc.DefineValue(inst, result);
// } else {
// ctx.reg_alloc.Use(shift_arg, HostLoc::RCX);
// Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(operand_arg);
// Xbyak::Reg64 zero = ctx.reg_alloc.ScratchGpr();
//
// // The x64 SHL instruction masks the shift count by 0x3F before performing the shift.
// // ARM differs from the behaviour: It does not mask the count, so shifts above 63 result in zeros.
//
// code.shl(result, code.cl);
// code.xor_(zero.cvt32(), zero.cvt32());
// code.cmp(code.cl, 64);
// code.cmovnb(result, zero);
//
// ctx.reg_alloc.DefineValue(inst, result);
// }
//}
// Emits code for a 64-bit logical shift left.
//
// IR contract (see the x64 reference emitter this replaces): the shift amount is a
// U8, and any shift of 64 or more must yield zero. A64's LSLV masks the shift
// count by 0x3F before shifting, so the variable-shift path must clamp explicitly.
void EmitA64::EmitLogicalShiftLeft64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    auto& operand_arg = args[0];
    auto& shift_arg = args[1];

    if (shift_arg.IsImmediate()) {
        ARM64Reg result = ctx.reg_alloc.UseScratchGpr(operand_arg);
        const u8 shift = shift_arg.GetImmediateU8();

        if (shift < 64) {
            code.LSL(result, result, shift);
        } else {
            // Shifting by the full operand width or more produces zero.
            code.MOV(result, ZR);
        }

        ctx.reg_alloc.DefineValue(inst, result);
    } else {
        ARM64Reg result = ctx.reg_alloc.UseScratchGpr(operand_arg);
        // Scratch: we clobber the shift register below when masking it.
        ARM64Reg shift = ctx.reg_alloc.UseScratchGpr(shift_arg);

        // The IR's shift amount is a U8; NOTE(review): upper bits of the host
        // register are not guaranteed clean here (the x64 emitter only ever read
        // cl), so mask down to the low byte before comparing.
        code.ANDI2R(shift, shift, 0xFF);
        // LSLV only honours bits [5:0] of the count...
        code.LSLV(result, result, shift);
        // ...so force the result to zero whenever the real count is >= 64.
        code.CMPI2R(shift, 64);
        code.CSEL(result, result, ZR, CC_LO);

        ctx.reg_alloc.DefineValue(inst, result);
    }
}
void EmitA64::EmitLogicalShiftRight32(EmitContext& ctx, IR::Inst* inst) {
auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
@ -668,28 +660,27 @@ void EmitA64::EmitRotateRight32(EmitContext& ctx, IR::Inst* inst) {
}
}
//void EmitA64::EmitRotateRight64(EmitContext& ctx, IR::Inst* inst) {
// auto args = ctx.reg_alloc.GetArgumentInfo(inst);
// auto& operand_arg = args[0];
// auto& shift_arg = args[1];
//
// if (shift_arg.IsImmediate()) {
// u8 shift = shift_arg.GetImmediateU8();
// Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(operand_arg);
//
// code.ror(result, u8(shift & 0x3F));
//
// ctx.reg_alloc.DefineValue(inst, result);
// } else {
// ctx.reg_alloc.Use(shift_arg, HostLoc::RCX);
// Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(operand_arg);
//
// // x64 ROR instruction does (shift & 0x3F) for us.
// code.ror(result, code.cl);
//
// ctx.reg_alloc.DefineValue(inst, result);
// }
//}
// Emits code for a 64-bit rotate right. Rotation is inherently modulo 64, and
// both the immediate ROR encoding (after masking) and RORV (which only reads
// bits [5:0] of the count register) match that behaviour directly.
void EmitA64::EmitRotateRight64(EmitContext& ctx, IR::Inst* inst) {
    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
    auto& operand_arg = args[0];
    auto& shift_arg = args[1];

    if (!shift_arg.IsImmediate()) {
        // Variable rotate: RORV ignores all but the low six bits of the count,
        // which is exactly the mod-64 semantics required.
        ARM64Reg operand = ctx.reg_alloc.UseScratchGpr(operand_arg);
        ARM64Reg amount = ctx.reg_alloc.UseGpr(shift_arg);
        code.RORV(operand, operand, amount);
        ctx.reg_alloc.DefineValue(inst, operand);
        return;
    }

    // Immediate rotate: reduce the count into the encodable 0-63 range.
    ARM64Reg operand = ctx.reg_alloc.UseScratchGpr(operand_arg);
    const u8 amount = u8(shift_arg.GetImmediateU8() & 0x3F);
    code.ROR(operand, operand, amount);
    ctx.reg_alloc.DefineValue(inst, operand);
}
void EmitA64::EmitRotateRightExtended(EmitContext& ctx, IR::Inst* inst) {
auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);

View File

@ -97,18 +97,18 @@ OPCODE(LeastSignificantByte, U8, U32
OPCODE(MostSignificantBit, U1, U32 )
OPCODE(IsZero32, U1, U32 )
OPCODE(IsZero64, U1, U64 )
//OPCODE(TestBit, U1, U64, U8 )
OPCODE(TestBit, U1, U64, U8 )
OPCODE(ConditionalSelect32, U32, Cond, U32, U32 )
OPCODE(ConditionalSelect64, U64, Cond, U64, U64 )
OPCODE(ConditionalSelectNZCV, NZCV, Cond, NZCV, NZCV )
OPCODE(LogicalShiftLeft32, U32, U32, U8, U1 )
//OPCODE(LogicalShiftLeft64, U64, U64, U8 )
OPCODE(LogicalShiftLeft64, U64, U64, U8 )
OPCODE(LogicalShiftRight32, U32, U32, U8, U1 )
OPCODE(LogicalShiftRight64, U64, U64, U8 )
OPCODE(ArithmeticShiftRight32, U32, U32, U8, U1 )
//OPCODE(ArithmeticShiftRight64, U64, U64, U8 )
OPCODE(RotateRight32, U32, U32, U8, U1 )
//OPCODE(RotateRight64, U64, U64, U8 )
OPCODE(RotateRight64, U64, U64, U8 )
OPCODE(RotateRightExtended, U32, U32, U1 )
OPCODE(Add32, U32, U32, U32, U1 )
OPCODE(Add64, U64, U64, U64, U1 )