diff --git a/src/backend_x64/a64_emit_x64.cpp b/src/backend_x64/a64_emit_x64.cpp
index 9397c67e..718fcc9c 100644
--- a/src/backend_x64/a64_emit_x64.cpp
+++ b/src/backend_x64/a64_emit_x64.cpp
@@ -180,8 +180,9 @@ void A64EmitX64::EmitA64SetX(A64EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     A64::Reg reg = inst->GetArg(0).GetA64RegRef();
     auto addr = qword[r15 + offsetof(A64JitState, reg) + sizeof(u64) * static_cast<size_t>(reg)];
-    if (args[1].FitsInImmediateU32()) {
-        code->mov(addr, args[1].GetImmediateU32());
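+    // mov m64, imm32 stores a sign-extended immediate, so any value representable as a signed 32-bit immediate can be written in one instruction.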
+    if (args[1].FitsInImmediateS32()) {
+        code->mov(addr, args[1].GetImmediateS32());
     } else if (args[1].IsInXmm()) {
         Xbyak::Xmm to_store = ctx.reg_alloc.UseXmm(args[1]);
         code->movq(addr, to_store);
diff --git a/src/backend_x64/emit_x64.cpp b/src/backend_x64/emit_x64.cpp
index 0cf2b1ec..09d5cf6c 100644
--- a/src/backend_x64/emit_x64.cpp
+++ b/src/backend_x64/emit_x64.cpp
@@ -993,8 +993,9 @@ void EmitX64<JST>::EmitAnd64(EmitContext& ctx, IR::Inst* inst) {
 
     Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
 
-    if (args[1].FitsInImmediateU32()) {
-        u32 op_arg = args[1].GetImmediateU32();
+    if (args[1].FitsInImmediateS32()) {
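+        // Truncating to u32 is safe: and r64, imm32 sign-extends the immediate, and FitsInImmediateS32 guarantees that sign-extending the low 32 bits reproduces the full 64-bit value.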
+        u32 op_arg = u32(args[1].GetImmediateS32());
 
         code->and_(result, op_arg);
     } else {
@@ -1033,8 +1034,8 @@ void EmitX64<JST>::EmitEor64(EmitContext& ctx, IR::Inst* inst) {
 
     Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
 
-    if (args[1].FitsInImmediateU32()) {
-        u32 op_arg = args[1].GetImmediateU32();
+    if (args[1].FitsInImmediateS32()) {
+        u32 op_arg = u32(args[1].GetImmediateS32());
 
         code->xor_(result, op_arg);
     } else {
@@ -1073,8 +1074,8 @@ void EmitX64<JST>::EmitOr64(EmitContext& ctx, IR::Inst* inst) {
 
     Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
 
-    if (args[1].FitsInImmediateU32()) {
-        u32 op_arg = args[1].GetImmediateU32();
+    if (args[1].FitsInImmediateS32()) {
+        u32 op_arg = u32(args[1].GetImmediateS32());
 
         code->or_(result, op_arg);
     } else {
diff --git a/src/backend_x64/reg_alloc.cpp b/src/backend_x64/reg_alloc.cpp
index 25c099ca..8104e665 100644
--- a/src/backend_x64/reg_alloc.cpp
+++ b/src/backend_x64/reg_alloc.cpp
@@ -107,6 +107,14 @@ bool Argument::FitsInImmediateU32() const {
     return imm < 0x100000000;
 }
 
+bool Argument::FitsInImmediateS32() const {
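+    // True when the immediate, viewed as a signed 64-bit integer, is representable as a sign-extended 32-bit immediate (the form x64 uses for 64-bit ALU operations).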
+    if (!IsImmediate())
+        return false;
+    s64 imm = static_cast<s64>(ImmediateToU64(value));
+    return -s64(0x80000000) <= imm && imm <= s64(0x7FFFFFFF);
+}
+
 bool Argument::GetImmediateU1() const {
     return value.GetU1();
 }
@@ -129,19 +137,32 @@ u32 Argument::GetImmediateU32() const {
     return u32(imm);
 }
 
+// Deliberately returns u64: the value is guaranteed to equal the sign-extension of its low 32 bits, so callers may truncate to the operand width they need.
+u64 Argument::GetImmediateS32() const {
+    ASSERT(FitsInImmediateS32());
+    return ImmediateToU64(value);
+}
+
 u64 Argument::GetImmediateU64() const {
     return ImmediateToU64(value);
 }
 
 bool Argument::IsInGpr() const {
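+    // Immediates are never resident in a host location; ValueLocation() may only be queried for instruction values.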
+    if (IsImmediate())
+        return false;
     return HostLocIsGPR(*reg_alloc.ValueLocation(value.GetInst()));
 }
 
 bool Argument::IsInXmm() const {
+    if (IsImmediate())
+        return false;
     return HostLocIsXMM(*reg_alloc.ValueLocation(value.GetInst()));
 }
 
 bool Argument::IsInMemory() const {
+    if (IsImmediate())
+        return false;
     return HostLocIsSpill(*reg_alloc.ValueLocation(value.GetInst()));
 }
 
diff --git a/src/backend_x64/reg_alloc.h b/src/backend_x64/reg_alloc.h
index 917151ce..e56584aa 100644
--- a/src/backend_x64/reg_alloc.h
+++ b/src/backend_x64/reg_alloc.h
@@ -57,11 +57,15 @@ public:
     bool IsImmediate() const;
 
     bool FitsInImmediateU32() const;
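+    /// Is this value an immediate representable as a sign-extended 32-bit immediate?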
+    bool FitsInImmediateS32() const;
 
     bool GetImmediateU1() const;
     u8 GetImmediateU8() const;
     u16 GetImmediateU16() const;
     u32 GetImmediateU32() const;
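+    /// Returns the full 64-bit representation of the immediate; guaranteed to equal the sign-extension of its low 32 bits.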
+    u64 GetImmediateS32() const;
     u64 GetImmediateU64() const;
 
     /// Is this value currently in a GPR?