about summary refs log tree commit diff homepage
diff options
context:
space:
mode:
authorYang Liu <[email protected]>2024-02-19 16:31:05 +0800
committerMerry <[email protected]>2024-03-02 19:38:46 +0000
commit8a1179036305a8899de29b7097dcdd35d6ccc248 (patch)
tree1fa91ac137d1207ac48b2d0af7ce7a13cd626558
parent6142db864714572ebab318b71b09f5403352c6b1 (diff)
downloaddynarmic-8a1179036305a8899de29b7097dcdd35d6ccc248.tar.gz
dynarmic-8a1179036305a8899de29b7097dcdd35d6ccc248.zip
backend/rv64: Implement basic Add32
-rw-r--r--src/dynarmic/backend/riscv64/emit_riscv64_data_processing.cpp85
-rw-r--r--src/dynarmic/backend/riscv64/reg_alloc.cpp4
2 files changed, 56 insertions, 33 deletions
diff --git a/src/dynarmic/backend/riscv64/emit_riscv64_data_processing.cpp b/src/dynarmic/backend/riscv64/emit_riscv64_data_processing.cpp
index e7f2c45a..51ed027a 100644
--- a/src/dynarmic/backend/riscv64/emit_riscv64_data_processing.cpp
+++ b/src/dynarmic/backend/riscv64/emit_riscv64_data_processing.cpp
@@ -225,54 +225,59 @@ static void AddImmWithFlags(biscuit::Assembler& as, biscuit::GPR rd, biscuit::GP
imm = static_cast<u32>(imm);
}
if (mcl::bit::sign_extend<12>(imm) == imm) {
- as.ADDIW(rd, rs, imm);
+ bitsize == 32 ? as.ADDIW(rd, rs, imm) : as.ADDI(rd, rs, imm);
} else {
as.LI(Xscratch0, imm);
- as.ADDW(rd, rs, Xscratch0);
+ bitsize == 32 ? as.ADDW(rd, rs, Xscratch0) : as.ADD(rd, rs, Xscratch0);
}
    // Z
as.SEQZ(flags, rd);
as.SLLI(flags, flags, 30);
-
    // N
as.SLTZ(Xscratch1, rd);
as.SLLI(Xscratch1, Xscratch1, 31);
as.OR(flags, flags, Xscratch1);
- // C
- if (mcl::bit::sign_extend<12>(imm) == imm) {
- as.ADDI(Xscratch1, rs, imm);
- } else {
+ if constexpr (bitsize == 32) {
+ // C
+ if (mcl::bit::sign_extend<12>(imm) == imm) {
+ as.ADDI(Xscratch1, rs, imm);
+ } else {
+ as.ADD(Xscratch1, rs, Xscratch0);
+ }
+ as.SRLI(Xscratch1, Xscratch1, 3);
+ as.LUI(Xscratch0, 0x20000);
+ as.AND(Xscratch1, Xscratch1, Xscratch0);
+ as.OR(flags, flags, Xscratch1);
+ // V
+ as.LI(Xscratch0, imm);
as.ADD(Xscratch1, rs, Xscratch0);
+ as.XOR(Xscratch0, Xscratch0, rs);
+ as.NOT(Xscratch0, Xscratch0);
+ as.XOR(Xscratch1, Xscratch1, rs);
+ as.AND(Xscratch1, Xscratch0, Xscratch1);
+ as.SRLIW(Xscratch1, Xscratch1, 31);
+ as.SLLI(Xscratch1, Xscratch1, 28);
+ as.OR(flags, flags, Xscratch1);
+ } else {
+ UNIMPLEMENTED();
}
- as.SRLI(Xscratch1, Xscratch1, 3);
- as.LUI(Xscratch0, 0x20000);
- as.AND(Xscratch1, Xscratch1, Xscratch0);
- as.OR(flags, flags, Xscratch1);
-
- // V
- as.LI(Xscratch0, imm);
- as.ADD(Xscratch1, rs, Xscratch0);
- as.XOR(Xscratch0, Xscratch0, rs);
- as.NOT(Xscratch0, Xscratch0);
- as.XOR(Xscratch1, Xscratch1, rs);
- as.AND(Xscratch1, Xscratch0, Xscratch1);
- as.SRLIW(Xscratch1, Xscratch1, 31);
- as.SLLI(Xscratch1, Xscratch1, 28);
- as.OR(flags, flags, Xscratch1);
}
-template<size_t bitsize>
-static void EmitSub(biscuit::Assembler& as, EmitContext& ctx, IR::Inst* inst) {
+template<size_t bitsize, bool sub>
+static void EmitAddSub(biscuit::Assembler& as, EmitContext& ctx, IR::Inst* inst) {
const auto nzcv_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetNZCVFromOp);
+ const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
auto Xresult = ctx.reg_alloc.WriteX(inst);
auto Xa = ctx.reg_alloc.ReadX(args[0]);
- if (nzcv_inst) {
+ if (overflow_inst) {
+ UNIMPLEMENTED();
+ } else if (nzcv_inst) {
if (args[1].IsImmediate()) {
const u64 imm = args[1].GetImmediateU64();
@@ -281,9 +286,9 @@ static void EmitSub(biscuit::Assembler& as, EmitContext& ctx, IR::Inst* inst) {
RegAlloc::Realize(Xresult, Xflags, Xa);
if (args[2].GetImmediateU1()) {
- AddImmWithFlags<bitsize>(as, *Xresult, *Xa, ~imm, *Xflags);
+ AddImmWithFlags<bitsize>(as, *Xresult, *Xa, sub ? ~imm : imm + 1, *Xflags);
} else {
- AddImmWithFlags<bitsize>(as, *Xresult, *Xa, -imm, *Xflags);
+ AddImmWithFlags<bitsize>(as, *Xresult, *Xa, sub ? -imm : imm, *Xflags);
}
} else {
UNIMPLEMENTED();
@@ -292,13 +297,31 @@ static void EmitSub(biscuit::Assembler& as, EmitContext& ctx, IR::Inst* inst) {
UNIMPLEMENTED();
}
} else {
- UNIMPLEMENTED();
+ if (args[1].IsImmediate()) {
+ const u64 imm = args[1].GetImmediateU64();
+
+ if (args[2].IsImmediate()) {
+ UNIMPLEMENTED();
+ } else {
+ auto Xnzcv = ctx.reg_alloc.ReadX(args[2]);
+ RegAlloc::Realize(Xresult, Xa, Xnzcv);
+
+ as.LUI(Xscratch0, 0x20000);
+ as.AND(Xscratch0, Xnzcv, Xscratch0);
+ as.SRLI(Xscratch0, Xscratch0, 29);
+ as.LI(Xscratch1, imm);
+ as.ADD(Xscratch0, Xscratch0, Xscratch1);
+ as.ADDW(Xresult, Xa, Xscratch0);
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
}
}
template<>
-void EmitIR<IR::Opcode::Add32>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
- UNIMPLEMENTED();
+void EmitIR<IR::Opcode::Add32>(biscuit::Assembler& as, EmitContext& ctx, IR::Inst* inst) {
+ EmitAddSub<32, false>(as, ctx, inst);
}
template<>
@@ -308,7 +331,7 @@ void EmitIR<IR::Opcode::Add64>(biscuit::Assembler&, EmitContext&, IR::Inst*) {
template<>
void EmitIR<IR::Opcode::Sub32>(biscuit::Assembler& as, EmitContext& ctx, IR::Inst* inst) {
- EmitSub<32>(as, ctx, inst);
+ EmitAddSub<32, true>(as, ctx, inst);
}
template<>
diff --git a/src/dynarmic/backend/riscv64/reg_alloc.cpp b/src/dynarmic/backend/riscv64/reg_alloc.cpp
index e8a0a64f..6c89d7b0 100644
--- a/src/dynarmic/backend/riscv64/reg_alloc.cpp
+++ b/src/dynarmic/backend/riscv64/reg_alloc.cpp
@@ -197,7 +197,7 @@ u32 RegAlloc::RealizeReadImpl(const IR::Value& value) {
// ASSERT size fits
break;
case HostLoc::Kind::Spill:
- as.LD(biscuit::GPR{new_location_index}, spill_offset + new_location_index * spill_slot_size, biscuit::sp);
+ as.LD(biscuit::GPR{new_location_index}, spill_offset + current_location->index * spill_slot_size, biscuit::sp);
break;
}
@@ -216,7 +216,7 @@ u32 RegAlloc::RealizeReadImpl(const IR::Value& value) {
ASSERT_FALSE("Logic error");
break;
case HostLoc::Kind::Spill:
- as.FLD(biscuit::FPR{new_location_index}, spill_offset + new_location_index * spill_slot_size, biscuit::sp);
+ as.FLD(biscuit::FPR{new_location_index}, spill_offset + current_location->index * spill_slot_size, biscuit::sp);
break;
}