Diffstat (limited to 'src/core/arm/dynarmic')
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp     | 148
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.h       |  19
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp     | 202
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.h       |  18
-rw-r--r--  src/core/arm/dynarmic/arm_exclusive_monitor.h |   2
5 files changed, 249 insertions, 140 deletions
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 286976623..da5659046 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -25,6 +25,9 @@ namespace Core {
using namespace Common::Literals;
+constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
+constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
+
class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
public:
explicit DynarmicCallbacks32(ARM_Dynarmic_32& parent_)
@@ -70,11 +73,13 @@ public:
}
void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
+ parent.LogBacktrace();
UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc,
MemoryReadCode(pc));
}
void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
+ parent.LogBacktrace();
LOG_CRITICAL(Core_ARM,
"ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
exception, pc, MemoryReadCode(pc), parent.IsInThumbMode());
@@ -82,15 +87,13 @@ public:
}
void CallSVC(u32 swi) override {
- parent.svc_called = true;
parent.svc_swi = swi;
- parent.jit->HaltExecution();
+ parent.jit.load()->HaltExecution(svc_call);
}
void AddTicks(u64 ticks) override {
- if (parent.uses_wall_clock) {
- return;
- }
+ ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
+
// Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
// rough approximation of the amount of executed ticks in the system, it may be thrown off
// if not all cores are doing a similar amount of work. Instead of doing this, we should
@@ -106,12 +109,8 @@ public:
}
u64 GetTicksRemaining() override {
- if (parent.uses_wall_clock) {
- if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
- return minimum_run_cycles;
- }
- return 0U;
- }
+ ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
+
return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
}
@@ -146,11 +145,19 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
// Timing
config.wall_clock_cntpct = uses_wall_clock;
+ config.enable_cycle_counting = !uses_wall_clock;
// Code cache size
config.code_cache_size = 512_MiB;
config.far_code_offset = 400_MiB;
+ // null_jit
+ if (!page_table) {
+ // Don't waste too much memory on null_jit
+ config.code_cache_size = 8_MiB;
+ config.far_code_offset = 4_MiB;
+ }
+
// Safe optimizations
if (Settings::values.cpu_debug_mode) {
if (!Settings::values.cpuopt_page_tables) {
@@ -186,35 +193,41 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
if (!Settings::values.cpuopt_recompile_exclusives) {
config.recompile_on_exclusive_fastmem_failure = false;
}
- }
+ } else {
+ // Unsafe optimizations
+ if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::Unsafe) {
+ config.unsafe_optimizations = true;
+ if (Settings::values.cpuopt_unsafe_unfuse_fma) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA;
+ }
+ if (Settings::values.cpuopt_unsafe_reduce_fp_error) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_ReducedErrorFP;
+ }
+ if (Settings::values.cpuopt_unsafe_ignore_standard_fpcr) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreStandardFPCRValue;
+ }
+ if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
+ }
+ if (Settings::values.cpuopt_unsafe_ignore_global_monitor) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
+ }
+ }
- // Unsafe optimizations
- if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::Unsafe) {
- config.unsafe_optimizations = true;
- if (Settings::values.cpuopt_unsafe_unfuse_fma) {
+ // Curated optimizations
+ if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::Auto) {
+ config.unsafe_optimizations = true;
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA;
- }
- if (Settings::values.cpuopt_unsafe_reduce_fp_error) {
- config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_ReducedErrorFP;
- }
- if (Settings::values.cpuopt_unsafe_ignore_standard_fpcr) {
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreStandardFPCRValue;
- }
- if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
- }
- if (Settings::values.cpuopt_unsafe_ignore_global_monitor) {
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
}
- }
- // Curated optimizations
- if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::Auto) {
- config.unsafe_optimizations = true;
- config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA;
- config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreStandardFPCRValue;
- config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
- config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
+ // Paranoia mode for debugging optimizations
+ if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::Paranoid) {
+ config.unsafe_optimizations = false;
+ config.optimizations = Dynarmic::no_optimizations;
+ }
}
return std::make_unique<Dynarmic::A32::Jit>(config);
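
Note: the optimization handling in the hunk above boils down to a mapping from the CPU accuracy setting to a set of Dynarmic optimization flags. A minimal sketch of that mapping, assuming a hypothetical helper name (the patch applies the flags directly to the Dynarmic UserConfig rather than through a helper):

    #include <dynarmic/interface/optimization_flags.h>

    #include "common/settings.h"

    // Hypothetical helper (not part of the patch) summarizing the selection above.
    Dynarmic::OptimizationFlag SelectAccuracyFlags(Settings::CPUAccuracy accuracy) {
        using Flag = Dynarmic::OptimizationFlag;
        switch (accuracy) {
        case Settings::CPUAccuracy::Auto:
            // Curated subset enabled unconditionally in Auto mode.
            return Flag::Unsafe_UnfuseFMA | Flag::Unsafe_IgnoreStandardFPCRValue |
                   Flag::Unsafe_InaccurateNaN | Flag::Unsafe_IgnoreGlobalMonitor;
        case Settings::CPUAccuracy::Paranoid:
            // Paranoid clears everything, including the default safe optimizations.
            return Dynarmic::no_optimizations;
        default:
            // Unsafe mode opts into each flag individually via its own setting.
            return Dynarmic::no_optimizations;
        }
    }
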
@@ -222,20 +235,18 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
void ARM_Dynarmic_32::Run() {
while (true) {
- jit->Run();
- if (!svc_called) {
- break;
+ const auto hr = jit.load()->Run();
+ if (Has(hr, svc_call)) {
+ Kernel::Svc::Call(system, svc_swi);
}
- svc_called = false;
- Kernel::Svc::Call(system, svc_swi);
- if (shutdown) {
+ if (Has(hr, break_loop) || !uses_wall_clock) {
break;
}
}
}
void ARM_Dynarmic_32::Step() {
- jit->Step();
+ jit.load()->Step();
}
ARM_Dynarmic_32::ARM_Dynarmic_32(System& system_, CPUInterrupts& interrupt_handlers_,
@@ -245,24 +256,28 @@ ARM_Dynarmic_32::ARM_Dynarmic_32(System& system_, CPUInterrupts& interrupt_handl
cb(std::make_unique<DynarmicCallbacks32>(*this)),
cp15(std::make_shared<DynarmicCP15>(*this)), core_index{core_index_},
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor_)},
- jit(MakeJit(nullptr)) {}
+ null_jit{MakeJit(nullptr)}, jit{null_jit.get()} {}
ARM_Dynarmic_32::~ARM_Dynarmic_32() = default;
void ARM_Dynarmic_32::SetPC(u64 pc) {
- jit->Regs()[15] = static_cast<u32>(pc);
+ jit.load()->Regs()[15] = static_cast<u32>(pc);
}
u64 ARM_Dynarmic_32::GetPC() const {
- return jit->Regs()[15];
+ return jit.load()->Regs()[15];
+}
+
+u64 ARM_Dynarmic_32::GetSP() const {
+ return jit.load()->Regs()[13];
}
u64 ARM_Dynarmic_32::GetReg(int index) const {
- return jit->Regs()[index];
+ return jit.load()->Regs()[index];
}
void ARM_Dynarmic_32::SetReg(int index, u64 value) {
- jit->Regs()[index] = static_cast<u32>(value);
+ jit.load()->Regs()[index] = static_cast<u32>(value);
}
u128 ARM_Dynarmic_32::GetVectorReg(int index) const {
@@ -272,11 +287,11 @@ u128 ARM_Dynarmic_32::GetVectorReg(int index) const {
void ARM_Dynarmic_32::SetVectorReg(int index, u128 value) {}
u32 ARM_Dynarmic_32::GetPSTATE() const {
- return jit->Cpsr();
+ return jit.load()->Cpsr();
}
void ARM_Dynarmic_32::SetPSTATE(u32 cpsr) {
- jit->SetCpsr(cpsr);
+ jit.load()->SetCpsr(cpsr);
}
u64 ARM_Dynarmic_32::GetTlsAddress() const {
@@ -297,7 +312,7 @@ void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) {
void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
Dynarmic::A32::Context context;
- jit->SaveContext(context);
+ jit.load()->SaveContext(context);
ctx.cpu_registers = context.Regs();
ctx.extension_registers = context.ExtRegs();
ctx.cpsr = context.Cpsr();
@@ -310,24 +325,27 @@ void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
context.ExtRegs() = ctx.extension_registers;
context.SetCpsr(ctx.cpsr);
context.SetFpscr(ctx.fpscr);
- jit->LoadContext(context);
+ jit.load()->LoadContext(context);
}
void ARM_Dynarmic_32::PrepareReschedule() {
- jit->HaltExecution();
- shutdown = true;
+ jit.load()->HaltExecution(break_loop);
+}
+
+void ARM_Dynarmic_32::SignalInterrupt() {
+ jit.load()->HaltExecution(break_loop);
}
void ARM_Dynarmic_32::ClearInstructionCache() {
- jit->ClearCache();
+ jit.load()->ClearCache();
}
void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
- jit->InvalidateCacheRange(static_cast<u32>(addr), size);
+ jit.load()->InvalidateCacheRange(static_cast<u32>(addr), size);
}
void ARM_Dynarmic_32::ClearExclusiveState() {
- jit->ClearExclusiveState();
+ jit.load()->ClearExclusiveState();
}
void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table,
@@ -338,13 +356,29 @@ void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table,
auto key = std::make_pair(&page_table, new_address_space_size_in_bits);
auto iter = jit_cache.find(key);
if (iter != jit_cache.end()) {
- jit = iter->second;
+ jit.store(iter->second.get());
LoadContext(ctx);
return;
}
- jit = MakeJit(&page_table);
+ std::shared_ptr new_jit = MakeJit(&page_table);
+ jit.store(new_jit.get());
LoadContext(ctx);
- jit_cache.emplace(key, jit);
+ jit_cache.emplace(key, std::move(new_jit));
+}
+
+std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_32::GetBacktrace(Core::System& system,
+ u64 sp, u64 lr) {
+ // No way to get accurate stack traces in A32 yet
+ return {};
+}
+
+std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_32::GetBacktraceFromContext(
+ System& system, const ThreadContext32& ctx) {
+ return GetBacktrace(system, ctx.cpu_registers[13], ctx.cpu_registers[14]);
+}
+
+std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_32::GetBacktrace() const {
+ return GetBacktrace(system, GetReg(13), GetReg(14));
}
} // namespace Core
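
Note: Dynarmic reports why the JIT stopped as a bit set of HaltReason flags, which is what the rewritten Run() loop above dispatches on. A minimal sketch of that pattern, reusing the UserDefined2/UserDefined3 aliases chosen in this patch (the dispatch function itself is illustrative):

    #include <dynarmic/interface/halt_reason.h>

    // Aliases matching the ones introduced at the top of this file.
    constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
    constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;

    // Sketch only: several reasons may be set in one return value, so each flag is
    // tested independently with Has() instead of comparing the value for equality.
    void DispatchHalt(Dynarmic::HaltReason hr) {
        if (Has(hr, svc_call)) {
            // A supervisor call is pending; service it, then loop again.
        }
        if (Has(hr, break_loop)) {
            // An interrupt or reschedule was signalled; leave the run loop.
        }
    }
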
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h
index 5d47b600d..1b628f94d 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.h
@@ -4,6 +4,7 @@
#pragma once
+#include <atomic>
#include <memory>
#include <unordered_map>
@@ -34,6 +35,7 @@ public:
void SetPC(u64 pc) override;
u64 GetPC() const override;
+ u64 GetSP() const override;
u64 GetReg(int index) const override;
void SetReg(int index, u64 value) override;
u128 GetVectorReg(int index) const override;
@@ -57,6 +59,7 @@ public:
void LoadContext(const ThreadContext64& ctx) override {}
void PrepareReschedule() override;
+ void SignalInterrupt() override;
void ClearExclusiveState() override;
void ClearInstructionCache() override;
@@ -64,9 +67,16 @@ public:
void PageTableChanged(Common::PageTable& new_page_table,
std::size_t new_address_space_size_in_bits) override;
+ static std::vector<BacktraceEntry> GetBacktraceFromContext(System& system,
+ const ThreadContext32& ctx);
+
+ std::vector<BacktraceEntry> GetBacktrace() const override;
+
private:
std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable* page_table) const;
+ static std::vector<BacktraceEntry> GetBacktrace(Core::System& system, u64 sp, u64 lr);
+
using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
using JitCacheType =
std::unordered_map<JitCacheKey, std::shared_ptr<Dynarmic::A32::Jit>, Common::PairHash>;
@@ -79,13 +89,14 @@ private:
std::shared_ptr<DynarmicCP15> cp15;
std::size_t core_index;
DynarmicExclusiveMonitor& exclusive_monitor;
- std::shared_ptr<Dynarmic::A32::Jit> jit;
+
+ std::shared_ptr<Dynarmic::A32::Jit> null_jit;
+
+ // A raw pointer here is fine; we never delete Jit instances.
+ std::atomic<Dynarmic::A32::Jit*> jit;
// SVC callback
u32 svc_swi{};
- bool svc_called{};
-
- bool shutdown{};
};
} // namespace Core
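
Note: the jit member above changes from the owning shared_ptr to an atomic raw pointer. Ownership stays with null_jit and the jit_cache, which are never cleared while the core runs, so the pointee outlives every reader; the atomic only publishes which instance is current so other threads (e.g. SignalInterrupt) can halt it safely. A small generic sketch of that publish/consume pattern, with illustrative names rather than yuzu types:

    #include <atomic>
    #include <memory>
    #include <unordered_map>

    struct Engine {
        void Halt() {}
    };

    class CpuCore {
    public:
        // Mirrors null_jit: a fallback instance is published before any switch happens.
        CpuCore() : fallback{std::make_shared<Engine>()}, current{fallback.get()} {}

        // Owning thread: look up or create an Engine, keep it alive in the cache,
        // then publish the raw pointer for other threads to use.
        void Switch(int key) {
            auto& slot = cache[key];
            if (!slot) {
                slot = std::make_shared<Engine>();
            }
            current.store(slot.get(), std::memory_order_release);
        }

        // Any thread: the pointee is never destroyed while the core is alive,
        // so loading the raw pointer and calling into it is safe.
        void SignalInterrupt() {
            current.load(std::memory_order_acquire)->Halt();
        }

    private:
        std::unordered_map<int, std::shared_ptr<Engine>> cache;
        std::shared_ptr<Engine> fallback;
        std::atomic<Engine*> current;
    };
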
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index d96226c41..871d9d10e 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -26,6 +26,9 @@ namespace Core {
using Vector = Dynarmic::A64::Vector;
using namespace Common::Literals;
+constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
+constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
+
class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
public:
explicit DynarmicCallbacks64(ARM_Dynarmic_64& parent_)
@@ -81,6 +84,7 @@ public:
}
void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
+ parent.LogBacktrace();
LOG_ERROR(Core_ARM,
"Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
num_instructions, MemoryReadCode(pc));
@@ -93,17 +97,19 @@ public:
static constexpr u64 ICACHE_LINE_SIZE = 64;
const u64 cache_line_start = value & ~(ICACHE_LINE_SIZE - 1);
- parent.InvalidateCacheRange(cache_line_start, ICACHE_LINE_SIZE);
+ parent.system.InvalidateCpuInstructionCacheRange(cache_line_start, ICACHE_LINE_SIZE);
break;
}
case Dynarmic::A64::InstructionCacheOperation::InvalidateAllToPoU:
- parent.ClearInstructionCache();
+ parent.system.InvalidateCpuInstructionCaches();
break;
case Dynarmic::A64::InstructionCacheOperation::InvalidateAllToPoUInnerSharable:
default:
LOG_DEBUG(Core_ARM, "Unprocessed instruction cache operation: {}", op);
break;
}
+
+ parent.jit.load()->HaltExecution(Dynarmic::HaltReason::CacheInvalidation);
}
void ExceptionRaised(u64 pc, Dynarmic::A64::Exception exception) override {
@@ -116,21 +122,19 @@ public:
return;
case Dynarmic::A64::Exception::Breakpoint:
default:
+ parent.LogBacktrace();
ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
}
}
void CallSVC(u32 swi) override {
- parent.svc_called = true;
parent.svc_swi = swi;
- parent.jit->HaltExecution();
+ parent.jit.load()->HaltExecution(svc_call);
}
void AddTicks(u64 ticks) override {
- if (parent.uses_wall_clock) {
- return;
- }
+ ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
// Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
// rough approximation of the amount of executed ticks in the system, it may be thrown off
@@ -145,12 +149,8 @@ public:
}
u64 GetTicksRemaining() override {
- if (parent.uses_wall_clock) {
- if (!parent.interrupt_handlers[parent.core_index].IsInterrupted()) {
- return minimum_run_cycles;
- }
- return 0U;
- }
+ ASSERT_MSG(!parent.uses_wall_clock, "This should never happen - dynarmic ticking disabled");
+
return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
}
@@ -206,11 +206,19 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
// Timing
config.wall_clock_cntpct = uses_wall_clock;
+ config.enable_cycle_counting = !uses_wall_clock;
// Code cache size
config.code_cache_size = 512_MiB;
config.far_code_offset = 400_MiB;
+ // null_jit
+ if (!page_table) {
+ // Don't waste too much memory on null_jit
+ config.code_cache_size = 8_MiB;
+ config.far_code_offset = 4_MiB;
+ }
+
// Safe optimizations
if (Settings::values.cpu_debug_mode) {
if (!Settings::values.cpuopt_page_tables) {
@@ -246,35 +254,41 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
if (!Settings::values.cpuopt_recompile_exclusives) {
config.recompile_on_exclusive_fastmem_failure = false;
}
- }
+ } else {
+ // Unsafe optimizations
+ if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::Unsafe) {
+ config.unsafe_optimizations = true;
+ if (Settings::values.cpuopt_unsafe_unfuse_fma) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA;
+ }
+ if (Settings::values.cpuopt_unsafe_reduce_fp_error) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_ReducedErrorFP;
+ }
+ if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
+ }
+ if (Settings::values.cpuopt_unsafe_fastmem_check) {
+ config.fastmem_address_space_bits = 64;
+ }
+ if (Settings::values.cpuopt_unsafe_ignore_global_monitor) {
+ config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
+ }
+ }
- // Unsafe optimizations
- if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::Unsafe) {
- config.unsafe_optimizations = true;
- if (Settings::values.cpuopt_unsafe_unfuse_fma) {
+ // Curated optimizations
+ if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::Auto) {
+ config.unsafe_optimizations = true;
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA;
- }
- if (Settings::values.cpuopt_unsafe_reduce_fp_error) {
- config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_ReducedErrorFP;
- }
- if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
- }
- if (Settings::values.cpuopt_unsafe_fastmem_check) {
config.fastmem_address_space_bits = 64;
- }
- if (Settings::values.cpuopt_unsafe_ignore_global_monitor) {
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
}
- }
- // Curated optimizations
- if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::Auto) {
- config.unsafe_optimizations = true;
- config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_UnfuseFMA;
- config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
- config.fastmem_address_space_bits = 64;
- config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_IgnoreGlobalMonitor;
+ // Paranoia mode for debugging optimizations
+ if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::Paranoid) {
+ config.unsafe_optimizations = false;
+ config.optimizations = Dynarmic::no_optimizations;
+ }
}
return std::make_shared<Dynarmic::A64::Jit>(config);
@@ -282,20 +296,18 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
void ARM_Dynarmic_64::Run() {
while (true) {
- jit->Run();
- if (!svc_called) {
- break;
+ const auto hr = jit.load()->Run();
+ if (Has(hr, svc_call)) {
+ Kernel::Svc::Call(system, svc_swi);
}
- svc_called = false;
- Kernel::Svc::Call(system, svc_swi);
- if (shutdown) {
+ if (Has(hr, break_loop) || !uses_wall_clock) {
break;
}
}
}
void ARM_Dynarmic_64::Step() {
- jit->Step();
+ jit.load()->Step();
}
ARM_Dynarmic_64::ARM_Dynarmic_64(System& system_, CPUInterrupts& interrupt_handlers_,
@@ -304,40 +316,44 @@ ARM_Dynarmic_64::ARM_Dynarmic_64(System& system_, CPUInterrupts& interrupt_handl
: ARM_Interface{system_, interrupt_handlers_, uses_wall_clock_},
cb(std::make_unique<DynarmicCallbacks64>(*this)), core_index{core_index_},
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor_)},
- jit(MakeJit(nullptr, 48)) {}
+ null_jit{MakeJit(nullptr, 48)}, jit{null_jit.get()} {}
ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;
void ARM_Dynarmic_64::SetPC(u64 pc) {
- jit->SetPC(pc);
+ jit.load()->SetPC(pc);
}
u64 ARM_Dynarmic_64::GetPC() const {
- return jit->GetPC();
+ return jit.load()->GetPC();
+}
+
+u64 ARM_Dynarmic_64::GetSP() const {
+ return jit.load()->GetSP();
}
u64 ARM_Dynarmic_64::GetReg(int index) const {
- return jit->GetRegister(index);
+ return jit.load()->GetRegister(index);
}
void ARM_Dynarmic_64::SetReg(int index, u64 value) {
- jit->SetRegister(index, value);
+ jit.load()->SetRegister(index, value);
}
u128 ARM_Dynarmic_64::GetVectorReg(int index) const {
- return jit->GetVector(index);
+ return jit.load()->GetVector(index);
}
void ARM_Dynarmic_64::SetVectorReg(int index, u128 value) {
- jit->SetVector(index, value);
+ jit.load()->SetVector(index, value);
}
u32 ARM_Dynarmic_64::GetPSTATE() const {
- return jit->GetPstate();
+ return jit.load()->GetPstate();
}
void ARM_Dynarmic_64::SetPSTATE(u32 pstate) {
- jit->SetPstate(pstate);
+ jit.load()->SetPstate(pstate);
}
u64 ARM_Dynarmic_64::GetTlsAddress() const {
@@ -357,42 +373,47 @@ void ARM_Dynarmic_64::SetTPIDR_EL0(u64 value) {
}
void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
- ctx.cpu_registers = jit->GetRegisters();
- ctx.sp = jit->GetSP();
- ctx.pc = jit->GetPC();
- ctx.pstate = jit->GetPstate();
- ctx.vector_registers = jit->GetVectors();
- ctx.fpcr = jit->GetFpcr();
- ctx.fpsr = jit->GetFpsr();
+ Dynarmic::A64::Jit* j = jit.load();
+ ctx.cpu_registers = j->GetRegisters();
+ ctx.sp = j->GetSP();
+ ctx.pc = j->GetPC();
+ ctx.pstate = j->GetPstate();
+ ctx.vector_registers = j->GetVectors();
+ ctx.fpcr = j->GetFpcr();
+ ctx.fpsr = j->GetFpsr();
ctx.tpidr = cb->tpidr_el0;
}
void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
- jit->SetRegisters(ctx.cpu_registers);
- jit->SetSP(ctx.sp);
- jit->SetPC(ctx.pc);
- jit->SetPstate(ctx.pstate);
- jit->SetVectors(ctx.vector_registers);
- jit->SetFpcr(ctx.fpcr);
- jit->SetFpsr(ctx.fpsr);
+ Dynarmic::A64::Jit* j = jit.load();
+ j->SetRegisters(ctx.cpu_registers);
+ j->SetSP(ctx.sp);
+ j->SetPC(ctx.pc);
+ j->SetPstate(ctx.pstate);
+ j->SetVectors(ctx.vector_registers);
+ j->SetFpcr(ctx.fpcr);
+ j->SetFpsr(ctx.fpsr);
SetTPIDR_EL0(ctx.tpidr);
}
void ARM_Dynarmic_64::PrepareReschedule() {
- jit->HaltExecution();
- shutdown = true;
+ jit.load()->HaltExecution(break_loop);
+}
+
+void ARM_Dynarmic_64::SignalInterrupt() {
+ jit.load()->HaltExecution(break_loop);
}
void ARM_Dynarmic_64::ClearInstructionCache() {
- jit->ClearCache();
+ jit.load()->ClearCache();
}
void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
- jit->InvalidateCacheRange(addr, size);
+ jit.load()->InvalidateCacheRange(addr, size);
}
void ARM_Dynarmic_64::ClearExclusiveState() {
- jit->ClearExclusiveState();
+ jit.load()->ClearExclusiveState();
}
void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table,
@@ -403,13 +424,48 @@ void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table,
auto key = std::make_pair(&page_table, new_address_space_size_in_bits);
auto iter = jit_cache.find(key);
if (iter != jit_cache.end()) {
- jit = iter->second;
+ jit.store(iter->second.get());
LoadContext(ctx);
return;
}
- jit = MakeJit(&page_table, new_address_space_size_in_bits);
+ std::shared_ptr new_jit = MakeJit(&page_table, new_address_space_size_in_bits);
+ jit.store(new_jit.get());
LoadContext(ctx);
- jit_cache.emplace(key, jit);
+ jit_cache.emplace(key, std::move(new_jit));
+}
+
+std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_64::GetBacktrace(Core::System& system,
+ u64 fp, u64 lr) {
+ std::vector<BacktraceEntry> out;
+ auto& memory = system.Memory();
+
+ // fp (= r29) points to the last frame record.
+ // Note that this is the frame record for the *previous* frame, not the current one.
+ // Note we need to subtract 4 from our last read to get the proper address
+ // Frame records are two words long:
+ // fp+0 : pointer to previous frame record
+ // fp+8 : value of lr for frame
+ while (true) {
+ out.push_back({"", 0, lr, 0, ""});
+ if (!fp) {
+ break;
+ }
+ lr = memory.Read64(fp + 8) - 4;
+ fp = memory.Read64(fp);
+ }
+
+ SymbolicateBacktrace(system, out);
+
+ return out;
+}
+
+std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_64::GetBacktraceFromContext(
+ System& system, const ThreadContext64& ctx) {
+ return GetBacktrace(system, ctx.cpu_registers[29], ctx.cpu_registers[30]);
+}
+
+std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_64::GetBacktrace() const {
+ return GetBacktrace(system, GetReg(29), GetReg(30));
}
} // namespace Core
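
Note: GetBacktrace above walks the AAPCS64 frame-record chain. Each record is two 64-bit words ([fp+0] = caller's fp, [fp+8] = saved lr), and the saved lr points at the instruction after the branch, which is why 4 bytes are subtracted to land on the call site. A standalone sketch of the same walk, with the memory accessor passed in as a callable (read64 is an assumption standing in for system.Memory().Read64):

    #include <cstdint>
    #include <vector>

    // Returns the chain of call-site addresses, most recent first.
    template <typename Read64Fn>
    std::vector<uint64_t> WalkFrameChain(uint64_t fp, uint64_t lr, Read64Fn&& read64) {
        std::vector<uint64_t> call_sites;
        while (true) {
            call_sites.push_back(lr);
            if (fp == 0) {
                break; // End of the frame-record chain.
            }
            // [fp + 8] holds the return address; one 4-byte instruction back is the call.
            lr = read64(fp + 8) - 4;
            // [fp + 0] holds the previous frame pointer.
            fp = read64(fp);
        }
        return call_sites;
    }
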
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h
index 0c4e46c64..78773e293 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.h
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.h
@@ -4,6 +4,7 @@
#pragma once
+#include <atomic>
#include <memory>
#include <unordered_map>
@@ -32,6 +33,7 @@ public:
void SetPC(u64 pc) override;
u64 GetPC() const override;
+ u64 GetSP() const override;
u64 GetReg(int index) const override;
void SetReg(int index, u64 value) override;
u128 GetVectorReg(int index) const override;
@@ -51,6 +53,7 @@ public:
void LoadContext(const ThreadContext64& ctx) override;
void PrepareReschedule() override;
+ void SignalInterrupt() override;
void ClearExclusiveState() override;
void ClearInstructionCache() override;
@@ -58,10 +61,17 @@ public:
void PageTableChanged(Common::PageTable& new_page_table,
std::size_t new_address_space_size_in_bits) override;
+ static std::vector<BacktraceEntry> GetBacktraceFromContext(System& system,
+ const ThreadContext64& ctx);
+
+ std::vector<BacktraceEntry> GetBacktrace() const override;
+
private:
std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable* page_table,
std::size_t address_space_bits) const;
+ static std::vector<BacktraceEntry> GetBacktrace(Core::System& system, u64 fp, u64 lr);
+
using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
using JitCacheType =
std::unordered_map<JitCacheKey, std::shared_ptr<Dynarmic::A64::Jit>, Common::PairHash>;
@@ -73,13 +83,13 @@ private:
std::size_t core_index;
DynarmicExclusiveMonitor& exclusive_monitor;
- std::shared_ptr<Dynarmic::A64::Jit> jit;
+ std::shared_ptr<Dynarmic::A64::Jit> null_jit;
+
+ // A raw pointer here is fine; we never delete Jit instances.
+ std::atomic<Dynarmic::A64::Jit*> jit;
// SVC callback
u32 svc_swi{};
- bool svc_called{};
-
- bool shutdown{};
};
} // namespace Core
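
Note: both headers key their jit_cache on a (page table pointer, address-space size) pair hashed with Common::PairHash. A generic sketch of such a pair hasher and the resulting map shape (this illustrates the idea only and is not yuzu's actual Common::PairHash):

    #include <cstddef>
    #include <functional>
    #include <memory>
    #include <unordered_map>
    #include <utility>

    struct PairHash {
        template <typename A, typename B>
        std::size_t operator()(const std::pair<A, B>& p) const noexcept {
            const std::size_t h1 = std::hash<A>{}(p.first);
            const std::size_t h2 = std::hash<B>{}(p.second);
            // Boost-style combine of the two member hashes.
            return h1 ^ (h2 + 0x9e3779b9 + (h1 << 6) + (h1 >> 2));
        }
    };

    struct PageTable {};
    struct Jit {};

    // Same shape as the jit_cache members above: one Jit per page table / address space size.
    using JitCacheKey = std::pair<PageTable*, std::size_t>;
    using JitCacheType = std::unordered_map<JitCacheKey, std::shared_ptr<Jit>, PairHash>;
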
diff --git a/src/core/arm/dynarmic/arm_exclusive_monitor.h b/src/core/arm/dynarmic/arm_exclusive_monitor.h
index 5a15b43ef..b82c77f76 100644
--- a/src/core/arm/dynarmic/arm_exclusive_monitor.h
+++ b/src/core/arm/dynarmic/arm_exclusive_monitor.h
@@ -4,8 +4,6 @@
#pragma once
-#include <unordered_map>
-
#include <dynarmic/interface/exclusive_monitor.h>
#include "common/common_types.h"