Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp |   6
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h              |   3
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp                             |  16
-rw-r--r--  src/core/hle/kernel/hle_ipc.h                               |   8
-rw-r--r--  src/core/hle/kernel/k_auto_object.h                         |   7
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp                       |  14
-rw-r--r--  src/core/hle/kernel/k_page_linked_list.h                    |   4
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp                        | 253
-rw-r--r--  src/core/hle/kernel/k_page_table.h                          |  38
-rw-r--r--  src/core/hle/kernel/k_process.h                             |   2
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp                         |   8
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h                      |   3
-rw-r--r--  src/core/hle/kernel/k_server_port.cpp                       |   6
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp                    |   3
-rw-r--r--  src/core/hle/kernel/k_spin_lock.cpp                         |  39
-rw-r--r--  src/core/hle/kernel/k_spin_lock.h                           |   4
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                            |  39
-rw-r--r--  src/core/hle/kernel/k_thread.h                              |  16
-rw-r--r--  src/core/hle/kernel/kernel.cpp                              |  93
-rw-r--r--  src/core/hle/kernel/kernel.h                                |  30
-rw-r--r--  src/core/hle/kernel/physical_core.cpp                       |   4
-rw-r--r--  src/core/hle/kernel/physical_core.h                         |   7
-rw-r--r--  src/core/hle/kernel/svc.cpp                                 |  16
-rw-r--r--  src/core/hle/kernel/time_manager.cpp                        |   4
24 files changed, 439 insertions, 184 deletions
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 8027bec00..7765e7848 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -148,9 +148,9 @@ u64 GenerateUniformRange(u64 min, u64 max, F f) {
} // Anonymous namespace
u64 KSystemControl::GenerateRandomU64() {
- static std::random_device device;
- static std::mt19937 gen(device());
- static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
+ std::random_device device;
+ std::mt19937 gen(device());
+ std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
return distribution(gen);
}
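The statics are removed here because GenerateRandomU64() can be called concurrently from multiple host threads, and advancing a shared std::mt19937 is a data race; per-call construction is safe but pays for reseeding on every call. A minimal sketch of a thread_local alternative (hypothetical, not what this commit does):

#include <cstdint>
#include <limits>
#include <random>

// Hypothetical alternative: one engine per host thread, seeded once,
// avoiding both the shared-state race and the per-call reseeding cost.
std::uint64_t GenerateRandomU64ThreadLocal() {
    thread_local std::mt19937_64 gen{std::random_device{}()};
    thread_local std::uniform_int_distribution<std::uint64_t> dist(
        1, std::numeric_limits<std::uint64_t>::max());
    return dist(gen);
}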
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 6f44b534f..47425a3a1 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -8,7 +8,6 @@
#include <vector>
#include "common/common_types.h"
-#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
@@ -80,7 +79,7 @@ private:
/// Lists all thread ids that aren't deleted/etc.
std::vector<KThread*> thread_list;
- Common::SpinLock global_list_guard{};
+ std::mutex global_list_guard;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 42d1b0e31..5828ac923 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -24,8 +24,15 @@
namespace Kernel {
-SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_)
- : kernel{kernel_}, service_thread{kernel.CreateServiceThread(service_name_)} {}
+SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
+ ServiceThreadType thread_type)
+ : kernel{kernel_} {
+ if (thread_type == ServiceThreadType::CreateNew) {
+ service_thread = kernel.CreateServiceThread(service_name_);
+ } else {
+ service_thread = kernel.GetDefaultServiceThread();
+ }
+}
SessionRequestHandler::~SessionRequestHandler() {
kernel.ReleaseServiceThread(service_thread);
@@ -44,7 +51,7 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
return false;
}
- return DomainHandler(object_id - 1).lock() != nullptr;
+ return !DomainHandler(object_id - 1).expired();
} else {
return session_handler != nullptr;
}
@@ -52,6 +59,9 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
void SessionRequestHandler::ClientConnected(KServerSession* session) {
session->ClientConnected(shared_from_this());
+
+ // Ensure our server session is tracked globally.
+ kernel.RegisterServerObject(session);
}
void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 670cc741c..640146137 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -33,6 +33,11 @@ namespace Service {
class ServiceFrameworkBase;
}
+enum class ServiceThreadType {
+ Default,
+ CreateNew,
+};
+
namespace Kernel {
class Domain;
@@ -57,7 +62,8 @@ enum class ThreadWakeupReason;
*/
class SessionRequestHandler : public std::enable_shared_from_this<SessionRequestHandler> {
public:
- SessionRequestHandler(KernelCore& kernel, const char* service_name_);
+ SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
+ ServiceThreadType thread_type);
virtual ~SessionRequestHandler();
/**
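The new constructor parameter lets a service opt into a dedicated host thread only when its requests may block; everything else shares the kernel's default service thread. A hedged sketch of the pattern, using hypothetical stand-in classes rather than yuzu's real ones:

// Illustrative only: how a service picks its host thread. HandlerBase and
// FooService are stand-ins for SessionRequestHandler and a real service.
enum class ServiceThreadType { Default, CreateNew };

class HandlerBase {
public:
    HandlerBase([[maybe_unused]] const char* name, ServiceThreadType type) {
        // A real handler would call kernel.GetDefaultServiceThread() or
        // kernel.CreateServiceThread(name) here, as in the diff above.
        (void)type;
    }
};

class FooService final : public HandlerBase {
public:
    FooService() : HandlerBase("foo:u", ServiceThreadType::Default) {}
};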
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 05779f2d5..423e8d8f5 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -89,9 +89,7 @@ public:
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
RegisterWithKernel();
}
- virtual ~KAutoObject() {
- UnregisterWithKernel();
- }
+ virtual ~KAutoObject() = default;
static KAutoObject* Create(KAutoObject* ptr);
@@ -163,11 +161,12 @@ public:
do {
ASSERT(cur_ref_count > 0);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
- std::memory_order_relaxed));
+ std::memory_order_acq_rel));
// If ref count hits zero, destroy the object.
if (cur_ref_count - 1 == 0) {
this->Destroy();
+ this->UnregisterWithKernel();
}
}
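Two things change in the release path: the decrement is now acq_rel, so the thread that drops the last reference observes all prior writes from other releasing threads, and kernel unregistration is deferred from the destructor to the final release. A minimal standalone sketch of the pattern, assuming Destroy()/Unregister() hooks:

#include <atomic>
#include <cassert>

struct RefCounted {
    std::atomic<int> refs{1};

    void Close() {
        int cur = refs.load(std::memory_order_relaxed);
        do {
            assert(cur > 0);
        } while (!refs.compare_exchange_weak(cur, cur - 1,
                                             std::memory_order_acq_rel));
        if (cur - 1 == 0) {
            Destroy();    // object-specific teardown first...
            Unregister(); // ...then drop it from global tracking
        }
    }
    void Destroy() {}
    void Unregister() {}
};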
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index b365ce7b7..09eaf004c 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -28,15 +28,21 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
auto& page_table = m_owner->PageTable();
// Construct the page group.
- m_page_group = KPageLinkedList(addr, Common::DivideUp(size, PageSize));
+ m_page_group =
+ KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
// Lock the memory.
R_TRY(page_table.LockForCodeMemory(addr, size))
// Clear the memory.
- for (const auto& block : m_page_group.Nodes()) {
- std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
- }
+ //
+ // FIXME: this ends up clobbering address ranges outside the scope of the mapping within
+ // guest memory, and is not specifically required if the guest program is correctly
+ // written, so disable until this is further investigated.
+ //
+ // for (const auto& block : m_page_group.Nodes()) {
+ // std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
+ // }
// Set remaining tracking members.
m_address = addr;
diff --git a/src/core/hle/kernel/k_page_linked_list.h b/src/core/hle/kernel/k_page_linked_list.h
index 0e2ae582a..869228322 100644
--- a/src/core/hle/kernel/k_page_linked_list.h
+++ b/src/core/hle/kernel/k_page_linked_list.h
@@ -89,6 +89,10 @@ public:
return ResultSuccess;
}
+ bool Empty() const {
+ return nodes.empty();
+ }
+
private:
std::list<Node> nodes;
};
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 02d93b12e..47ea3c89c 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -346,7 +346,8 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
return ResultSuccess;
}
-ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ ICacheInvalidationStrategy icache_invalidation_strategy) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -396,7 +397,11 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std
bool reprotected_pages = false;
SCOPE_EXIT({
if (reprotected_pages && any_code_pages) {
- system.InvalidateCpuInstructionCacheRange(dst_address, size);
+ if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
+ system.InvalidateCpuInstructionCacheRange(dst_address, size);
+ } else {
+ system.InvalidateCpuInstructionCaches();
+ }
}
});
@@ -486,6 +491,58 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
return address;
}
+ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ const size_t size = num_pages * PageSize;
+
+ // We're making a new group, not adding to an existing one.
+ R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+
+ // Begin traversal.
+ Common::PageTable::TraversalContext context;
+ Common::PageTable::TraversalEntry next_entry;
+ R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
+
+ // Prepare tracking variables.
+ PAddr cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate, adding to group as we go.
+ const auto& memory_layout = system.Kernel().MemoryLayout();
+ while (tot_size < size) {
+ R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
+ ResultInvalidCurrentMemory);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ const size_t cur_pages = cur_size / PageSize;
+
+ R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we add the right amount for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Add the last block.
+ const size_t cur_pages = cur_size / PageSize;
+ R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ return ResultSuccess;
+}
+
ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
KPageTable& src_page_table, VAddr src_addr) {
KScopedLightLock lk(general_lock);
@@ -511,6 +568,8 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None);
+ system.InvalidateCpuInstructionCaches();
+
return ResultSuccess;
}
@@ -1223,6 +1282,31 @@ ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryS
return ResultSuccess;
}
+ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+ // Ensure that the page group isn't null.
+ ASSERT(out != nullptr);
+
+ // Make sure that the region we're mapping is valid for the table.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Check if state allows us to create the group.
+ R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Create a new page group for the region.
+ R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+ return ResultSuccess;
+}
+
ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
@@ -1605,57 +1689,21 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
}
ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
-
- KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
-
- KMemoryPermission old_perm{};
-
- if (const ResultCode result{CheckMemoryState(
- nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
- KMemoryState::FlagCanCodeMemory, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
- result.IsError()) {
- return result;
- }
-
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-
- block_manager->UpdateLock(
- addr, size / PageSize,
- [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
- block->ShareToDevice(permission);
- },
- new_perm);
-
- return ResultSuccess;
+ return this->LockMemoryAndOpen(
+ nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None,
+ static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+ KMemoryPermission::KernelReadWrite),
+ KMemoryAttribute::Locked);
}
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
-
- KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
-
- KMemoryPermission old_perm{};
-
- if (const ResultCode result{CheckMemoryState(
- nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
- KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::Locked)};
- result.IsError()) {
- return result;
- }
-
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-
- block_manager->UpdateLock(
- addr, size / PageSize,
- [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
- block->UnshareToDevice(permission);
- },
- new_perm);
-
- return ResultSuccess;
+ return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
+ KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::Locked, nullptr);
}
ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
@@ -1991,4 +2039,109 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
return ResultSuccess;
}
+ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
+ size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
+ // Validate basic preconditions.
+ ASSERT((lock_attr & attr) == KMemoryAttribute::None);
+ ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+ KMemoryAttribute::None);
+
+ // Validate the lock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Check that the output page group is empty, if it exists.
+ if (out_pg) {
+ ASSERT(out_pg->GetNumPages() == 0);
+ }
+
+ // Check the state.
+ KMemoryState old_state{};
+ KMemoryPermission old_perm{};
+ KMemoryAttribute old_attr{};
+ size_t num_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Get the physical address, if we're supposed to.
+ if (out_paddr != nullptr) {
+ ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+ }
+
+ // Make the page group, if we're supposed to.
+ if (out_pg != nullptr) {
+ R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
+ }
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+ }
+
+ // Apply the memory block updates.
+ block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+ return ResultSuccess;
+}
+
+ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
+ // Validate basic preconditions.
+ ASSERT((attr_mask & lock_attr) == lock_attr);
+ ASSERT((attr & lock_attr) == lock_attr);
+
+ // Validate the unlock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Check the state.
+ KMemoryState old_state{};
+ KMemoryPermission old_perm{};
+ KMemoryAttribute old_attr{};
+ size_t num_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Check the page group.
+ if (pg != nullptr) {
+ UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
+ }
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+ }
+
+ // Apply the memory block updates.
+ block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+ return ResultSuccess;
+}
+
} // namespace Kernel
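MakePageGroup() above walks the backing page table and coalesces physically contiguous pages into single blocks before adding them to the group. The merging logic in isolation, as a rough sketch over a flat list of physical page addresses (names here are hypothetical):

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Coalesce consecutive 4KiB physical pages into (base, num_pages) runs,
// mirroring the cur_addr/cur_size accumulation in MakePageGroup.
std::vector<std::pair<std::uint64_t, std::size_t>> CoalescePages(
    const std::vector<std::uint64_t>& phys_pages, std::size_t page_size = 0x1000) {
    std::vector<std::pair<std::uint64_t, std::size_t>> runs;
    for (const std::uint64_t page : phys_pages) {
        if (!runs.empty() &&
            runs.back().first + runs.back().second * page_size == page) {
            ++runs.back().second; // extends the current contiguous run
        } else {
            runs.emplace_back(page, 1); // starts a new block
        }
    }
    return runs;
}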
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 54c6adf8d..dd6022975 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -12,6 +12,7 @@
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
@@ -25,6 +26,8 @@ class KMemoryBlockManager;
class KPageTable final {
public:
+ enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
+
YUZU_NON_COPYABLE(KPageTable);
YUZU_NON_MOVEABLE(KPageTable);
@@ -37,7 +40,8 @@ public:
ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
KMemoryPermission perm);
ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
- ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
+ ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ ICacheInvalidationStrategy icache_invalidation_strategy);
ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
VAddr src_addr);
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
@@ -71,6 +75,10 @@ public:
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
+ ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
Common::PageTable& PageTableImpl() {
return page_table_impl;
@@ -159,10 +167,37 @@ private:
attr_mask, attr, ignore_attr);
}
+ ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+ ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+ const KPageLinkedList* pg);
+
+ ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
+
bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();
}
+ bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
+ }
+
+ bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ *out = GetPhysicalAddr(virt_addr);
+
+ return *out != 0;
+ }
+
mutable KLightLock general_lock;
mutable KLightLock map_physical_memory_lock;
@@ -322,6 +357,7 @@ private:
bool is_aslr_enabled{};
u32 heap_fill_value{};
+ const KMemoryRegion* cached_physical_heap_region{};
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 48b17fc74..9f171e3da 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -422,7 +422,7 @@ private:
bool is_64bit_process = true;
/// Total running time for the process in ticks.
- u64 total_process_running_time_ticks = 0;
+ std::atomic<u64> total_process_running_time_ticks = 0;
/// Per-process handle table for storing created object handles in.
KHandleTable handle_table;
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6c0bb1672..526eb4b70 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -705,7 +705,7 @@ void KScheduler::Unload(KThread* thread) {
prev_thread = nullptr;
}
- thread->context_guard.Unlock();
+ thread->context_guard.unlock();
}
void KScheduler::Reload(KThread* thread) {
@@ -794,13 +794,13 @@ void KScheduler::SwitchToCurrent() {
do {
auto next_thread = current_thread.load();
if (next_thread != nullptr) {
- const auto locked = next_thread->context_guard.TryLock();
+ const auto locked = next_thread->context_guard.try_lock();
if (state.needs_scheduling.load()) {
- next_thread->context_guard.Unlock();
+ next_thread->context_guard.unlock();
break;
}
if (next_thread->GetActiveCore() != core_id) {
- next_thread->context_guard.Unlock();
+ next_thread->context_guard.unlock();
break;
}
if (!locked) {
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 93c47f1b1..016e0a818 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -4,6 +4,7 @@
#pragma once
+#include <atomic>
#include "common/assert.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_thread.h"
@@ -75,7 +76,7 @@ private:
KernelCore& kernel;
KAlignedSpinLock spin_lock{};
s32 lock_count{};
- KThread* owner_thread{};
+ std::atomic<KThread*> owner_thread{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp
index 433fc98e1..e66c0c992 100644
--- a/src/core/hle/kernel/k_server_port.cpp
+++ b/src/core/hle/kernel/k_server_port.cpp
@@ -62,6 +62,12 @@ void KServerPort::Destroy() {
// Close our reference to our parent.
parent->Close();
+
+ // Release host emulation members.
+ session_handler.reset();
+
+ // Ensure that the global list tracking server objects does not hold on to a reference.
+ kernel.UnregisterServerObject(this);
}
bool KServerPort::IsSignaled() const {
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 30c56ff29..7ac2ef254 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -49,6 +49,9 @@ void KServerSession::Destroy() {
// Release host emulation members.
manager.reset();
+
+ // Ensure that the global list tracking server objects does not hold on to a reference.
+ kernel.UnregisterServerObject(this);
}
void KServerSession::OnClientClosed() {
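KServerSession and KServerPort now unregister themselves in Destroy(), the counterpart to the RegisterServerObject() calls added above, so the shutdown path can close whatever remains. A minimal model of the kernel-wide tracking set, assuming only the standard library:

#include <mutex>
#include <unordered_set>

// Minimal model of the tracking set introduced here; the real version lives
// in KernelCore::Impl and stores KAutoObject* for sessions and ports.
class ServerObjectRegistry {
public:
    void Register(void* obj) {
        std::scoped_lock lk{mutex};
        objects.insert(obj);
    }
    void Unregister(void* obj) {
        std::scoped_lock lk{mutex};
        objects.erase(obj);
    }
    // On shutdown, the kernel copies this set and calls Close() on each entry.
private:
    std::mutex mutex;
    std::unordered_set<void*> objects;
};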
diff --git a/src/core/hle/kernel/k_spin_lock.cpp b/src/core/hle/kernel/k_spin_lock.cpp
index 4412aa4bb..527ff0f9f 100644
--- a/src/core/hle/kernel/k_spin_lock.cpp
+++ b/src/core/hle/kernel/k_spin_lock.cpp
@@ -4,51 +4,18 @@
#include "core/hle/kernel/k_spin_lock.h"
-#if _MSC_VER
-#include <intrin.h>
-#if _M_AMD64
-#define __x86_64__ 1
-#endif
-#if _M_ARM64
-#define __aarch64__ 1
-#endif
-#else
-#if __x86_64__
-#include <xmmintrin.h>
-#endif
-#endif
-
-namespace {
-
-void ThreadPause() {
-#if __x86_64__
- _mm_pause();
-#elif __aarch64__ && _MSC_VER
- __yield();
-#elif __aarch64__
- asm("yield");
-#endif
-}
-
-} // namespace
-
namespace Kernel {
void KSpinLock::Lock() {
- while (lck.test_and_set(std::memory_order_acquire)) {
- ThreadPause();
- }
+ lck.lock();
}
void KSpinLock::Unlock() {
- lck.clear(std::memory_order_release);
+ lck.unlock();
}
bool KSpinLock::TryLock() {
- if (lck.test_and_set(std::memory_order_acquire)) {
- return false;
- }
- return true;
+ return lck.try_lock();
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_spin_lock.h b/src/core/hle/kernel/k_spin_lock.h
index 4d87d006a..7868b25a5 100644
--- a/src/core/hle/kernel/k_spin_lock.h
+++ b/src/core/hle/kernel/k_spin_lock.h
@@ -4,7 +4,7 @@
#pragma once
-#include <atomic>
+#include <mutex>
#include "core/hle/kernel/k_scoped_lock.h"
@@ -25,7 +25,7 @@ public:
[[nodiscard]] bool TryLock();
private:
- std::atomic_flag lck = ATOMIC_FLAG_INIT;
+ std::mutex lck;
};
// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
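KSpinLock keeps its Lock()/Unlock()/TryLock() interface, and with it KScopedLock compatibility, but now defers to std::mutex so the host OS can park waiters instead of spinning on pause/yield. The resulting wrapper shape, sketched standalone:

#include <mutex>

class SpinLockShim {
public:
    void Lock() { lck.lock(); }
    void Unlock() { lck.unlock(); }
    [[nodiscard]] bool TryLock() { return lck.try_lock(); }

private:
    std::mutex lck; // host mutex replaces the atomic_flag spin loop
};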
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 94c8faf68..af71987e8 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -723,10 +723,10 @@ void KThread::UpdateState() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Set our suspend flags in state.
- const auto old_state = thread_state;
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
const auto new_state =
static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
- thread_state = new_state;
+ thread_state.store(new_state, std::memory_order_relaxed);
// Note the state change in scheduler.
if (new_state != old_state) {
@@ -738,8 +738,8 @@ void KThread::Continue() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Clear our suspend flags in state.
- const auto old_state = thread_state;
- thread_state = old_state & ThreadState::Mask;
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+ thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
// Note the state change in scheduler.
KScheduler::OnThreadStateChanged(kernel, this, old_state);
@@ -1079,17 +1079,10 @@ void KThread::IfDummyThreadTryWait() {
return;
}
- // Block until we can grab the lock.
- KScopedSpinLock lk{dummy_wait_lock};
-}
-
-void KThread::IfDummyThreadBeginWait() {
- if (!IsDummyThread()) {
- return;
- }
-
- // Ensure the thread will block when IfDummyThreadTryWait is called.
- dummy_wait_lock.Lock();
+ // Block until we are no longer waiting.
+ std::unique_lock lk(dummy_wait_lock);
+ dummy_wait_cv.wait(
+ lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); });
}
void KThread::IfDummyThreadEndWait() {
@@ -1097,8 +1090,8 @@ void KThread::IfDummyThreadEndWait() {
return;
}
- // Ensure the thread will no longer block.
- dummy_wait_lock.Unlock();
+ // Wake up the waiting thread.
+ dummy_wait_cv.notify_one();
}
void KThread::BeginWait(KThreadQueue* queue) {
@@ -1107,9 +1100,6 @@ void KThread::BeginWait(KThreadQueue* queue) {
// Set our wait queue.
wait_queue = queue;
-
- // Special case for dummy threads to ensure they block.
- IfDummyThreadBeginWait();
}
void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
@@ -1158,10 +1148,11 @@ void KThread::SetState(ThreadState state) {
SetMutexWaitAddressForDebugging({});
SetWaitReasonForDebugging({});
- const ThreadState old_state = thread_state;
- thread_state =
- static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
- if (thread_state != old_state) {
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+ thread_state.store(
+ static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
+ std::memory_order_relaxed);
+ if (thread_state.load(std::memory_order_relaxed) != old_state) {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
}
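Dummy (host) threads previously blocked by holding a spinlock for the whole wait; they now sleep on a condition variable and are woken by IfDummyThreadEndWait(), which also lets shutdown interrupt the wait. The underlying pattern as a minimal standalone sketch:

#include <condition_variable>
#include <mutex>

class DummyWait {
public:
    // Called on the waiting host thread: sleep until Signal() flips the state.
    void Wait() {
        std::unique_lock lk(mutex);
        cv.wait(lk, [&] { return !waiting; });
    }
    // Called by another thread to end the wait.
    void Signal() {
        {
            std::scoped_lock lk(mutex);
            waiting = false;
        }
        cv.notify_one();
    }

private:
    std::mutex mutex;
    std::condition_variable cv;
    bool waiting{true};
};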
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index f46db7298..4892fdf76 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -5,6 +5,9 @@
#pragma once
#include <array>
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
#include <span>
#include <string>
#include <utility>
@@ -14,6 +17,7 @@
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
+#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/k_light_lock.h"
@@ -255,11 +259,11 @@ public:
[[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
[[nodiscard]] ThreadState GetState() const {
- return thread_state & ThreadState::Mask;
+ return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask;
}
[[nodiscard]] ThreadState GetRawState() const {
- return thread_state;
+ return thread_state.load(std::memory_order_relaxed);
}
void SetState(ThreadState state);
@@ -641,7 +645,6 @@ public:
// blocking as needed.
void IfDummyThreadTryWait();
- void IfDummyThreadBeginWait();
void IfDummyThreadEndWait();
private:
@@ -751,7 +754,7 @@ private:
KAffinityMask original_physical_affinity_mask{};
s32 original_physical_ideal_core_id{};
s32 num_core_migration_disables{};
- ThreadState thread_state{};
+ std::atomic<ThreadState> thread_state{};
std::atomic<bool> termination_requested{};
bool wait_cancelled{};
bool cancellable{};
@@ -761,13 +764,14 @@ private:
s8 priority_inheritance_count{};
bool resource_limit_release_hint{};
StackParameters stack_parameters{};
- KSpinLock context_guard{};
- KSpinLock dummy_wait_lock{};
+ Common::SpinLock context_guard{};
// For emulation
std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{};
ThreadType thread_type{};
+ std::mutex dummy_wait_lock;
+ std::condition_variable dummy_wait_cv;
// For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 34da7c23b..5984afd7e 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -61,6 +61,7 @@ struct KernelCore::Impl {
global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
global_handle_table->Initialize(KHandleTable::MaxTableSize);
+ default_service_thread = CreateServiceThread(kernel, "DefaultServiceThread");
is_phantom_mode_for_singlecore = false;
@@ -84,7 +85,7 @@ struct KernelCore::Impl {
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- cores[core_id].Initialize(current_process->Is64BitProcess());
+ cores[core_id].Initialize((*current_process).Is64BitProcess());
system.Memory().SetCurrentPageTable(*current_process, core_id);
}
}
@@ -95,15 +96,15 @@ struct KernelCore::Impl {
process_list.clear();
- // Close all open server ports.
- std::unordered_set<KServerPort*> server_ports_;
+ // Close all open server sessions and ports.
+ std::unordered_set<KAutoObject*> server_objects_;
{
- std::lock_guard lk(server_ports_lock);
- server_ports_ = server_ports;
- server_ports.clear();
+ std::scoped_lock lk(server_objects_lock);
+ server_objects_ = server_objects;
+ server_objects.clear();
}
- for (auto* server_port : server_ports_) {
- server_port->Close();
+ for (auto* server_object : server_objects_) {
+ server_object->Close();
}
// Ensures all service threads gracefully shutdown.
@@ -139,6 +140,7 @@ struct KernelCore::Impl {
CleanupObject(font_shared_mem);
CleanupObject(irs_shared_mem);
CleanupObject(time_shared_mem);
+ CleanupObject(hidbus_shared_mem);
CleanupObject(system_resource_limit);
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
@@ -156,7 +158,7 @@ struct KernelCore::Impl {
// Close kernel objects that were not freed on shutdown
{
- std::lock_guard lk(registered_in_use_objects_lock);
+ std::scoped_lock lk{registered_in_use_objects_lock};
if (registered_in_use_objects.size()) {
for (auto& object : registered_in_use_objects) {
object->Close();
@@ -167,17 +169,17 @@ struct KernelCore::Impl {
// Shutdown all processes.
if (current_process) {
- current_process->Finalize();
+ (*current_process).Finalize();
// current_process->Close();
// TODO: The current process should be destroyed based on accurate ref counting after
// calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
- current_process->Destroy();
+ (*current_process).Destroy();
current_process = nullptr;
}
// Track kernel objects that were not freed on shutdown
{
- std::lock_guard lk(registered_objects_lock);
+ std::scoped_lock lk{registered_objects_lock};
if (registered_objects.size()) {
LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!",
registered_objects.size());
@@ -621,16 +623,20 @@ struct KernelCore::Impl {
constexpr std::size_t font_size{0x1100000};
constexpr std::size_t irs_size{0x8000};
constexpr std::size_t time_size{0x1000};
+ constexpr std::size_t hidbus_size{0x1000};
const PAddr hid_phys_addr{system_pool.GetAddress()};
const PAddr font_phys_addr{system_pool.GetAddress() + hid_size};
const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};
+ const PAddr hidbus_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size +
+ time_size};
hid_shared_mem = KSharedMemory::Create(system.Kernel());
font_shared_mem = KSharedMemory::Create(system.Kernel());
irs_shared_mem = KSharedMemory::Create(system.Kernel());
time_shared_mem = KSharedMemory::Create(system.Kernel());
+ hidbus_shared_mem = KSharedMemory::Create(system.Kernel());
hid_shared_mem->Initialize(system.DeviceMemory(), nullptr,
{hid_phys_addr, hid_size / PageSize},
@@ -648,6 +654,10 @@ struct KernelCore::Impl {
{time_phys_addr, time_size / PageSize},
Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
time_phys_addr, time_size, "Time:SharedMemory");
+ hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr,
+ {hidbus_phys_addr, hidbus_size / PageSize},
+ Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
+ hidbus_phys_addr, hidbus_size, "HidBus:SharedMemory");
}
KClientPort* CreateNamedServicePort(std::string name) {
@@ -658,13 +668,20 @@ struct KernelCore::Impl {
}
KClientPort* port = &search->second(system.ServiceManager(), system);
- {
- std::lock_guard lk(server_ports_lock);
- server_ports.insert(&port->GetParent()->GetServerPort());
- }
+ RegisterServerObject(&port->GetParent()->GetServerPort());
return port;
}
+ void RegisterServerObject(KAutoObject* server_object) {
+ std::scoped_lock lk(server_objects_lock);
+ server_objects.insert(server_object);
+ }
+
+ void UnregisterServerObject(KAutoObject* server_object) {
+ std::scoped_lock lk(server_objects_lock);
+ server_objects.erase(server_object);
+ }
+
std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(KernelCore& kernel,
const std::string& name) {
auto service_thread = std::make_shared<Kernel::ServiceThread>(kernel, 1, name);
@@ -677,6 +694,12 @@ struct KernelCore::Impl {
void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
if (auto strong_ptr = service_thread.lock()) {
+ if (strong_ptr == default_service_thread.lock()) {
+ // Nothing to do here, the service is using default_service_thread, which will be
+ // released on shutdown.
+ return;
+ }
+
service_threads_manager.QueueWork(
[this, strong_ptr{std::move(strong_ptr)}]() { service_threads.erase(strong_ptr); });
}
@@ -686,7 +709,7 @@ struct KernelCore::Impl {
service_threads_manager.QueueWork([this]() { service_threads.clear(); });
}
- std::mutex server_ports_lock;
+ std::mutex server_objects_lock;
std::mutex registered_objects_lock;
std::mutex registered_in_use_objects_lock;
@@ -697,7 +720,7 @@ struct KernelCore::Impl {
// Lists all processes that exist in the current session.
std::vector<KProcess*> process_list;
- KProcess* current_process{};
+ std::atomic<KProcess*> current_process{};
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
Kernel::TimeManager time_manager;
@@ -716,7 +739,7 @@ struct KernelCore::Impl {
/// the ConnectToPort SVC.
std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
NamedPortTable named_ports;
- std::unordered_set<KServerPort*> server_ports;
+ std::unordered_set<KAutoObject*> server_objects;
std::unordered_set<KAutoObject*> registered_objects;
std::unordered_set<KAutoObject*> registered_in_use_objects;
@@ -734,12 +757,14 @@ struct KernelCore::Impl {
Kernel::KSharedMemory* font_shared_mem{};
Kernel::KSharedMemory* irs_shared_mem{};
Kernel::KSharedMemory* time_shared_mem{};
+ Kernel::KSharedMemory* hidbus_shared_mem{};
// Memory layout
std::unique_ptr<KMemoryLayout> memory_layout;
// Threads used for services
- std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
+ std::unordered_set<std::shared_ptr<ServiceThread>> service_threads;
+ std::weak_ptr<ServiceThread> default_service_thread;
Common::ThreadWorker service_threads_manager;
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
@@ -920,23 +945,31 @@ KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
return impl->CreateNamedServicePort(std::move(name));
}
+void KernelCore::RegisterServerObject(KAutoObject* server_object) {
+ impl->RegisterServerObject(server_object);
+}
+
+void KernelCore::UnregisterServerObject(KAutoObject* server_object) {
+ impl->UnregisterServerObject(server_object);
+}
+
void KernelCore::RegisterKernelObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_objects_lock);
+ std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.insert(object);
}
void KernelCore::UnregisterKernelObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_objects_lock);
+ std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.erase(object);
}
void KernelCore::RegisterInUseObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_in_use_objects_lock);
+ std::scoped_lock lk{impl->registered_in_use_objects_lock};
impl->registered_in_use_objects.insert(object);
}
void KernelCore::UnregisterInUseObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_in_use_objects_lock);
+ std::scoped_lock lk{impl->registered_in_use_objects_lock};
impl->registered_in_use_objects.erase(object);
}
@@ -1024,6 +1057,14 @@ const Kernel::KSharedMemory& KernelCore::GetTimeSharedMem() const {
return *impl->time_shared_mem;
}
+Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() {
+ return *impl->hidbus_shared_mem;
+}
+
+const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
+ return *impl->hidbus_shared_mem;
+}
+
void KernelCore::Suspend(bool in_suspention) {
const bool should_suspend = exception_exited || in_suspention;
{
@@ -1065,6 +1106,10 @@ std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::
return impl->CreateServiceThread(*this, name);
}
+std::weak_ptr<Kernel::ServiceThread> KernelCore::GetDefaultServiceThread() const {
+ return impl->default_service_thread;
+}
+
void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
impl->ReleaseServiceThread(service_thread);
}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 4c68e96df..12e44b8a5 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -195,6 +195,14 @@ public:
/// Opens a port to a service previously registered with RegisterNamedService.
KClientPort* CreateNamedServicePort(std::string name);
+    /// Registers a server session or port with the global emulation state, to be freed on shutdown.
+ /// This is necessary because we do not emulate processes for HLE sessions and ports.
+ void RegisterServerObject(KAutoObject* server_object);
+
+    /// Unregisters a server session or port previously registered with RegisterServerObject when
+ /// it was destroyed during the current emulation session.
+ void UnregisterServerObject(KAutoObject* server_object);
+
/// Registers all kernel objects with the global emulation state, this is purely for tracking
/// leaks after emulation has been shutdown.
void RegisterKernelObject(KAutoObject* object);
@@ -256,6 +264,12 @@ public:
/// Gets the shared memory object for Time services.
const Kernel::KSharedMemory& GetTimeSharedMem() const;
+ /// Gets the shared memory object for HIDBus services.
+ Kernel::KSharedMemory& GetHidBusSharedMem();
+
+ /// Gets the shared memory object for HIDBus services.
+ const Kernel::KSharedMemory& GetHidBusSharedMem() const;
+
/// Suspend/unsuspend the OS.
void Suspend(bool in_suspention);
@@ -271,9 +285,11 @@ public:
void ExitSVCProfile();
/**
- * Creates an HLE service thread, which are used to execute service routines asynchronously.
- * While these are allocated per ServerSession, these need to be owned and managed outside
- * of ServerSession to avoid a circular dependency.
+     * Creates a host service thread, used to execute HLE service routines asynchronously. While
+     * these threads are allocated per ServerSession, they need to be owned and managed outside of
+     * ServerSession to avoid a circular dependency. In general, most services can simply use the
+     * default service thread and do not need their own host service thread. See
+     * GetDefaultServiceThread.
* @param name String name for the ServerSession creating this thread, used for debug
* purposes.
* @returns A weak pointer to the newly created service thread.
@@ -281,6 +297,14 @@ public:
std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
/**
+ * Gets the default host service thread, which executes HLE service requests. Unless service
+ * requests need to block on the host, the default service thread should be used in favor of
+ * creating a new service thread.
+     * @returns A weak pointer to the default service thread.
+ */
+ std::weak_ptr<Kernel::ServiceThread> GetDefaultServiceThread() const;
+
+ /**
* Releases an HLE service thread, instructing KernelCore to free it. This should be called when
* the ServerSession associated with the thread is destroyed.
* @param service_thread Service thread to release.
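Both CreateServiceThread() and GetDefaultServiceThread() hand back std::weak_ptr, so call sites look the same either way. A hedged usage sketch (the function name is illustrative):

#include "core/hle/kernel/kernel.h"

// Usage sketch: callers must lock() the weak_ptr before queueing work,
// whether the thread is dedicated or the shared default one.
void QueueOnDefaultThread(Kernel::KernelCore& kernel) {
    if (auto thread = kernel.GetDefaultServiceThread().lock()) {
        // ... queue the HLE request on *thread ...
    }
}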
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 7477668e4..cc49e8c7e 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include "common/spin_lock.h"
#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
@@ -16,7 +15,7 @@ namespace Kernel {
PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_,
Core::CPUInterrupts& interrupts_)
: core_index{core_index_}, system{system_}, scheduler{scheduler_},
- interrupts{interrupts_}, guard{std::make_unique<Common::SpinLock>()} {
+ interrupts{interrupts_}, guard{std::make_unique<std::mutex>()} {
#ifdef ARCHITECTURE_x86_64
// TODO(bunnei): Initialization relies on a core being available. We may later replace this with
// a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
@@ -58,6 +57,7 @@ bool PhysicalCore::IsInterrupted() const {
void PhysicalCore::Interrupt() {
guard->lock();
interrupts[core_index].SetInterrupt(true);
+ arm_interface->SignalInterrupt();
guard->unlock();
}
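Interrupt() now signals the ARM interface so the JIT leaves its run loop promptly rather than waiting for the next poll of the interrupt flag. A minimal model of that handshake (the SignalInterrupt() call is yuzu's; the class here is illustrative):

#include <atomic>

class InterruptLine {
public:
    void Raise() {
        pending.store(true, std::memory_order_release);
        // yuzu additionally calls arm_interface->SignalInterrupt() here so
        // the JIT exits its run loop instead of waiting for the next check.
    }
    bool Pending() const { return pending.load(std::memory_order_acquire); }

private:
    std::atomic<bool> pending{false};
};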
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 16a032e89..f2112fc1d 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -6,13 +6,10 @@
#include <cstddef>
#include <memory>
+#include <mutex>
#include "core/arm/arm_interface.h"
-namespace Common {
-class SpinLock;
-}
-
namespace Kernel {
class KScheduler;
} // namespace Kernel
@@ -91,7 +88,7 @@ private:
Core::System& system;
Kernel::KScheduler& scheduler;
Core::CPUInterrupts& interrupts;
- std::unique_ptr<Common::SpinLock> guard;
+ std::unique_ptr<std::mutex> guard;
std::unique_ptr<Core::ARM_Interface> arm_interface;
};
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 839171e85..0c86435b5 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1362,8 +1362,11 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
ResultInvalidMemoryRegion);
// Create a new page group.
- KMemoryInfo kBlockInfo = dst_pt.QueryInfo(dst_address);
- KPageLinkedList pg(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
+ KPageLinkedList pg;
+ R_TRY(src_pt.MakeAndOpenPageGroup(
+ std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
+ KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
// Map the group.
R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
@@ -1408,8 +1411,8 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
}
static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
- LOG_TRACE(Kernel_SVC, "called, handle_out={}, address=0x{:X}, size=0x{:X}",
- static_cast<void*>(out), address, size);
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
+
// Get kernel instance.
auto& kernel = system.Kernel();
@@ -1664,7 +1667,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
return ResultInvalidAddress;
}
- if (size == 0 || Common::Is4KBAligned(size)) {
+ if (size == 0 || !Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
return ResultInvalidSize;
}
@@ -1710,7 +1713,8 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
return ResultInvalidMemoryRegion;
}
- return page_table.UnmapCodeMemory(dst_address, src_address, size);
+ return page_table.UnmapCodeMemory(dst_address, src_address, size,
+ KPageTable::ICacheInvalidationStrategy::InvalidateAll);
}
/// Exits the current process
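Note the one-character fix in UnmapProcessCodeMemory() above: the old guard rejected sizes that were 4KB-aligned, where the intent is to reject sizes that are zero or not aligned. The corrected check in isolation (Is4KBAligned reimplemented here for illustration):

#include <cstdint>

// Stand-in for Common::Is4KBAligned.
constexpr bool Is4KBAligned(std::uint64_t value) {
    return (value & 0xFFF) == 0;
}

constexpr bool IsValidSize(std::uint64_t size) {
    return size != 0 && Is4KBAligned(size); // corrected sense of the test
}

static_assert(!IsValidSize(0));
static_assert(IsValidSize(0x1000));
static_assert(!IsValidSize(0x1234));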
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index aa985d820..5b8fe8eae 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -24,7 +24,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
}
void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
- std::lock_guard lock{mutex};
+ std::scoped_lock lock{mutex};
if (nanoseconds > 0) {
ASSERT(thread);
ASSERT(thread->GetState() != ThreadState::Runnable);
@@ -35,7 +35,7 @@ void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
}
void TimeManager::UnscheduleTimeEvent(KThread* thread) {
- std::lock_guard lock{mutex};
+ std::scoped_lock lock{mutex};
system.CoreTiming().UnscheduleEvent(time_manager_event_type,
reinterpret_cast<uintptr_t>(thread));
}