Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/kernel.cpp       14
-rw-r--r--  src/core/hle/kernel/kernel.h          5
-rw-r--r--  src/core/hle/kernel/process.cpp     137
-rw-r--r--  src/core/hle/kernel/process.h        20
-rw-r--r--  src/core/hle/kernel/svc.cpp           8
-rw-r--r--  src/core/hle/kernel/thread.cpp       10
-rw-r--r--  src/core/hle/kernel/thread.h         16
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp   31
-rw-r--r--  src/core/hle/kernel/vm_manager.h     31
9 files changed, 181 insertions(+), 91 deletions(-)
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 757e5f21f..799e5e0d8 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -99,7 +99,8 @@ struct KernelCore::Impl {
void Shutdown() {
next_object_id = 0;
- next_process_id = Process::ProcessIDMin;
+ next_kernel_process_id = Process::InitialKIPIDMin;
+ next_user_process_id = Process::ProcessIDMin;
next_thread_id = 1;
process_list.clear();
@@ -132,7 +133,8 @@ struct KernelCore::Impl {
}
std::atomic<u32> next_object_id{0};
- std::atomic<u64> next_process_id{Process::ProcessIDMin};
+ std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
+ std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
std::atomic<u64> next_thread_id{1};
// Lists all processes that exist in the current session.
@@ -226,8 +228,12 @@ u64 KernelCore::CreateNewThreadID() {
return impl->next_thread_id++;
}
-u64 KernelCore::CreateNewProcessID() {
- return impl->next_process_id++;
+u64 KernelCore::CreateNewKernelProcessID() {
+ return impl->next_kernel_process_id++;
+}
+
+u64 KernelCore::CreateNewUserProcessID() {
+ return impl->next_user_process_id++;
}
Core::Timing::EventType* KernelCore::ThreadWakeupCallbackEventType() const {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 6b8738599..0cc44ee76 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -96,7 +96,10 @@ private:
u32 CreateNewObjectID();
/// Creates a new process ID, incrementing the internal process ID counter;
- u64 CreateNewProcessID();
+ u64 CreateNewKernelProcessID();
+
+ /// Creates a new user process ID, incrementing the internal process ID counter.
+ u64 CreateNewUserProcessID();
/// Creates a new thread ID, incrementing the internal thread ID counter.
u64 CreateNewThreadID();
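With this change the kernel draws kernel-internal (KIP) and userland process IDs from two independent monotonically increasing counters, so the two ID spaces can never collide. A minimal standalone sketch of the pattern (the range constants are illustrative stand-ins for Process::InitialKIPIDMin and Process::ProcessIDMin, not necessarily yuzu's exact values):

```cpp
#include <atomic>
#include <cstdint>

// Illustrative starting values; the real bounds are defined by the
// process ID enums in process.h.
constexpr std::uint64_t kInitialKIPIDMin = 1; // first ID for kernel-internal processes
constexpr std::uint64_t kProcessIDMin = 81;   // first ID for userland processes

std::atomic<std::uint64_t> next_kernel_process_id{kInitialKIPIDMin};
std::atomic<std::uint64_t> next_user_process_id{kProcessIDMin};

// Fetch-and-increment, mirroring CreateNewKernelProcessID/CreateNewUserProcessID.
std::uint64_t CreateNewKernelProcessID() {
    return next_kernel_process_id++;
}

std::uint64_t CreateNewUserProcessID() {
    return next_user_process_id++;
}
```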
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 2b81a8d4f..f45ef05f6 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include <algorithm>
+#include <bitset>
#include <memory>
#include <random>
#include "common/alignment.h"
@@ -48,7 +49,58 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) {
}
} // Anonymous namespace
-SharedPtr<Process> Process::Create(Core::System& system, std::string name) {
+// Represents a page used for thread-local storage.
+//
+// Each TLS page contains slots that may be used by processes and threads.
+// Every process and thread is created with a slot in some arbitrary page
+// (whichever page happens to have an available slot).
+class TLSPage {
+public:
+ static constexpr std::size_t num_slot_entries = Memory::PAGE_SIZE / Memory::TLS_ENTRY_SIZE;
+
+ explicit TLSPage(VAddr address) : base_address{address} {}
+
+ bool HasAvailableSlots() const {
+ return !is_slot_used.all();
+ }
+
+ VAddr GetBaseAddress() const {
+ return base_address;
+ }
+
+ std::optional<VAddr> ReserveSlot() {
+ for (std::size_t i = 0; i < is_slot_used.size(); i++) {
+ if (is_slot_used[i]) {
+ continue;
+ }
+
+ is_slot_used[i] = true;
+ return base_address + (i * Memory::TLS_ENTRY_SIZE);
+ }
+
+ return std::nullopt;
+ }
+
+ void ReleaseSlot(VAddr address) {
+ // Ensure that all given addresses are consistent with how TLS pages
+ // are intended to be used when releasing slots.
+ ASSERT(IsWithinPage(address));
+ ASSERT((address % Memory::TLS_ENTRY_SIZE) == 0);
+
+ const std::size_t index = (address - base_address) / Memory::TLS_ENTRY_SIZE;
+ is_slot_used[index] = false;
+ }
+
+private:
+ bool IsWithinPage(VAddr address) const {
+ return base_address <= address && address < base_address + Memory::PAGE_SIZE;
+ }
+
+ VAddr base_address;
+ std::bitset<num_slot_entries> is_slot_used;
+};
+
+SharedPtr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) {
auto& kernel = system.Kernel();
SharedPtr<Process> process(new Process(system));
@@ -56,7 +108,8 @@ SharedPtr<Process> Process::Create(Core::System& system, std::string name) {
process->resource_limit = kernel.GetSystemResourceLimit();
process->status = ProcessStatus::Created;
process->program_id = 0;
- process->process_id = kernel.CreateNewProcessID();
+ process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
+ : kernel.CreateNewUserProcessID();
process->capabilities.InitializeForMetadatalessProcess();
std::mt19937 rng(Settings::values.rng_seed.value_or(0));
@@ -179,61 +232,55 @@ void Process::PrepareForTermination() {
}
/**
- * Finds a free location for the TLS section of a thread.
- * @param tls_slots The TLS page array of the thread's owner process.
- * Returns a tuple of (page, slot, alloc_needed) where:
- * page: The index of the first allocated TLS page that has free slots.
- * slot: The index of the first free slot in the indicated page.
- * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full).
+ * Attempts to find a TLS page that contains a free slot for
+ * use by a thread.
+ *
+ * @returns If a page with an available slot is found, then an iterator
+ * pointing to the page is returned. Otherwise the end iterator
+ * is returned instead.
*/
-static std::tuple<std::size_t, std::size_t, bool> FindFreeThreadLocalSlot(
- const std::vector<std::bitset<8>>& tls_slots) {
- // Iterate over all the allocated pages, and try to find one where not all slots are used.
- for (std::size_t page = 0; page < tls_slots.size(); ++page) {
- const auto& page_tls_slots = tls_slots[page];
- if (!page_tls_slots.all()) {
- // We found a page with at least one free slot, find which slot it is
- for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) {
- if (!page_tls_slots.test(slot)) {
- return std::make_tuple(page, slot, false);
- }
- }
- }
- }
-
- return std::make_tuple(0, 0, true);
+static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
+ return std::find_if(tls_pages.begin(), tls_pages.end(),
+ [](const auto& page) { return page.HasAvailableSlots(); });
}
-VAddr Process::MarkNextAvailableTLSSlotAsUsed(Thread& thread) {
- auto [available_page, available_slot, needs_allocation] = FindFreeThreadLocalSlot(tls_slots);
- const VAddr tls_begin = vm_manager.GetTLSIORegionBaseAddress();
+VAddr Process::CreateTLSRegion() {
+ auto tls_page_iter = FindTLSPageWithAvailableSlots(tls_pages);
- if (needs_allocation) {
- tls_slots.emplace_back(0); // The page is completely available at the start
- available_page = tls_slots.size() - 1;
- available_slot = 0; // Use the first slot in the new page
+ if (tls_page_iter == tls_pages.cend()) {
+ const auto region_address =
+ vm_manager.FindFreeRegion(vm_manager.GetTLSIORegionBaseAddress(),
+ vm_manager.GetTLSIORegionEndAddress(), Memory::PAGE_SIZE);
+ ASSERT(region_address.Succeeded());
- // Allocate some memory from the end of the linear heap for this region.
- auto& tls_memory = thread.GetTLSMemory();
- tls_memory->insert(tls_memory->end(), Memory::PAGE_SIZE, 0);
+ const auto map_result = vm_manager.MapMemoryBlock(
+ *region_address, std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE), 0,
+ Memory::PAGE_SIZE, MemoryState::ThreadLocal);
+ ASSERT(map_result.Succeeded());
- vm_manager.RefreshMemoryBlockMappings(tls_memory.get());
+ tls_pages.emplace_back(*region_address);
- vm_manager.MapMemoryBlock(tls_begin + available_page * Memory::PAGE_SIZE, tls_memory, 0,
- Memory::PAGE_SIZE, MemoryState::ThreadLocal);
- }
+ const auto reserve_result = tls_pages.back().ReserveSlot();
+ ASSERT(reserve_result.has_value());
- tls_slots[available_page].set(available_slot);
+ return *reserve_result;
+ }
- return tls_begin + available_page * Memory::PAGE_SIZE + available_slot * Memory::TLS_ENTRY_SIZE;
+ return *tls_page_iter->ReserveSlot();
}
-void Process::FreeTLSSlot(VAddr tls_address) {
- const VAddr tls_base = tls_address - vm_manager.GetTLSIORegionBaseAddress();
- const VAddr tls_page = tls_base / Memory::PAGE_SIZE;
- const VAddr tls_slot = (tls_base % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
+void Process::FreeTLSRegion(VAddr tls_address) {
+ const VAddr aligned_address = Common::AlignDown(tls_address, Memory::PAGE_SIZE);
+ auto iter =
+ std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
+ return page.GetBaseAddress() == aligned_address;
+ });
+
+ // Something has gone very wrong if we're freeing a region
+ // with no actual page available.
+ ASSERT(iter != tls_pages.cend());
- tls_slots[tls_page].reset(tls_slot);
+ iter->ReleaseSlot(tls_address);
}
void Process::LoadModule(CodeSet module_, VAddr base_addr) {
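The TLSPage class introduced above replaces the per-process std::vector<std::bitset<8>> bookkeeping: each page spans one guest page and hands out TLS_ENTRY_SIZE-sized slots. A self-contained round-trip sketch of the slot lifecycle, with the Memory:: constants inlined as assumed stand-ins (0x1000-byte pages, 0x200-byte TLS entries):

```cpp
#include <bitset>
#include <cassert>
#include <cstdint>
#include <optional>

using VAddr = std::uint64_t;
constexpr std::size_t kPageSize = 0x1000; // stand-in for Memory::PAGE_SIZE
constexpr std::size_t kEntrySize = 0x200; // stand-in for Memory::TLS_ENTRY_SIZE

// Condensed copy of the TLSPage class from the diff, constants inlined.
class TLSPage {
public:
    explicit TLSPage(VAddr address) : base_address{address} {}

    bool HasAvailableSlots() const {
        return !is_slot_used.all();
    }

    std::optional<VAddr> ReserveSlot() {
        for (std::size_t i = 0; i < is_slot_used.size(); i++) {
            if (!is_slot_used[i]) {
                is_slot_used[i] = true;
                return base_address + i * kEntrySize;
            }
        }
        return std::nullopt;
    }

    void ReleaseSlot(VAddr address) {
        is_slot_used[(address - base_address) / kEntrySize] = false;
    }

private:
    VAddr base_address;
    std::bitset<kPageSize / kEntrySize> is_slot_used;
};

int main() {
    TLSPage page{0x20000000};
    const auto first = page.ReserveSlot();  // slot 0 -> page base
    const auto second = page.ReserveSlot(); // slot 1 -> base + entry size
    assert(first && *first == 0x20000000);
    assert(second && *second == 0x20000000 + kEntrySize);

    page.ReleaseSlot(*first);              // slot 0 becomes reusable
    assert(*page.ReserveSlot() == *first); // and is handed out again first
}
```

Releasing a slot simply clears its bit, so the next reservation reuses the lowest free slot first.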
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 29e016983..83ea02bee 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -5,7 +5,6 @@
#pragma once
#include <array>
-#include <bitset>
#include <cstddef>
#include <list>
#include <string>
@@ -32,6 +31,7 @@ namespace Kernel {
class KernelCore;
class ResourceLimit;
class Thread;
+class TLSPage;
struct CodeSet;
@@ -73,9 +73,15 @@ public:
ProcessIDMax = 0xFFFFFFFFFFFFFFFF,
};
+ // Used to determine how process IDs are assigned.
+ enum class ProcessType {
+ KernelInternal,
+ Userland,
+ };
+
static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
- static SharedPtr<Process> Create(Core::System& system, std::string name);
+ static SharedPtr<Process> Create(Core::System& system, std::string name, ProcessType type);
std::string GetTypeName() const override {
return "Process";
@@ -254,10 +260,10 @@ public:
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
- VAddr MarkNextAvailableTLSSlotAsUsed(Thread& thread);
+ [[nodiscard]] VAddr CreateTLSRegion();
// Frees a used TLS slot identified by the given address
- void FreeTLSSlot(VAddr tls_address);
+ void FreeTLSRegion(VAddr tls_address);
private:
explicit Process(Core::System& system);
@@ -284,7 +290,7 @@ private:
u64 code_memory_size = 0;
/// Current status of the process
- ProcessStatus status;
+ ProcessStatus status{};
/// The ID of this process
u64 process_id = 0;
@@ -304,7 +310,7 @@ private:
/// holds the TLS for a specific thread. This vector contains which parts are in use for each
/// page as a bitmask.
/// This vector will grow as more pages are allocated for new threads.
- std::vector<std::bitset<8>> tls_slots;
+ std::vector<TLSPage> tls_pages;
/// Contains the parsed process capability descriptors.
ProcessCapabilities capabilities;
@@ -333,7 +339,7 @@ private:
Mutex mutex;
/// Random values for svcGetInfo RandomEntropy
- std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy;
+ std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
/// List of threads that are running with this process as their owner.
std::list<const Thread*> thread_list;
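Since Create's signature changed, every caller must now state which ID space the new process belongs to. A hypothetical call site under the new signature (fragment only; these call sites are not part of this diff, and system is assumed to be a live Core::System reference inside namespace Kernel):

```cpp
// Hypothetical fragment: spawning a kernel-internal process versus a
// userland application under the new three-argument signature.
SharedPtr<Process> kip = Process::Create(system, "FS", Process::ProcessType::KernelInternal);
SharedPtr<Process> app = Process::Create(system, "application", Process::ProcessType::Userland);
```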
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index f9c606bc5..de6363ff2 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -38,6 +38,7 @@
#include "core/hle/result.h"
#include "core/hle/service/service.h"
#include "core/memory.h"
+#include "core/reporter.h"
namespace Kernel {
namespace {
@@ -594,6 +595,7 @@ struct BreakReason {
static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
BreakReason break_reason{reason};
bool has_dumped_buffer{};
+ std::vector<u8> debug_buffer;
const auto handle_debug_buffer = [&](VAddr addr, u64 sz) {
if (sz == 0 || addr == 0 || has_dumped_buffer) {
@@ -605,7 +607,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", Memory::Read32(addr));
} else {
// We don't know what's in here so we'll hexdump it
- std::vector<u8> debug_buffer(sz);
+ debug_buffer.resize(sz);
Memory::ReadBlock(addr, debug_buffer.data(), sz);
std::string hexdump;
for (std::size_t i = 0; i < debug_buffer.size(); i++) {
@@ -664,6 +666,10 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
break;
}
+ system.GetReporter().SaveSvcBreakReport(
+ static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger, info1,
+ info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
+
if (!break_reason.signal_debugger) {
LOG_CRITICAL(
Debug_Emulated,
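One subtle part of the svc.cpp change: debug_buffer is hoisted out of handle_debug_buffer so the dumped bytes outlive the lambda and can be forwarded to the reporter afterwards. The scoping pattern in isolation (names and sizes here are illustrative, not the real SVC code):

```cpp
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

std::optional<std::vector<std::uint8_t>> CaptureThenReport(std::size_t size) {
    bool has_dumped = false;
    std::vector<std::uint8_t> buffer; // hoisted: must outlive the lambda below

    const auto dump = [&](std::size_t sz) {
        if (sz == 0 || has_dumped) {
            return;
        }
        buffer.resize(sz); // previously a lambda-local vector, destroyed on return
        has_dumped = true;
    };

    dump(size);
    // The buffer is still alive here, so it can be handed to a reporter,
    // mirroring the SaveSvcBreakReport call in the diff.
    return has_dumped ? std::make_optional(buffer) : std::nullopt;
}
```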
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index c73a40977..ec529e7f2 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -65,7 +65,7 @@ void Thread::Stop() {
owner_process->UnregisterThread(this);
// Mark the TLS slot in the thread's page as free.
- owner_process->FreeTLSSlot(tls_address);
+ owner_process->FreeTLSRegion(tls_address);
}
void Thread::WakeAfterDelay(s64 nanoseconds) {
@@ -76,13 +76,13 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
// This function might be called from any thread so we have to be cautious and use the
// thread-safe version of ScheduleEvent.
const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds});
- Core::System::GetInstance().CoreTiming().ScheduleEventThreadsafe(
+ Core::System::GetInstance().CoreTiming().ScheduleEvent(
cycles, kernel.ThreadWakeupCallbackEventType(), callback_handle);
}
void Thread::CancelWakeupTimer() {
- Core::System::GetInstance().CoreTiming().UnscheduleEventThreadsafe(
- kernel.ThreadWakeupCallbackEventType(), callback_handle);
+ Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(),
+ callback_handle);
}
static std::optional<s32> GetNextProcessorId(u64 mask) {
@@ -205,9 +205,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
thread->name = std::move(name);
thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
thread->owner_process = &owner_process;
+ thread->tls_address = thread->owner_process->CreateTLSRegion();
thread->scheduler = &system.Scheduler(processor_id);
thread->scheduler->AddThread(thread);
- thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
thread->owner_process->RegisterThread(thread.get());
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index b4b9cda7c..07e989637 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -5,7 +5,6 @@
#pragma once
#include <functional>
-#include <memory>
#include <string>
#include <vector>
@@ -78,9 +77,6 @@ enum class ThreadActivity : u32 {
class Thread final : public WaitObject {
public:
- using TLSMemory = std::vector<u8>;
- using TLSMemoryPtr = std::shared_ptr<TLSMemory>;
-
using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
using ThreadContext = Core::ARM_Interface::ThreadContext;
@@ -169,14 +165,6 @@ public:
return thread_id;
}
- TLSMemoryPtr& GetTLSMemory() {
- return tls_memory;
- }
-
- const TLSMemoryPtr& GetTLSMemory() const {
- return tls_memory;
- }
-
/// Resumes a thread from waiting
void ResumeFromWait();
@@ -463,11 +451,9 @@ private:
u32 ideal_core{0xFFFFFFFF};
u64 affinity_mask{0x1};
- TLSMemoryPtr tls_memory = std::make_shared<TLSMemory>();
+ ThreadActivity activity = ThreadActivity::Normal;
std::string name;
-
- ThreadActivity activity = ThreadActivity::Normal;
};
/**
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index c929c2a52..3df5ccb7f 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -152,22 +152,33 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me
}
ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
- // Find the first Free VMA.
- const VAddr base = GetASLRRegionBaseAddress();
- const VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
- if (vma.second.type != VMAType::Free)
- return false;
+ return FindFreeRegion(GetASLRRegionBaseAddress(), GetASLRRegionEndAddress(), size);
+}
- const VAddr vma_end = vma.second.base + vma.second.size;
- return vma_end > base && vma_end >= base + size;
- });
+ResultVal<VAddr> VMManager::FindFreeRegion(VAddr begin, VAddr end, u64 size) const {
+ ASSERT(begin < end);
+ ASSERT(size <= end - begin);
- if (vma_handle == vma_map.end()) {
+ const VMAHandle vma_handle =
+ std::find_if(vma_map.begin(), vma_map.end(), [begin, end, size](const auto& vma) {
+ if (vma.second.type != VMAType::Free) {
+ return false;
+ }
+ const VAddr vma_base = vma.second.base;
+ const VAddr vma_end = vma_base + vma.second.size;
+ const VAddr assumed_base = (begin < vma_base) ? vma_base : begin;
+ const VAddr used_range = assumed_base + size;
+
+ return vma_base <= assumed_base && assumed_base < used_range && used_range < end &&
+ used_range <= vma_end;
+ });
+
+ if (vma_handle == vma_map.cend()) {
// TODO(Subv): Find the correct error code here.
return ResultCode(-1);
}
- const VAddr target = std::max(base, vma_handle->second.base);
+ const VAddr target = std::max(begin, vma_handle->second.base);
return MakeResult<VAddr>(target);
}
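The generalized FindFreeRegion clips each free VMA against the requested [begin, end) window: the candidate base is max(begin, vma_base), and the region fits only if [candidate, candidate + size) stays inside both the window and the VMA. The predicate in isolation, with a few spot checks (a free-standing sketch, not the VMManager code itself):

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

using VAddr = std::uint64_t;

// Mirrors the find_if predicate above for a single free VMA.
bool FitsInFreeVMA(VAddr begin, VAddr end, std::uint64_t size, VAddr vma_base,
                   std::uint64_t vma_size) {
    const VAddr vma_end = vma_base + vma_size;
    const VAddr assumed_base = std::max(begin, vma_base);
    const VAddr used_range = assumed_base + size;
    return vma_base <= assumed_base && assumed_base < used_range && used_range < end &&
           used_range <= vma_end;
}

int main() {
    // A 0x3000-byte free VMA at 0x1000, searched within [0x0, 0x10000).
    assert(FitsInFreeVMA(0x0, 0x10000, 0x1000, 0x1000, 0x3000));     // fits at 0x1000
    assert(!FitsInFreeVMA(0x0, 0x10000, 0x4000, 0x1000, 0x3000));    // larger than the VMA
    assert(!FitsInFreeVMA(0x3800, 0x10000, 0x1000, 0x1000, 0x3000)); // clipped base leaves too little room
}
```

This is what lets CreateTLSRegion above search only the TLS/IO region, while the single-argument overload keeps its old behavior by delegating with the ASLR region bounds.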
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index dfbf7a894..752ae62f9 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -362,14 +362,39 @@ public:
ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u64 size, MemoryState state);
/**
- * Finds the first free address that can hold a region of the desired size.
+ * Finds the first free memory region of the given size within
+ * the user-addressable ASLR memory region.
*
- * @param size Size of the desired region.
- * @return The found free address.
+ * @param size The size of the desired region in bytes.
+ *
+ * @returns If successful, the base address of the free region with
+ * the given size.
*/
ResultVal<VAddr> FindFreeRegion(u64 size) const;
/**
+ * Finds the first free address range that can hold a region of the desired size
+ *
+ * @param begin The starting address of the range.
+ * This is treated as an inclusive beginning address.
+ *
+ * @param end The ending address of the range.
+ * This is treated as an exclusive ending address.
+ *
+ * @param size The size of the free region to attempt to locate,
+ * in bytes.
+ *
+ * @returns If successful, the base address of the free region with
+ * the given size.
+ *
+ * @returns If unsuccessful, a result containing an error code.
+ *
+ * @pre The starting address must be less than the ending address.
+ * @pre The size must not exceed the address range itself.
+ */
+ ResultVal<VAddr> FindFreeRegion(VAddr begin, VAddr end, u64 size) const;
+
+ /**
* Maps a memory-mapped IO region at a given address.
*
* @param target The guest address to start the mapping at.