Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r-- | src/core/hle/kernel/global_scheduler_context.cpp |   6
-rw-r--r-- | src/core/hle/kernel/k_interrupt_manager.cpp      |  34
-rw-r--r-- | src/core/hle/kernel/k_interrupt_manager.h        |  17
-rw-r--r-- | src/core/hle/kernel/k_memory_block.h             |   2
-rw-r--r-- | src/core/hle/kernel/k_page_table.cpp             | 194
-rw-r--r-- | src/core/hle/kernel/k_page_table.h               |  20
-rw-r--r-- | src/core/hle/kernel/k_process.cpp                |  22
-rw-r--r-- | src/core/hle/kernel/k_process.h                  |   4
-rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp              |   8
-rw-r--r-- | src/core/hle/kernel/k_thread.cpp                 |  46
-rw-r--r-- | src/core/hle/kernel/k_thread.h                   |   6
-rw-r--r-- | src/core/hle/kernel/kernel.cpp                   |   5
-rw-r--r-- | src/core/hle/kernel/svc.cpp                      | 108
-rw-r--r-- | src/core/hle/kernel/svc_common.h                 |   5
-rw-r--r-- | src/core/hle/kernel/svc_wrap.h                   |   8
15 files changed, 361 insertions, 124 deletions
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 4f4e338e3..baad2c5d6 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -9,6 +9,7 @@
 #include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
 
 namespace Kernel {
 
@@ -42,6 +43,11 @@ void GlobalSchedulerContext::PreemptThreads() {
     for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
        const u32 priority = preemption_priorities[core_id];
        kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
+
+        // Signal that an interrupt occurred. For core 3, this is a certainty, as preemption will
+        // result in the rotator thread being scheduled. For cores 0-2, this is to simulate any
+        // system interrupts that may have occurred.
+        kernel.PhysicalCore(core_id).Interrupt();
     }
 }
 
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
new file mode 100644
index 000000000..e5dd39751
--- /dev/null
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -0,0 +1,34 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_interrupt_manager.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel::KInterruptManager {
+
+void HandleInterrupt(KernelCore& kernel, s32 core_id) {
+    auto* process = kernel.CurrentProcess();
+    if (!process) {
+        return;
+    }
+
+    auto& scheduler = kernel.Scheduler(core_id);
+    auto& current_thread = *scheduler.GetCurrentThread();
+
+    // If the user disable count is set, we may need to pin the current thread.
+    if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
+        KScopedSchedulerLock sl{kernel};
+
+        // Pin the current thread.
+        process->PinCurrentThread(core_id);
+
+        // Set the interrupt flag for the thread.
+        scheduler.GetCurrentThread()->SetInterruptFlag();
+    }
+}
+
+} // namespace Kernel::KInterruptManager
diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h
new file mode 100644
index 000000000..05924801e
--- /dev/null
+++ b/src/core/hle/kernel/k_interrupt_manager.h
@@ -0,0 +1,17 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+class KernelCore;
+
+namespace KInterruptManager {
+void HandleInterrupt(KernelCore& kernel, s32 core_id);
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index fd491146f..9e51c33ce 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -120,7 +120,7 @@ static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015);
 
 enum class KMemoryPermission : u8 {
     None = 0,
-    Mask = static_cast<u8>(~None),
+    All = static_cast<u8>(~None),
 
     Read = 1 << 0,
     Write = 1 << 1,
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 99982e5a3..4da509224 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -264,9 +264,9 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
     ASSERT(heap_last < stack_start || stack_last < heap_start);
     ASSERT(heap_last < kmap_start || kmap_last < heap_start);
 
-    current_heap_addr = heap_region_start;
-    heap_capacity = 0;
-    physical_memory_usage = 0;
+    current_heap_end = heap_region_start;
+    max_heap_size = 0;
+    mapped_physical_memory_size = 0;
     memory_pool = pool;
 
     page_table_impl.Resize(address_space_width, PageBits);
@@ -306,7 +306,7 @@ ResultCode KPageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std:
     KMemoryState state{};
     KMemoryPermission perm{};
     CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, src_addr, size, KMemoryState::All,
-                                  KMemoryState::Normal, KMemoryPermission::Mask,
+                                  KMemoryState::Normal, KMemoryPermission::All,
                                   KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask,
                                   KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
 
@@ -465,7 +465,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
 
     MapPhysicalMemory(page_linked_list, addr, end_addr);
 
-    physical_memory_usage += remaining_size;
+    mapped_physical_memory_size += remaining_size;
 
     const std::size_t num_pages{size / PageSize};
     block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
@@ -507,7 +507,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
     auto process{system.Kernel().CurrentProcess()};
     process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
-    physical_memory_usage -= mapped_size;
+    mapped_physical_memory_size -= mapped_size;
 
     return ResultSuccess;
 }
@@ -554,7 +554,7 @@ ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
     KMemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
         &src_state, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::Mask, KMemoryPermission::ReadAndWrite,
+        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::ReadAndWrite,
         KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
 
     if (IsRegionMapped(dst_addr, size)) {
@@ -593,7 +593,7 @@ ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
     KMemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
         &src_state, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::Mask, KMemoryPermission::None,
+        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None,
         KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
 
     KMemoryPermission dst_perm{};
@@ -784,7 +784,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
     CASCADE_CODE(CheckMemoryState(
         &state, nullptr, &attribute, addr, size,
         KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-        KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::Mask,
+        KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::All,
         KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask, KMemoryAttribute::None,
         KMemoryAttribute::IpcAndDeviceMapped));
 
@@ -806,6 +806,33 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
                                   KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
 
     block_manager->Update(addr, size / PageSize, state, KMemoryPermission::ReadAndWrite);
+
+    return ResultSuccess;
+}
+
+ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
+                                           Svc::MemoryPermission svc_perm) {
+    const size_t num_pages = size / PageSize;
+
+    // Lock the table.
+    std::lock_guard lock{page_table_lock};
+
+    // Verify we can change the memory permission.
+    KMemoryState old_state;
+    KMemoryPermission old_perm;
+    R_TRY(this->CheckMemoryState(
+        std::addressof(old_state), std::addressof(old_perm), nullptr, addr, size,
+        KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None,
+        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+
+    // Determine new perm.
+    const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+    R_SUCCEED_IF(old_perm == new_perm);
+
+    // Perform mapping operation.
+    R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+
+    // Update the blocks.
+    block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None);
+
     return ResultSuccess;
 }
 
@@ -832,61 +859,125 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryA
     return ResultSuccess;
 }
 
-ResultCode KPageTable::SetHeapCapacity(std::size_t new_heap_capacity) {
+ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
+    // Lock the table.
     std::lock_guard lock{page_table_lock};
-    heap_capacity = new_heap_capacity;
-    return ResultSuccess;
-}
 
-ResultVal<VAddr> KPageTable::SetHeapSize(std::size_t size) {
+    // Only process page tables are allowed to set heap size.
+    ASSERT(!this->IsKernel());
 
-    if (size > heap_region_end - heap_region_start) {
-        return ResultOutOfMemory;
-    }
+    max_heap_size = size;
 
-    const u64 previous_heap_size{GetHeapSize()};
-
-    UNIMPLEMENTED_IF_MSG(previous_heap_size > size, "Heap shrink is unimplemented");
+    return ResultSuccess;
+}
 
-    // Increase the heap size
+ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+    // Try to perform a reduction in heap, instead of an extension.
+    VAddr cur_address{};
+    std::size_t allocation_size{};
     {
-        std::lock_guard lock{page_table_lock};
-
-        const u64 delta{size - previous_heap_size};
-
-        // Reserve memory for the heap extension.
-        KScopedResourceReservation memory_reservation(
-            system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
-            delta);
-
-        if (!memory_reservation.Succeeded()) {
-            LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta);
-            return ResultLimitReached;
+        // Lock the table.
+        std::lock_guard lk(page_table_lock);
+
+        // Validate that setting heap size is possible at all.
+        R_UNLESS(!is_kernel, ResultOutOfMemory);
+        R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start),
+                 ResultOutOfMemory);
+        R_UNLESS(size <= max_heap_size, ResultOutOfMemory);
+
+        if (size < GetHeapSize()) {
+            // The size being requested is less than the current size, so we need to free the
+            // end of the heap.
+
+            // Validate memory state.
+            std::size_t num_allocator_blocks;
+            R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
+                                         heap_region_start + size, GetHeapSize() - size,
+                                         KMemoryState::All, KMemoryState::Normal,
+                                         KMemoryPermission::All, KMemoryPermission::ReadAndWrite,
+                                         KMemoryAttribute::All, KMemoryAttribute::None));
+
+            // Unmap the end of the heap.
+            const auto num_pages = (GetHeapSize() - size) / PageSize;
+            R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None,
+                          OperationType::Unmap));
+
+            // Release the memory from the resource limit.
+            system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
+                LimitableResource::PhysicalMemory, num_pages * PageSize);
+
+            // Apply the memory block update.
+            block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryAttribute::None);
+
+            // Update the current heap end.
+            current_heap_end = heap_region_start + size;
+
+            // Set the output.
+            *out = heap_region_start;
+            return ResultSuccess;
+        } else if (size == GetHeapSize()) {
+            // The size requested is exactly the current size.
+            *out = heap_region_start;
+            return ResultSuccess;
+        } else {
+            // We have to allocate memory. Determine how much to allocate and where while the
+            // table is locked.
+            cur_address = current_heap_end;
+            allocation_size = size - GetHeapSize();
         }
+    }
 
-    KPageLinkedList page_linked_list;
-    const std::size_t num_pages{delta / PageSize};
+    // Reserve memory for the heap extension.
+    KScopedResourceReservation memory_reservation(
+        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+        allocation_size);
+    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
-    CASCADE_CODE(
-        system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
+    // Allocate pages for the heap extension.
+    KPageLinkedList page_linked_list;
+    R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, allocation_size / PageSize,
+                                                   memory_pool));
 
-    if (IsRegionMapped(current_heap_addr, delta)) {
-        return ResultInvalidCurrentMemory;
+    // Map the pages.
+    {
+        // Lock the table.
+        std::lock_guard lk(page_table_lock);
+
+        // Ensure that the heap hasn't changed since we began executing.
+        ASSERT(cur_address == current_heap_end);
+
+        // Check the memory state.
+        std::size_t num_allocator_blocks{};
+        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end,
+                                     allocation_size, KMemoryState::All, KMemoryState::Free,
+                                     KMemoryPermission::None, KMemoryPermission::None,
+                                     KMemoryAttribute::None, KMemoryAttribute::None));
+
+        // Map the pages.
+        const auto num_pages = allocation_size / PageSize;
+        R_TRY(Operate(current_heap_end, num_pages, page_linked_list, OperationType::MapGroup));
+
+        // Clear all the newly allocated pages.
+        for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
+            std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0,
+                        PageSize);
         }
 
-        CASCADE_CODE(
-            Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup));
-
-        // Succeeded in allocation, commit the resource reservation
+        // We succeeded, so commit our memory reservation.
         memory_reservation.Commit();
 
-        block_manager->Update(current_heap_addr, num_pages, KMemoryState::Normal,
-                              KMemoryPermission::ReadAndWrite);
+        // Apply the memory block update.
+        block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal,
+                              KMemoryPermission::ReadAndWrite, KMemoryAttribute::None);
 
-        current_heap_addr = heap_region_start + size;
-    }
+        // Update the current heap end.
+        current_heap_end = heap_region_start + size;
 
-    return heap_region_start;
+        // Set the output.
+        *out = heap_region_start;
+        return ResultSuccess;
+    }
 }
 
 ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
@@ -978,7 +1069,7 @@ ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
 
     if (const ResultCode result{CheckMemoryState(
             nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
-            KMemoryState::FlagCanCodeMemory, KMemoryPermission::Mask,
+            KMemoryState::FlagCanCodeMemory, KMemoryPermission::All,
             KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
         result.IsError()) {
         return result;
@@ -1031,9 +1122,8 @@ ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
 
 bool KPageTable::IsRegionMapped(VAddr address, u64 size) {
     return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
-                            KMemoryPermission::Mask, KMemoryPermission::None,
-                            KMemoryAttribute::Mask, KMemoryAttribute::None,
-                            KMemoryAttribute::IpcAndDeviceMapped)
+                            KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask,
+                            KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)
               .IsError();
 }
 
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index d784aa67e..564410dca 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -47,10 +47,11 @@ public:
     KMemoryInfo QueryInfo(VAddr addr);
     ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
     ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
+    ResultCode SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
     ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryAttribute mask,
                                   KMemoryAttribute value);
-    ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
-    ResultVal<VAddr> SetHeapSize(std::size_t size);
+    ResultCode SetMaxHeapSize(std::size_t size);
+    ResultCode SetHeapSize(VAddr* out, std::size_t size);
     ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
                                           bool is_map_only, VAddr region_start,
                                           std::size_t region_num_pages, KMemoryState state,
@@ -182,14 +183,15 @@ public:
     constexpr VAddr GetAliasCodeRegionSize() const {
         return alias_code_region_end - alias_code_region_start;
     }
+    size_t GetNormalMemorySize() {
+        std::lock_guard lk(page_table_lock);
+        return GetHeapSize() + mapped_physical_memory_size;
+    }
     constexpr std::size_t GetAddressSpaceWidth() const {
         return address_space_width;
     }
-    constexpr std::size_t GetHeapSize() {
-        return current_heap_addr - heap_region_start;
-    }
-    constexpr std::size_t GetTotalHeapSize() {
-        return GetHeapSize() + physical_memory_usage;
+    constexpr std::size_t GetHeapSize() const {
+        return current_heap_end - heap_region_start;
     }
     constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
         return address_space_start <= address && address + size - 1 <= address_space_end - 1;
@@ -269,10 +271,8 @@ private:
     VAddr code_region_end{};
     VAddr alias_code_region_start{};
     VAddr alias_code_region_end{};
-    VAddr current_heap_addr{};
-    std::size_t heap_capacity{};
-    std::size_t physical_memory_usage{};
+    std::size_t mapped_physical_memory_size{};
     std::size_t max_heap_size{};
     std::size_t max_physical_memory_size{};
     std::size_t address_space_width{};
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 90dda40dc..bf98a51e2 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -28,7 +28,6 @@
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
-#include "core/hle/lock.h"
 #include "core/memory.h"
 
 namespace Kernel {
@@ -173,7 +172,7 @@ void KProcess::DecrementThreadCount() {
 
 u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
     const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
-                       page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
+                       page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size +
                        main_thread_stack_size};
     if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
         capacity != pool_size) {
@@ -190,7 +189,7 @@ u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
 }
 
 u64 KProcess::GetTotalPhysicalMemoryUsed() const {
-    return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() +
+    return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() +
            GetSystemResourceSize();
 }
 
@@ -221,30 +220,28 @@ bool KProcess::ReleaseUserException(KThread* thread) {
     }
 }
 
-void KProcess::PinCurrentThread() {
+void KProcess::PinCurrentThread(s32 core_id) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Get the current thread.
-    const s32 core_id = GetCurrentCoreId(kernel);
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
 
     // If the thread isn't terminated, pin it.
     if (!cur_thread->IsTerminationRequested()) {
         // Pin it.
         PinThread(core_id, cur_thread);
-        cur_thread->Pin();
+        cur_thread->Pin(core_id);
 
         // An update is needed.
         KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
 }
 
-void KProcess::UnpinCurrentThread() {
+void KProcess::UnpinCurrentThread(s32 core_id) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Get the current thread.
-    const s32 core_id = GetCurrentCoreId(kernel);
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
 
     // Unpin it.
     cur_thread->Unpin();
@@ -411,8 +408,8 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
     resource_limit->Reserve(LimitableResource::Threads, 1);
     resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
 
-    const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size};
-    ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError());
+    const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
+    ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError());
 
     ChangeStatus(ProcessStatus::Running);
 
@@ -543,7 +540,6 @@ void KProcess::FreeTLSRegion(VAddr tls_address) {
 }
 
 void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
-    std::lock_guard lock{HLE::g_hle_lock};
     const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
                                       KMemoryPermission permission) {
         page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index cb93c7e24..e7c8b5838 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -345,8 +345,8 @@ public:
 
     bool IsSignaled() const override;
 
-    void PinCurrentThread();
-    void UnpinCurrentThread();
+    void PinCurrentThread(s32 core_id);
+    void UnpinCurrentThread(s32 core_id);
     void UnpinThread(KThread* thread);
 
     KLightLock& GetStateLock() {
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 277201de4..31cec990e 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -15,6 +15,7 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/cpu_manager.h"
+#include "core/hle/kernel/k_interrupt_manager.h"
 #include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
@@ -53,6 +54,13 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
         }
         cores_pending_reschedule &= ~(1ULL << core);
     }
+
+    for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
+        if (kernel.PhysicalCore(core_id).IsInterrupted()) {
+            KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_id));
+        }
+    }
+
     if (must_context_switch) {
         auto core_scheduler = kernel.CurrentScheduler();
         kernel.ExitSVCProfile();
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 752592e2e..71e029a3f 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -3,6 +3,7 @@
 #include <algorithm>
+#include <atomic>
 #include <cinttypes>
 #include <optional>
 #include <vector>
@@ -26,12 +27,14 @@
 #include "core/hle/kernel/k_resource_limit.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_system_control.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/time_manager.h"
 #include "core/hle/result.h"
+#include "core/memory.h"
 
 #ifdef ARCHITECTURE_x86_64
 #include "core/arm/dynarmic/arm_dynarmic_32.h"
@@ -50,6 +53,7 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
                                  VAddr entry_point, u64 arg) {
     context = {};
     context.cpu_registers[0] = arg;
+    context.cpu_registers[18] = Kernel::KSystemControl::GenerateRandomU64() | 1;
     context.pc = entry_point;
     context.sp = stack_top;
     // TODO(merry): Perform a hardware test to determine the below value.
@@ -61,6 +65,13 @@ namespace Kernel {
 
 namespace {
 
+struct ThreadLocalRegion {
+    static constexpr std::size_t MessageBufferSize = 0x100;
+    std::array<u32, MessageBufferSize / sizeof(u32)> message_buffer;
+    std::atomic_uint16_t disable_count;
+    std::atomic_uint16_t interrupt_flag;
+};
+
 class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
 public:
     explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
@@ -344,7 +355,7 @@ void KThread::StartTermination() {
     if (parent != nullptr) {
         parent->ReleaseUserException(this);
         if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) {
-            parent->UnpinCurrentThread();
+            parent->UnpinCurrentThread(core_id);
         }
     }
 
@@ -370,7 +381,7 @@ void KThread::StartTermination() {
     this->Close();
 }
 
-void KThread::Pin() {
+void KThread::Pin(s32 current_core) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Set ourselves as pinned.
@@ -387,7 +398,6 @@ void KThread::Pin() {
 
     // Bind ourselves to this core.
     const s32 active_core = GetActiveCore();
-    const s32 current_core = GetCurrentCoreId(kernel);
     SetActiveCore(current_core);
     physical_ideal_core_id = current_core;
@@ -480,6 +490,36 @@ void KThread::Unpin() {
     }
 }
 
+u16 KThread::GetUserDisableCount() const {
+    if (!IsUserThread()) {
+        // We only emulate TLS for user threads
+        return {};
+    }
+
+    auto& memory = kernel.System().Memory();
+    return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count));
+}
+
+void KThread::SetInterruptFlag() {
+    if (!IsUserThread()) {
+        // We only emulate TLS for user threads
+        return;
+    }
+
+    auto& memory = kernel.System().Memory();
+    memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
+}
+
+void KThread::ClearInterruptFlag() {
+    if (!IsUserThread()) {
+        // We only emulate TLS for user threads
+        return;
+    }
+
+    auto& memory = kernel.System().Memory();
+    memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
+}
+
 ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     KScopedSchedulerLock sl{kernel};
 
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c8a08bd71..83dfde69b 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -307,6 +307,10 @@ public:
         return parent != nullptr;
     }
 
+    u16 GetUserDisableCount() const;
+    void SetInterruptFlag();
+    void ClearInterruptFlag();
+
     [[nodiscard]] KThread* GetLockOwner() const {
         return lock_owner;
     }
@@ -490,7 +494,7 @@ public:
         this->GetStackParameters().disable_count--;
     }
 
-    void Pin();
+    void Pin(s32 current_core);
 
     void Unpin();
 
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 2e4e4cb1c..1225e1fba 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -182,7 +182,10 @@ struct KernelCore::Impl {
         // Shutdown all processes.
         if (current_process) {
             current_process->Finalize();
-            current_process->Close();
+            // current_process->Close();
+            // TODO: The current process should be destroyed based on accurate ref counting after
+            // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
+            current_process->Destroy();
             current_process = nullptr;
         }
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index a9f7438ea..250ef9042 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -41,7 +41,6 @@
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/svc_types.h"
 #include "core/hle/kernel/svc_wrap.h"
-#include "core/hle/lock.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
 #include "core/reporter.h"
@@ -136,25 +135,15 @@ enum class ResourceLimitValueType {
 } // Anonymous namespace
 
 /// Set the process heap to a given Size. It can both extend and shrink the heap.
-static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) {
-    std::lock_guard lock{HLE::g_hle_lock};
-    LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);
-
-    // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB.
-    if ((heap_size % 0x200000) != 0) {
-        LOG_ERROR(Kernel_SVC, "The heap size is not a multiple of 2MB, heap_size=0x{:016X}",
-                  heap_size);
-        return ResultInvalidSize;
-    }
-
-    if (heap_size >= 0x200000000) {
-        LOG_ERROR(Kernel_SVC, "The heap size is not less than 8GB, heap_size=0x{:016X}", heap_size);
-        return ResultInvalidSize;
-    }
+static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
+    LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
 
-    auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
+    // Validate size.
+    R_UNLESS(Common::IsAligned(size, HeapSizeAlignment), ResultInvalidSize);
+    R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
 
-    CASCADE_RESULT(*heap_addr, page_table.SetHeapSize(heap_size));
+    // Set the heap size.
+    R_TRY(system.Kernel().CurrentProcess()->PageTable().SetHeapSize(out_address, size));
 
     return ResultSuccess;
 }
@@ -166,9 +155,38 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s
     return result;
 }
 
+constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
+    switch (perm) {
+    case MemoryPermission::None:
+    case MemoryPermission::Read:
+    case MemoryPermission::ReadWrite:
+        return true;
+    default:
+        return false;
+    }
+}
+
+static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 size,
+                                      MemoryPermission perm) {
+    // Validate address / size.
+    R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+    R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+    R_UNLESS(size > 0, ResultInvalidSize);
+    R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+    // Validate the permission.
+    R_UNLESS(IsValidSetMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+    // Validate that the region is in range for the current process.
+    auto& page_table = system.Kernel().CurrentProcess()->PageTable();
+    R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Set the memory permission.
+    return page_table.SetMemoryPermission(address, size, perm);
+}
+
 static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
                                      u32 attribute) {
-    std::lock_guard lock{HLE::g_hle_lock};
     LOG_DEBUG(Kernel_SVC,
               "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}",
               address, size, mask, attribute);
@@ -212,7 +230,6 @@ static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 si
 
 /// Maps a memory range into a different range.
 static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
-    std::lock_guard lock{HLE::g_hle_lock};
     LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
               src_addr, size);
 
@@ -232,7 +249,6 @@ static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr,
 
 /// Unmaps a region that was previously mapped with svcMapMemory
 static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
-    std::lock_guard lock{HLE::g_hle_lock};
     LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
               src_addr, size);
 
@@ -642,7 +658,6 @@ static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
 
 /// Gets system/memory information for the current process
 static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
                           u64 info_sub_id) {
-    std::lock_guard lock{HLE::g_hle_lock};
     LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
               info_sub_id, handle);
 
@@ -886,22 +901,17 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
         return ResultSuccess;
     }
     case GetInfoType::IdleTickCount: {
-        if (handle == 0) {
-            LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
-                      static_cast<Handle>(handle));
-            return ResultInvalidHandle;
-        }
-
-        if (info_sub_id != 0xFFFFFFFFFFFFFFFF &&
-            info_sub_id != system.Kernel().CurrentPhysicalCoreIndex()) {
-            LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id);
-            return ResultInvalidCombination;
-        }
+        // Verify the input handle is invalid.
+        R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
 
-        const auto& scheduler = *system.Kernel().CurrentScheduler();
-        const auto* const idle_thread = scheduler.GetIdleThread();
+        // Verify the requested core is valid.
+        const bool core_valid =
+            (info_sub_id == static_cast<u64>(-1ULL)) ||
+            (info_sub_id == static_cast<u64>(system.Kernel().CurrentPhysicalCoreIndex()));
+        R_UNLESS(core_valid, ResultInvalidCombination);
 
-        *result = idle_thread->GetCpuTime();
+        // Get the idle tick count.
+        *result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
         return ResultSuccess;
     }
     default:
@@ -924,7 +934,6 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h
 
 /// Maps memory at a desired address
 static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
-    std::lock_guard lock{HLE::g_hle_lock};
     LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
 
     if (!Common::Is4KBAligned(addr)) {
@@ -978,7 +987,6 @@ static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size)
 
 /// Unmaps memory previously mapped via MapPhysicalMemory
 static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
-    std::lock_guard lock{HLE::g_hle_lock};
     LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
 
     if (!Common::Is4KBAligned(addr)) {
@@ -1520,7 +1528,6 @@ static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_han
 static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
                                      VAddr page_info_address, Handle process_handle,
                                      VAddr address) {
-    std::lock_guard lock{HLE::g_hle_lock};
     LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
     KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
@@ -2020,6 +2027,25 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::Sign
                                   count);
 }
 
+static void SynchronizePreemptionState(Core::System& system) {
+    auto& kernel = system.Kernel();
+
+    // Lock the scheduler.
+    KScopedSchedulerLock sl{kernel};
+
+    // If the current thread is pinned, unpin it.
+    KProcess* cur_process = system.Kernel().CurrentProcess();
+    const auto core_id = GetCurrentCoreId(kernel);
+
+    if (cur_process->GetPinnedThread(core_id) == GetCurrentThreadPointer(kernel)) {
+        // Clear the current thread's interrupt flag.
+        GetCurrentThread(kernel).ClearInterruptFlag();
+
+        // Unpin the current thread.
+        cur_process->UnpinCurrentThread(core_id);
+    }
+}
+
 static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
                                     s32 value, s32 count) {
     return SignalToAddress(system, address, signal_type, value, count);
@@ -2738,7 +2764,7 @@ static const FunctionDef SVC_Table_32[] = {
 static const FunctionDef SVC_Table_64[] = {
     {0x00, nullptr, "Unknown"},
     {0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"},
-    {0x02, nullptr, "SetMemoryPermission"},
+    {0x02, SvcWrap64<SetMemoryPermission>, "SetMemoryPermission"},
     {0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"},
     {0x04, SvcWrap64<MapMemory>, "MapMemory"},
     {0x05, SvcWrap64<UnmapMemory>, "UnmapMemory"},
@@ -2790,7 +2816,7 @@ static const FunctionDef SVC_Table_64[] = {
     {0x33, SvcWrap64<GetThreadContext>, "GetThreadContext"},
     {0x34, SvcWrap64<WaitForAddress>, "WaitForAddress"},
     {0x35, SvcWrap64<SignalToAddress>, "SignalToAddress"},
-    {0x36, nullptr, "SynchronizePreemptionState"},
+    {0x36, SvcWrap64<SynchronizePreemptionState>, "SynchronizePreemptionState"},
    {0x37, nullptr, "Unknown"},
    {0x38, nullptr, "Unknown"},
    {0x39, nullptr, "Unknown"},
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
index 60ea2c405..25de6e437 100644
--- a/src/core/hle/kernel/svc_common.h
+++ b/src/core/hle/kernel/svc_common.h
@@ -5,6 +5,7 @@
 #pragma once
 
 #include "common/common_types.h"
+#include "common/literals.h"
 
 namespace Kernel {
 using Handle = u32;
@@ -12,9 +13,13 @@ using Handle = u32;
 
 namespace Kernel::Svc {
 
+using namespace Common::Literals;
+
 constexpr s32 ArgumentHandleCountMax = 0x40;
 
 constexpr u32 HandleWaitMask{1u << 30};
 
+constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
+
 constexpr inline Handle InvalidHandle = Handle(0);
 
 enum PseudoHandle : Handle {
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 86255fe6d..a60adfcab 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -249,6 +249,14 @@ void SvcWrap64(Core::System& system) {
         func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw);
 }
 
+// Used by SetMemoryPermission
+template <ResultCode func(Core::System&, u64, u64, Svc::MemoryPermission)>
+void SvcWrap64(Core::System& system) {
+    FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
+                            static_cast<Svc::MemoryPermission>(Param(system, 2)))
+                           .raw);
+}
+
 // Used by MapSharedMemory
 template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
 void SvcWrap64(Core::System& system) {
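
Taken together, these changes implement a guest/kernel handshake around preemption: KInterruptManager::HandleInterrupt only pins a thread whose thread-local disable_count is non-zero (setting interrupt_flag so the guest knows an interrupt was deferred), and the newly wired SVC 0x36 (SynchronizePreemptionState) is how the guest later unpins itself. What follows is a minimal guest-side sketch of that protocol, not part of this change: the ThreadLocalRegion layout mirrors the struct added in k_thread.cpp, while GetTlr() and svcSynchronizePreemptionState() are hypothetical stand-ins for guest SDK/SVC stubs.

#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>

// Mirrors the ThreadLocalRegion layout added in k_thread.cpp: the message
// buffer fills the first 0x100 bytes, so disable_count lands at offset 0x100
// and interrupt_flag at offset 0x102 -- the offsets KThread::GetUserDisableCount()
// and KThread::Set/ClearInterruptFlag() read and write through guest memory.
struct ThreadLocalRegion {
    static constexpr std::size_t MessageBufferSize = 0x100;
    std::array<std::uint32_t, MessageBufferSize / sizeof(std::uint32_t)> message_buffer;
    std::atomic_uint16_t disable_count;
    std::atomic_uint16_t interrupt_flag;
};

ThreadLocalRegion* GetTlr();          // hypothetical: returns the current thread's TLR
void svcSynchronizePreemptionState(); // hypothetical stub that issues SVC 0x36

void UserCriticalSection() {
    ThreadLocalRegion* tlr = GetTlr();

    // Enter: while disable_count is non-zero, KInterruptManager::HandleInterrupt
    // pins this thread to its core instead of letting preemption migrate it.
    tlr->disable_count.fetch_add(1, std::memory_order_relaxed);

    // ... critical-section work ...

    // Leave: if an interrupt arrived while we were pinned, the kernel set
    // interrupt_flag; SynchronizePreemptionState unpins the thread and clears
    // the flag under the scheduler lock.
    tlr->disable_count.fetch_sub(1, std::memory_order_relaxed);
    if (tlr->interrupt_flag.load(std::memory_order_relaxed) != 0) {
        svcSynchronizePreemptionState();
    }
}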
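The reworked svcSetHeapSize likewise reduces its up-front validation to two checks. Below is a small sketch of the equivalent predicate; HeapSizeAlignment comes from svc_common.h in this diff, while MainMemorySizeMax is assumed to be the 8 GiB bound implied by the old `heap_size >= 0x200000000` comparison (its actual definition lies outside this diff).

#include <cstdint>

constexpr std::uint64_t HeapSizeAlignment = 2ULL << 20; // 2_MiB, from svc_common.h
constexpr std::uint64_t MainMemorySizeMax = 8ULL << 30; // assumed 8 GiB bound

// Equivalent of the two R_UNLESS checks in the new SetHeapSize: the size must
// be 2 MiB-aligned and strictly below the maximum main memory size.
constexpr bool IsValidHeapSize(std::uint64_t size) {
    return (size % HeapSizeAlignment) == 0 && size < MainMemorySizeMax;
}

static_assert(IsValidHeapSize(0x200000));    // exactly one 2 MiB heap block
static_assert(!IsValidHeapSize(0x1000));     // page-aligned but not 2 MiB-aligned
static_assert(!IsValidHeapSize(8ULL << 30)); // at the 8 GiB limit -> rejected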