diff options
Diffstat (limited to 'src')
280 files changed, 9225 insertions, 5535 deletions
diff --git a/src/audio_core/audio_renderer.cpp b/src/audio_core/audio_renderer.cpp index e6f38d600..c187d8ac5 100644 --- a/src/audio_core/audio_renderer.cpp +++ b/src/audio_core/audio_renderer.cpp @@ -36,9 +36,9 @@ public: } void SetWaveIndex(std::size_t index); - std::vector<s16> DequeueSamples(std::size_t sample_count); + std::vector<s16> DequeueSamples(std::size_t sample_count, Memory::Memory& memory); void UpdateState(); - void RefreshBuffer(); + void RefreshBuffer(Memory::Memory& memory); private: bool is_in_use{}; @@ -66,17 +66,18 @@ public: return info; } - void UpdateState(); + void UpdateState(Memory::Memory& memory); private: EffectOutStatus out_status{}; EffectInStatus info{}; }; -AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, AudioRendererParameter params, - Kernel::SharedPtr<Kernel::WritableEvent> buffer_event, +AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Memory::Memory& memory_, + AudioRendererParameter params, + std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number) : worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count), - effects(params.effect_count) { + effects(params.effect_count), memory{memory_} { audio_out = std::make_unique<AudioCore::AudioOut>(); stream = audio_out->OpenStream(core_timing, STREAM_SAMPLE_RATE, STREAM_NUM_CHANNELS, @@ -162,7 +163,7 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_ } for (auto& effect : effects) { - effect.UpdateState(); + effect.UpdateState(memory); } // Release previous buffers and queue next ones for playback @@ -206,13 +207,14 @@ void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) { is_refresh_pending = true; } -std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(std::size_t sample_count) { +std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(std::size_t sample_count, + Memory::Memory& memory) { if (!IsPlaying()) { return {}; } if (is_refresh_pending) { 
- RefreshBuffer(); + RefreshBuffer(memory); } const std::size_t max_size{samples.size() - offset}; @@ -256,10 +258,11 @@ void AudioRenderer::VoiceState::UpdateState() { is_in_use = info.is_in_use; } -void AudioRenderer::VoiceState::RefreshBuffer() { - std::vector<s16> new_samples(info.wave_buffer[wave_index].buffer_sz / sizeof(s16)); - Memory::ReadBlock(info.wave_buffer[wave_index].buffer_addr, new_samples.data(), - info.wave_buffer[wave_index].buffer_sz); +void AudioRenderer::VoiceState::RefreshBuffer(Memory::Memory& memory) { + const auto wave_buffer_address = info.wave_buffer[wave_index].buffer_addr; + const auto wave_buffer_size = info.wave_buffer[wave_index].buffer_sz; + std::vector<s16> new_samples(wave_buffer_size / sizeof(s16)); + memory.ReadBlock(wave_buffer_address, new_samples.data(), wave_buffer_size); switch (static_cast<Codec::PcmFormat>(info.sample_format)) { case Codec::PcmFormat::Int16: { @@ -269,7 +272,7 @@ void AudioRenderer::VoiceState::RefreshBuffer() { case Codec::PcmFormat::Adpcm: { // Decode ADPCM to PCM16 Codec::ADPCM_Coeff coeffs; - Memory::ReadBlock(info.additional_params_addr, coeffs.data(), sizeof(Codec::ADPCM_Coeff)); + memory.ReadBlock(info.additional_params_addr, coeffs.data(), sizeof(Codec::ADPCM_Coeff)); new_samples = Codec::DecodeADPCM(reinterpret_cast<u8*>(new_samples.data()), new_samples.size() * sizeof(s16), coeffs, adpcm_state); break; @@ -307,18 +310,18 @@ void AudioRenderer::VoiceState::RefreshBuffer() { is_refresh_pending = false; } -void AudioRenderer::EffectState::UpdateState() { +void AudioRenderer::EffectState::UpdateState(Memory::Memory& memory) { if (info.is_new) { out_status.state = EffectStatus::New; } else { if (info.type == Effect::Aux) { - ASSERT_MSG(Memory::Read32(info.aux_info.return_buffer_info) == 0, + ASSERT_MSG(memory.Read32(info.aux_info.return_buffer_info) == 0, "Aux buffers tried to update"); - ASSERT_MSG(Memory::Read32(info.aux_info.send_buffer_info) == 0, + 
ASSERT_MSG(memory.Read32(info.aux_info.send_buffer_info) == 0, "Aux buffers tried to update"); - ASSERT_MSG(Memory::Read32(info.aux_info.return_buffer_base) == 0, + ASSERT_MSG(memory.Read32(info.aux_info.return_buffer_base) == 0, "Aux buffers tried to update"); - ASSERT_MSG(Memory::Read32(info.aux_info.send_buffer_base) == 0, + ASSERT_MSG(memory.Read32(info.aux_info.send_buffer_base) == 0, "Aux buffers tried to update"); } } @@ -340,7 +343,7 @@ void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) { std::size_t offset{}; s64 samples_remaining{BUFFER_SIZE}; while (samples_remaining > 0) { - const std::vector<s16> samples{voice.DequeueSamples(samples_remaining)}; + const std::vector<s16> samples{voice.DequeueSamples(samples_remaining, memory)}; if (samples.empty()) { break; diff --git a/src/audio_core/audio_renderer.h b/src/audio_core/audio_renderer.h index 4f14b91cd..be1b019f1 100644 --- a/src/audio_core/audio_renderer.h +++ b/src/audio_core/audio_renderer.h @@ -22,6 +22,10 @@ namespace Kernel { class WritableEvent; } +namespace Memory { +class Memory; +} + namespace AudioCore { class AudioOut; @@ -217,9 +221,9 @@ static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader has wrong size class AudioRenderer { public: - AudioRenderer(Core::Timing::CoreTiming& core_timing, AudioRendererParameter params, - Kernel::SharedPtr<Kernel::WritableEvent> buffer_event, - std::size_t instance_number); + AudioRenderer(Core::Timing::CoreTiming& core_timing, Memory::Memory& memory_, + AudioRendererParameter params, + std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number); ~AudioRenderer(); std::vector<u8> UpdateAudioRenderer(const std::vector<u8>& input_params); @@ -235,11 +239,12 @@ private: class VoiceState; AudioRendererParameter worker_params; - Kernel::SharedPtr<Kernel::WritableEvent> buffer_event; + std::shared_ptr<Kernel::WritableEvent> buffer_event; std::vector<VoiceState> voices; std::vector<EffectState> effects; std::unique_ptr<AudioOut> 
audio_out; - AudioCore::StreamPtr stream; + StreamPtr stream; + Memory::Memory& memory; }; } // namespace AudioCore diff --git a/src/audio_core/stream.cpp b/src/audio_core/stream.cpp index 6a5f53a57..4ca98f8ea 100644 --- a/src/audio_core/stream.cpp +++ b/src/audio_core/stream.cpp @@ -37,7 +37,7 @@ Stream::Stream(Core::Timing::CoreTiming& core_timing, u32 sample_rate, Format fo : sample_rate{sample_rate}, format{format}, release_callback{std::move(release_callback)}, sink_stream{sink_stream}, core_timing{core_timing}, name{std::move(name_)} { - release_event = core_timing.RegisterEvent( + release_event = Core::Timing::CreateEvent( name, [this](u64 userdata, s64 cycles_late) { ReleaseActiveBuffer(); }); } diff --git a/src/audio_core/stream.h b/src/audio_core/stream.h index 8106cea43..1708a4d98 100644 --- a/src/audio_core/stream.h +++ b/src/audio_core/stream.h @@ -98,18 +98,19 @@ private: /// Gets the number of core cycles when the specified buffer will be released s64 GetBufferReleaseCycles(const Buffer& buffer) const; - u32 sample_rate; ///< Sample rate of the stream - Format format; ///< Format of the stream - float game_volume = 1.0f; ///< The volume the game currently has set - ReleaseCallback release_callback; ///< Buffer release callback for the stream - State state{State::Stopped}; ///< Playback state of the stream - Core::Timing::EventType* release_event{}; ///< Core timing release event for the stream - BufferPtr active_buffer; ///< Actively playing buffer in the stream - std::queue<BufferPtr> queued_buffers; ///< Buffers queued to be played in the stream - std::queue<BufferPtr> released_buffers; ///< Buffers recently released from the stream - SinkStream& sink_stream; ///< Output sink for the stream - Core::Timing::CoreTiming& core_timing; ///< Core timing instance. 
- std::string name; ///< Name of the stream, must be unique + u32 sample_rate; ///< Sample rate of the stream + Format format; ///< Format of the stream + float game_volume = 1.0f; ///< The volume the game currently has set + ReleaseCallback release_callback; ///< Buffer release callback for the stream + State state{State::Stopped}; ///< Playback state of the stream + std::shared_ptr<Core::Timing::EventType> + release_event; ///< Core timing release event for the stream + BufferPtr active_buffer; ///< Actively playing buffer in the stream + std::queue<BufferPtr> queued_buffers; ///< Buffers queued to be played in the stream + std::queue<BufferPtr> released_buffers; ///< Buffers recently released from the stream + SinkStream& sink_stream; ///< Output sink for the stream + Core::Timing::CoreTiming& core_timing; ///< Core timing instance. + std::string name; ///< Name of the stream, must be unique }; using StreamPtr = std::shared_ptr<Stream>; diff --git a/src/common/assert.h b/src/common/assert.h index 4b0e3f64e..5b67c5c52 100644 --- a/src/common/assert.h +++ b/src/common/assert.h @@ -41,8 +41,9 @@ __declspec(noinline, noreturn) } \ while (0) -#define UNREACHABLE() ASSERT_MSG(false, "Unreachable code!") -#define UNREACHABLE_MSG(...) ASSERT_MSG(false, __VA_ARGS__) +#define UNREACHABLE() assert_noinline_call([] { LOG_CRITICAL(Debug, "Unreachable code!"); }) +#define UNREACHABLE_MSG(...) \ + assert_noinline_call([&] { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); }) #ifdef _DEBUG #define DEBUG_ASSERT(_a_) ASSERT(_a_) diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h index c029dc7b3..052254678 100644 --- a/src/common/common_funcs.h +++ b/src/common/common_funcs.h @@ -19,13 +19,15 @@ /// Helper macros to insert unused bytes or words to properly align structs. These values will be /// zero-initialized. 
-#define INSERT_PADDING_BYTES(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__){}; -#define INSERT_PADDING_WORDS(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__){}; +#define INSERT_PADDING_BYTES(num_bytes) \ + std::array<u8, num_bytes> CONCAT2(pad, __LINE__) {} +#define INSERT_PADDING_WORDS(num_words) \ + std::array<u32, num_words> CONCAT2(pad, __LINE__) {} /// These are similar to the INSERT_PADDING_* macros, but are needed for padding unions. This is /// because unions can only be initialized by one member. -#define INSERT_UNION_PADDING_BYTES(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__); -#define INSERT_UNION_PADDING_WORDS(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__); +#define INSERT_UNION_PADDING_BYTES(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__) +#define INSERT_UNION_PADDING_WORDS(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__) #ifndef _MSC_VER @@ -56,7 +58,7 @@ std::string GetLastErrorMsg(); namespace Common { constexpr u32 MakeMagic(char a, char b, char c, char d) { - return a | b << 8 | c << 16 | d << 24; + return u32(a) | u32(b) << 8 | u32(c) << 16 | u32(d) << 24; } } // namespace Common diff --git a/src/common/hash.h b/src/common/hash.h index ebd4125e2..b2538f3ea 100644 --- a/src/common/hash.h +++ b/src/common/hash.h @@ -35,41 +35,6 @@ static inline u64 ComputeStructHash64(const T& data) { return ComputeHash64(&data, sizeof(data)); } -/// A helper template that ensures the padding in a struct is initialized by memsetting to 0. -template <typename T> -struct HashableStruct { - // In addition to being trivially copyable, T must also have a trivial default constructor, - // because any member initialization would be overridden by memset - static_assert(std::is_trivial_v<T>, "Type passed to HashableStruct must be trivial"); - /* - * We use a union because "implicitly-defined copy/move constructor for a union X copies the - * object representation of X." 
and "implicitly-defined copy assignment operator for a union X - * copies the object representation (3.9) of X." = Bytewise copy instead of memberwise copy. - * This is important because the padding bytes are included in the hash and comparison between - * objects. - */ - union { - T state; - }; - - HashableStruct() { - // Memset structure to zero padding bits, so that they will be deterministic when hashing - std::memset(&state, 0, sizeof(T)); - } - - bool operator==(const HashableStruct<T>& o) const { - return std::memcmp(&state, &o.state, sizeof(T)) == 0; - }; - - bool operator!=(const HashableStruct<T>& o) const { - return !(*this == o); - }; - - std::size_t Hash() const { - return Common::ComputeStructHash64(state); - } -}; - struct PairHash { template <class T1, class T2> std::size_t operator()(const std::pair<T1, T2>& pair) const noexcept { diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp index 1111cfbad..8f2591d53 100644 --- a/src/common/logging/backend.cpp +++ b/src/common/logging/backend.cpp @@ -272,8 +272,10 @@ const char* GetLogClassName(Class log_class) { #undef CLS #undef SUB case Class::Count: - UNREACHABLE(); + break; } + UNREACHABLE(); + return "Invalid"; } const char* GetLevelName(Level log_level) { @@ -288,9 +290,11 @@ const char* GetLevelName(Level log_level) { LVL(Error); LVL(Critical); case Level::Count: - UNREACHABLE(); + break; } #undef LVL + UNREACHABLE(); + return "Invalid"; } void SetGlobalFilter(const Filter& filter) { diff --git a/src/common/threadsafe_queue.h b/src/common/threadsafe_queue.h index e714ba5b3..8268bbd5c 100644 --- a/src/common/threadsafe_queue.h +++ b/src/common/threadsafe_queue.h @@ -46,9 +46,16 @@ public: ElementPtr* new_ptr = new ElementPtr(); write_ptr->next.store(new_ptr, std::memory_order_release); write_ptr = new_ptr; - cv.notify_one(); - ++size; + const size_t previous_size{size++}; + + // Acquire the mutex and then immediately release it as a fence. 
+ // TODO(bunnei): This can be replaced with C++20 waitable atomics when properly supported. + // See discussion on https://github.com/yuzu-emu/yuzu/pull/3173 for details. + if (previous_size == 0) { + std::lock_guard lock{cv_mutex}; + } + cv.notify_one(); } void Pop() { diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 4f6a87b0a..7fd226050 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -170,6 +170,7 @@ add_library(core STATIC hle/kernel/server_port.h hle/kernel/server_session.cpp hle/kernel/server_session.h + hle/kernel/session.cpp hle/kernel/session.h hle/kernel/shared_memory.cpp hle/kernel/shared_memory.h @@ -509,7 +510,6 @@ add_library(core STATIC memory/dmnt_cheat_vm.h memory.cpp memory.h - memory_setup.h perf_stats.cpp perf_stats.h reporter.cpp @@ -522,6 +522,23 @@ add_library(core STATIC tools/freezer.h ) +if (MSVC) + target_compile_options(core PRIVATE + # 'expression' : signed/unsigned mismatch + /we4018 + # 'argument' : conversion from 'type1' to 'type2', possible loss of data (floating-point) + /we4244 + # 'conversion' : conversion from 'type1' to 'type2', signed/unsigned mismatch + /we4245 + # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data + /we4254 + # 'var' : conversion from 'size_t' to 'type', possible loss of data + /we4267 + # 'context' : truncation from 'type1' to 'type2' + /we4305 + ) +endif() + create_target_directory_groups(core) target_link_libraries(core PUBLIC common PRIVATE audio_core video_core) diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index 372612c9b..7e846ddd5 100644 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp @@ -13,7 +13,6 @@ #include "core/memory.h" namespace Core { - namespace { constexpr u64 ELF_DYNAMIC_TAG_NULL = 0; @@ -61,15 +60,15 @@ static_assert(sizeof(ELFSymbol) == 0x18, "ELFSymbol has incorrect size."); using Symbols = std::vector<std::pair<ELFSymbol, std::string>>; -Symbols 
GetSymbols(VAddr text_offset) { - const auto mod_offset = text_offset + Memory::Read32(text_offset + 4); +Symbols GetSymbols(VAddr text_offset, Memory::Memory& memory) { + const auto mod_offset = text_offset + memory.Read32(text_offset + 4); if (mod_offset < text_offset || (mod_offset & 0b11) != 0 || - Memory::Read32(mod_offset) != Common::MakeMagic('M', 'O', 'D', '0')) { + memory.Read32(mod_offset) != Common::MakeMagic('M', 'O', 'D', '0')) { return {}; } - const auto dynamic_offset = Memory::Read32(mod_offset + 0x4) + mod_offset; + const auto dynamic_offset = memory.Read32(mod_offset + 0x4) + mod_offset; VAddr string_table_offset{}; VAddr symbol_table_offset{}; @@ -77,8 +76,8 @@ Symbols GetSymbols(VAddr text_offset) { VAddr dynamic_index = dynamic_offset; while (true) { - const auto tag = Memory::Read64(dynamic_index); - const auto value = Memory::Read64(dynamic_index + 0x8); + const u64 tag = memory.Read64(dynamic_index); + const u64 value = memory.Read64(dynamic_index + 0x8); dynamic_index += 0x10; if (tag == ELF_DYNAMIC_TAG_NULL) { @@ -106,11 +105,11 @@ Symbols GetSymbols(VAddr text_offset) { VAddr symbol_index = symbol_table_address; while (symbol_index < string_table_address) { ELFSymbol symbol{}; - Memory::ReadBlock(symbol_index, &symbol, sizeof(ELFSymbol)); + memory.ReadBlock(symbol_index, &symbol, sizeof(ELFSymbol)); VAddr string_offset = string_table_address + symbol.name_index; std::string name; - for (u8 c = Memory::Read8(string_offset); c != 0; c = Memory::Read8(++string_offset)) { + for (u8 c = memory.Read8(string_offset); c != 0; c = memory.Read8(++string_offset)) { name += static_cast<char>(c); } @@ -142,28 +141,28 @@ constexpr u64 SEGMENT_BASE = 0x7100000000ull; std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktrace() const { std::vector<BacktraceEntry> out; + auto& memory = system.Memory(); auto fp = GetReg(29); auto lr = GetReg(30); - while (true) { out.push_back({"", 0, lr, 0}); if (!fp) { break; } - lr = Memory::Read64(fp + 8) - 
4; - fp = Memory::Read64(fp); + lr = memory.Read64(fp + 8) - 4; + fp = memory.Read64(fp); } std::map<VAddr, std::string> modules; - auto& loader{System::GetInstance().GetAppLoader()}; + auto& loader{system.GetAppLoader()}; if (loader.ReadNSOModules(modules) != Loader::ResultStatus::Success) { return {}; } std::map<std::string, Symbols> symbols; for (const auto& module : modules) { - symbols.insert_or_assign(module.second, GetSymbols(module.first)); + symbols.insert_or_assign(module.second, GetSymbols(module.first, memory)); } for (auto& entry : out) { diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h index 45e94e625..47b964eb7 100644 --- a/src/core/arm/arm_interface.h +++ b/src/core/arm/arm_interface.h @@ -17,11 +17,13 @@ enum class VMAPermission : u8; } namespace Core { +class System; /// Generic ARMv8 CPU interface class ARM_Interface : NonCopyable { public: - virtual ~ARM_Interface() {} + explicit ARM_Interface(System& system_) : system{system_} {} + virtual ~ARM_Interface() = default; struct ThreadContext { std::array<u64, 31> cpu_registers; @@ -163,6 +165,10 @@ public: /// fp+0 : pointer to previous frame record /// fp+8 : value of lr for frame void LogBacktrace() const; + +protected: + /// System context that this ARM interface is running under. 
+ System& system; }; } // namespace Core diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp index 700c4afff..f8c7f0efd 100644 --- a/src/core/arm/dynarmic/arm_dynarmic.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic.cpp @@ -28,36 +28,38 @@ public: explicit ARM_Dynarmic_Callbacks(ARM_Dynarmic& parent) : parent(parent) {} u8 MemoryRead8(u64 vaddr) override { - return Memory::Read8(vaddr); + return parent.system.Memory().Read8(vaddr); } u16 MemoryRead16(u64 vaddr) override { - return Memory::Read16(vaddr); + return parent.system.Memory().Read16(vaddr); } u32 MemoryRead32(u64 vaddr) override { - return Memory::Read32(vaddr); + return parent.system.Memory().Read32(vaddr); } u64 MemoryRead64(u64 vaddr) override { - return Memory::Read64(vaddr); + return parent.system.Memory().Read64(vaddr); } Vector MemoryRead128(u64 vaddr) override { - return {Memory::Read64(vaddr), Memory::Read64(vaddr + 8)}; + auto& memory = parent.system.Memory(); + return {memory.Read64(vaddr), memory.Read64(vaddr + 8)}; } void MemoryWrite8(u64 vaddr, u8 value) override { - Memory::Write8(vaddr, value); + parent.system.Memory().Write8(vaddr, value); } void MemoryWrite16(u64 vaddr, u16 value) override { - Memory::Write16(vaddr, value); + parent.system.Memory().Write16(vaddr, value); } void MemoryWrite32(u64 vaddr, u32 value) override { - Memory::Write32(vaddr, value); + parent.system.Memory().Write32(vaddr, value); } void MemoryWrite64(u64 vaddr, u64 value) override { - Memory::Write64(vaddr, value); + parent.system.Memory().Write64(vaddr, value); } void MemoryWrite128(u64 vaddr, Vector value) override { - Memory::Write64(vaddr, value[0]); - Memory::Write64(vaddr + 8, value[1]); + auto& memory = parent.system.Memory(); + memory.Write64(vaddr, value[0]); + memory.Write64(vaddr + 8, value[1]); } void InterpreterFallback(u64 pc, std::size_t num_instructions) override { @@ -67,7 +69,7 @@ public: ARM_Interface::ThreadContext ctx; parent.SaveContext(ctx); 
parent.inner_unicorn.LoadContext(ctx); - parent.inner_unicorn.ExecuteInstructions(static_cast<int>(num_instructions)); + parent.inner_unicorn.ExecuteInstructions(num_instructions); parent.inner_unicorn.SaveContext(ctx); parent.LoadContext(ctx); num_interpreted_instructions += num_instructions; @@ -171,9 +173,10 @@ void ARM_Dynarmic::Step() { ARM_Dynarmic::ARM_Dynarmic(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index) - : cb(std::make_unique<ARM_Dynarmic_Callbacks>(*this)), inner_unicorn{system}, - core_index{core_index}, system{system}, - exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {} + : ARM_Interface{system}, + cb(std::make_unique<ARM_Dynarmic_Callbacks>(*this)), inner_unicorn{system}, + core_index{core_index}, exclusive_monitor{ + dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {} ARM_Dynarmic::~ARM_Dynarmic() = default; @@ -264,7 +267,9 @@ void ARM_Dynarmic::PageTableChanged(Common::PageTable& page_table, jit = MakeJit(page_table, new_address_space_size_in_bits); } -DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(std::size_t core_count) : monitor(core_count) {} +DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory_, std::size_t core_count) + : monitor(core_count), memory{memory_} {} + DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default; void DynarmicExclusiveMonitor::SetExclusive(std::size_t core_index, VAddr addr) { @@ -277,29 +282,28 @@ void DynarmicExclusiveMonitor::ClearExclusive() { } bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) { - return monitor.DoExclusiveOperation(core_index, vaddr, 1, - [&] { Memory::Write8(vaddr, value); }); + return monitor.DoExclusiveOperation(core_index, vaddr, 1, [&] { memory.Write8(vaddr, value); }); } bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) { return monitor.DoExclusiveOperation(core_index, vaddr, 2, - [&] { 
Memory::Write16(vaddr, value); }); + [&] { memory.Write16(vaddr, value); }); } bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) { return monitor.DoExclusiveOperation(core_index, vaddr, 4, - [&] { Memory::Write32(vaddr, value); }); + [&] { memory.Write32(vaddr, value); }); } bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) { return monitor.DoExclusiveOperation(core_index, vaddr, 8, - [&] { Memory::Write64(vaddr, value); }); + [&] { memory.Write64(vaddr, value); }); } bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) { return monitor.DoExclusiveOperation(core_index, vaddr, 16, [&] { - Memory::Write64(vaddr + 0, value[0]); - Memory::Write64(vaddr + 8, value[1]); + memory.Write64(vaddr + 0, value[0]); + memory.Write64(vaddr + 8, value[1]); }); } diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h index 504d46c68..9cd475cfb 100644 --- a/src/core/arm/dynarmic/arm_dynarmic.h +++ b/src/core/arm/dynarmic/arm_dynarmic.h @@ -12,6 +12,10 @@ #include "core/arm/exclusive_monitor.h" #include "core/arm/unicorn/arm_unicorn.h" +namespace Memory { +class Memory; +} + namespace Core { class ARM_Dynarmic_Callbacks; @@ -58,13 +62,12 @@ private: ARM_Unicorn inner_unicorn; std::size_t core_index; - System& system; DynarmicExclusiveMonitor& exclusive_monitor; }; class DynarmicExclusiveMonitor final : public ExclusiveMonitor { public: - explicit DynarmicExclusiveMonitor(std::size_t core_count); + explicit DynarmicExclusiveMonitor(Memory::Memory& memory_, std::size_t core_count); ~DynarmicExclusiveMonitor() override; void SetExclusive(std::size_t core_index, VAddr addr) override; @@ -79,6 +82,7 @@ public: private: friend class ARM_Dynarmic; Dynarmic::A64::ExclusiveMonitor monitor; + Memory::Memory& memory; }; } // namespace Core diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp 
index d4f41bfc1..48182c99a 100644 --- a/src/core/arm/unicorn/arm_unicorn.cpp +++ b/src/core/arm/unicorn/arm_unicorn.cpp @@ -60,17 +60,18 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si return false; } -ARM_Unicorn::ARM_Unicorn(System& system) : system{system} { +ARM_Unicorn::ARM_Unicorn(System& system) : ARM_Interface{system} { CHECKED(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc)); auto fpv = 3 << 20; CHECKED(uc_reg_write(uc, UC_ARM64_REG_CPACR_EL1, &fpv)); uc_hook hook{}; - CHECKED(uc_hook_add(uc, &hook, UC_HOOK_INTR, (void*)InterruptHook, this, 0, -1)); - CHECKED(uc_hook_add(uc, &hook, UC_HOOK_MEM_INVALID, (void*)UnmappedMemoryHook, &system, 0, -1)); + CHECKED(uc_hook_add(uc, &hook, UC_HOOK_INTR, (void*)InterruptHook, this, 0, UINT64_MAX)); + CHECKED(uc_hook_add(uc, &hook, UC_HOOK_MEM_INVALID, (void*)UnmappedMemoryHook, &system, 0, + UINT64_MAX)); if (GDBStub::IsServerEnabled()) { - CHECKED(uc_hook_add(uc, &hook, UC_HOOK_CODE, (void*)CodeHook, this, 0, -1)); + CHECKED(uc_hook_add(uc, &hook, UC_HOOK_CODE, (void*)CodeHook, this, 0, UINT64_MAX)); last_bkpt_hit = false; } } @@ -154,9 +155,10 @@ void ARM_Unicorn::SetTPIDR_EL0(u64 value) { void ARM_Unicorn::Run() { if (GDBStub::IsServerEnabled()) { - ExecuteInstructions(std::max(4000000, 0)); + ExecuteInstructions(std::max(4000000U, 0U)); } else { - ExecuteInstructions(std::max(system.CoreTiming().GetDowncount(), s64{0})); + ExecuteInstructions( + std::max(std::size_t(system.CoreTiming().GetDowncount()), std::size_t{0})); } } @@ -166,7 +168,7 @@ void ARM_Unicorn::Step() { MICROPROFILE_DEFINE(ARM_Jit_Unicorn, "ARM JIT", "Unicorn", MP_RGB(255, 64, 64)); -void ARM_Unicorn::ExecuteInstructions(int num_instructions) { +void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) { MICROPROFILE_SCOPE(ARM_Jit_Unicorn); CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions)); system.CoreTiming().AddTicks(num_instructions); diff --git a/src/core/arm/unicorn/arm_unicorn.h 
b/src/core/arm/unicorn/arm_unicorn.h index fe2ffd70c..3c5b155f9 100644 --- a/src/core/arm/unicorn/arm_unicorn.h +++ b/src/core/arm/unicorn/arm_unicorn.h @@ -34,7 +34,7 @@ public: void LoadContext(const ThreadContext& ctx) override; void PrepareReschedule() override; void ClearExclusiveState() override; - void ExecuteInstructions(int num_instructions); + void ExecuteInstructions(std::size_t num_instructions); void Run() override; void Step() override; void ClearInstructionCache() override; @@ -45,7 +45,6 @@ private: static void InterruptHook(uc_engine* uc, u32 int_no, void* user_data); uc_engine* uc{}; - System& system; GDBStub::BreakpointAddress last_bkpt{}; bool last_bkpt_hit = false; }; diff --git a/src/core/core.cpp b/src/core/core.cpp index eba17218a..d697b80ef 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -39,13 +39,13 @@ #include "core/hle/service/service.h" #include "core/hle/service/sm/sm.h" #include "core/loader/loader.h" +#include "core/memory.h" #include "core/memory/cheat_engine.h" #include "core/perf_stats.h" #include "core/reporter.h" #include "core/settings.h" #include "core/telemetry_session.h" #include "core/tools/freezer.h" -#include "video_core/debug_utils/debug_utils.h" #include "video_core/renderer_base.h" #include "video_core/video_core.h" @@ -112,8 +112,8 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs, } struct System::Impl { explicit Impl(System& system) - : kernel{system}, fs_controller{system}, cpu_core_manager{system}, reporter{system}, - applet_manager{system} {} + : kernel{system}, fs_controller{system}, memory{system}, + cpu_core_manager{system}, reporter{system}, applet_manager{system} {} Cpu& CurrentCpuCore() { return cpu_core_manager.GetCurrentCore(); @@ -340,8 +340,8 @@ struct System::Impl { std::unique_ptr<Loader::AppLoader> app_loader; std::unique_ptr<VideoCore::RendererBase> renderer; std::unique_ptr<Tegra::GPU> gpu_core; - std::shared_ptr<Tegra::DebugContext> debug_context; - 
std::unique_ptr<Core::Hardware::InterruptManager> interrupt_manager; + std::unique_ptr<Hardware::InterruptManager> interrupt_manager; + Memory::Memory memory; CpuCoreManager cpu_core_manager; bool is_powered_on = false; bool exit_lock = false; @@ -498,6 +498,14 @@ const ExclusiveMonitor& System::Monitor() const { return impl->cpu_core_manager.GetExclusiveMonitor(); } +Memory::Memory& System::Memory() { + return impl->memory; +} + +const Memory::Memory& System::Memory() const { + return impl->memory; +} + Tegra::GPU& System::GPU() { return *impl->gpu_core; } @@ -570,14 +578,6 @@ Loader::AppLoader& System::GetAppLoader() const { return *impl->app_loader; } -void System::SetGPUDebugContext(std::shared_ptr<Tegra::DebugContext> context) { - impl->debug_context = std::move(context); -} - -Tegra::DebugContext* System::GetGPUDebugContext() const { - return impl->debug_context.get(); -} - void System::SetFilesystem(std::shared_ptr<FileSys::VfsFilesystem> vfs) { impl->virtual_filesystem = std::move(vfs); } diff --git a/src/core/core.h b/src/core/core.h index 984074ce3..e240c5c58 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -7,6 +7,7 @@ #include <cstddef> #include <memory> #include <string> +#include <vector> #include "common/common_types.h" #include "core/file_sys/vfs_types.h" @@ -85,6 +86,10 @@ namespace Core::Hardware { class InterruptManager; } +namespace Memory { +class Memory; +} + namespace Core { class ARM_Interface; @@ -224,6 +229,12 @@ public: /// Gets a constant reference to the exclusive monitor const ExclusiveMonitor& Monitor() const; + /// Gets a mutable reference to the system memory instance. + Memory::Memory& Memory(); + + /// Gets a constant reference to the system memory instance. 
+ const Memory::Memory& Memory() const; + /// Gets a mutable reference to the GPU interface Tegra::GPU& GPU(); @@ -296,10 +307,6 @@ public: Service::SM::ServiceManager& ServiceManager(); const Service::SM::ServiceManager& ServiceManager() const; - void SetGPUDebugContext(std::shared_ptr<Tegra::DebugContext> context); - - Tegra::DebugContext* GetGPUDebugContext() const; - void SetFilesystem(std::shared_ptr<FileSys::VfsFilesystem> vfs); std::shared_ptr<FileSys::VfsFilesystem> GetFilesystem() const; diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp index 233ea572c..630cd4feb 100644 --- a/src/core/core_cpu.cpp +++ b/src/core/core_cpu.cpp @@ -66,9 +66,10 @@ Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_ba Cpu::~Cpu() = default; -std::unique_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(std::size_t num_cores) { +std::unique_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor( + [[maybe_unused]] Memory::Memory& memory, [[maybe_unused]] std::size_t num_cores) { #ifdef ARCHITECTURE_x86_64 - return std::make_unique<DynarmicExclusiveMonitor>(num_cores); + return std::make_unique<DynarmicExclusiveMonitor>(memory, num_cores); #else // TODO(merry): Passthrough exclusive monitor return nullptr; @@ -95,6 +96,8 @@ void Cpu::RunLoop(bool tight_loop) { } else { arm_interface->Step(); } + // We are stopping a run, exclusive state must be cleared + arm_interface->ClearExclusiveState(); } core_timing.Advance(); diff --git a/src/core/core_cpu.h b/src/core/core_cpu.h index cafca8df7..78f5021a2 100644 --- a/src/core/core_cpu.h +++ b/src/core/core_cpu.h @@ -24,6 +24,10 @@ namespace Core::Timing { class CoreTiming; } +namespace Memory { +class Memory; +} + namespace Core { class ARM_Interface; @@ -86,7 +90,19 @@ public: void Shutdown(); - static std::unique_ptr<ExclusiveMonitor> MakeExclusiveMonitor(std::size_t num_cores); + /** + * Creates an exclusive monitor to handle exclusive reads/writes. 
+ * + * @param memory The current memory subsystem that the monitor may wish + * to keep track of. + * + * @param num_cores The number of cores to assume about the CPU. + * + * @returns The constructed exclusive monitor instance, or nullptr if the current + * CPU backend is unable to use an exclusive monitor. + */ + static std::unique_ptr<ExclusiveMonitor> MakeExclusiveMonitor(Memory::Memory& memory, + std::size_t num_cores); private: void Reschedule(); diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index 0e9570685..aa09fa453 100644 --- a/src/core/core_timing.cpp +++ b/src/core/core_timing.cpp @@ -17,11 +17,15 @@ namespace Core::Timing { constexpr int MAX_SLICE_LENGTH = 10000; +std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) { + return std::make_shared<EventType>(std::move(callback), std::move(name)); +} + struct CoreTiming::Event { s64 time; u64 fifo_order; u64 userdata; - const EventType* type; + std::weak_ptr<EventType> type; // Sort by time, unless the times are the same, in which case sort by // the order added to the queue @@ -54,36 +58,15 @@ void CoreTiming::Initialize() { event_fifo_id = 0; const auto empty_timed_callback = [](u64, s64) {}; - ev_lost = RegisterEvent("_lost_event", empty_timed_callback); + ev_lost = CreateEvent("_lost_event", empty_timed_callback); } void CoreTiming::Shutdown() { ClearPendingEvents(); - UnregisterAllEvents(); -} - -EventType* CoreTiming::RegisterEvent(const std::string& name, TimedCallback callback) { - std::lock_guard guard{inner_mutex}; - // check for existing type with same name. - // we want event type names to remain unique so that we can use them for serialization. - ASSERT_MSG(event_types.find(name) == event_types.end(), - "CoreTiming Event \"{}\" is already registered. 
Events should only be registered " - "during Init to avoid breaking save states.", - name.c_str()); - - auto info = event_types.emplace(name, EventType{callback, nullptr}); - EventType* event_type = &info.first->second; - event_type->name = &info.first->first; - return event_type; -} - -void CoreTiming::UnregisterAllEvents() { - ASSERT_MSG(event_queue.empty(), "Cannot unregister events with events pending"); - event_types.clear(); } -void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) { - ASSERT(event_type != nullptr); +void CoreTiming::ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type, + u64 userdata) { std::lock_guard guard{inner_mutex}; const s64 timeout = GetTicks() + cycles_into_future; @@ -93,13 +76,15 @@ void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_ty } event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type}); + std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>()); } -void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) { +void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) { std::lock_guard guard{inner_mutex}; + const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { - return e.type == event_type && e.userdata == userdata; + return e.type.lock().get() == event_type.get() && e.userdata == userdata; }); // Removing random items breaks the invariant so we have to re-establish it. 
@@ -130,10 +115,12 @@ void CoreTiming::ClearPendingEvents() { event_queue.clear(); } -void CoreTiming::RemoveEvent(const EventType* event_type) { +void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { std::lock_guard guard{inner_mutex}; - const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), - [&](const Event& e) { return e.type == event_type; }); + + const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) { + return e.type.lock().get() == event_type.get(); + }); // Removing random items breaks the invariant so we have to re-establish it. if (itr != event_queue.end()) { @@ -181,7 +168,11 @@ void CoreTiming::Advance() { std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>()); event_queue.pop_back(); inner_mutex.unlock(); - evt.type->callback(evt.userdata, global_timer - evt.time); + + if (auto event_type{evt.type.lock()}) { + event_type->callback(evt.userdata, global_timer - evt.time); + } + inner_mutex.lock(); } diff --git a/src/core/core_timing.h b/src/core/core_timing.h index 3bb88c810..d50f4eb8a 100644 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h @@ -6,11 +6,12 @@ #include <chrono> #include <functional> +#include <memory> #include <mutex> #include <optional> #include <string> -#include <unordered_map> #include <vector> + #include "common/common_types.h" #include "common/threadsafe_queue.h" @@ -21,10 +22,13 @@ using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>; /// Contains the characteristics of a particular event. struct EventType { + EventType(TimedCallback&& callback, std::string&& name) + : callback{std::move(callback)}, name{std::move(name)} {} + /// The event's callback function. TimedCallback callback; /// A pointer to the name of the event. - const std::string* name; + const std::string name; }; /** @@ -57,31 +61,17 @@ public: /// Tears down all timing related functionality. 
void Shutdown(); - /// Registers a core timing event with the given name and callback. - /// - /// @param name The name of the core timing event to register. - /// @param callback The callback to execute for the event. - /// - /// @returns An EventType instance representing the registered event. - /// - /// @pre The name of the event being registered must be unique among all - /// registered events. - /// - EventType* RegisterEvent(const std::string& name, TimedCallback callback); - - /// Unregisters all registered events thus far. Note: not thread unsafe - void UnregisterAllEvents(); - /// After the first Advance, the slice lengths and the downcount will be reduced whenever an /// event is scheduled earlier than the current values. /// /// Scheduling from a callback will not update the downcount until the Advance() completes. - void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata = 0); + void ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type, + u64 userdata = 0); - void UnscheduleEvent(const EventType* event_type, u64 userdata); + void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata); /// We only permit one event of each type in the queue at a time. - void RemoveEvent(const EventType* event_type); + void RemoveEvent(const std::shared_ptr<EventType>& event_type); void ForceExceptionCheck(s64 cycles); @@ -148,13 +138,18 @@ private: std::vector<Event> event_queue; u64 event_fifo_id = 0; - // Stores each element separately as a linked list node so pointers to elements - // remain stable regardless of rehashes/resizing. - std::unordered_map<std::string, EventType> event_types; - - EventType* ev_lost = nullptr; + std::shared_ptr<EventType> ev_lost; std::mutex inner_mutex; }; +/// Creates a core timing event with the given name and callback. +/// +/// @param name The name of the core timing event to create. +/// @param callback The callback to execute for the event. 
+/// +/// @returns An EventType instance representing the created event. +/// +std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback); + } // namespace Core::Timing diff --git a/src/core/cpu_core_manager.cpp b/src/core/cpu_core_manager.cpp index 8efd410bb..f04a34133 100644 --- a/src/core/cpu_core_manager.cpp +++ b/src/core/cpu_core_manager.cpp @@ -25,7 +25,7 @@ CpuCoreManager::~CpuCoreManager() = default; void CpuCoreManager::Initialize() { barrier = std::make_unique<CpuBarrier>(); - exclusive_monitor = Cpu::MakeExclusiveMonitor(cores.size()); + exclusive_monitor = Cpu::MakeExclusiveMonitor(system.Memory(), cores.size()); for (std::size_t index = 0; index < cores.size(); ++index) { cores[index] = std::make_unique<Cpu>(system, *exclusive_monitor, *barrier, index); diff --git a/src/core/crypto/key_manager.cpp b/src/core/crypto/key_manager.cpp index 222fc95ba..87e6a1fd3 100644 --- a/src/core/crypto/key_manager.cpp +++ b/src/core/crypto/key_manager.cpp @@ -22,6 +22,7 @@ #include "common/file_util.h" #include "common/hex_util.h" #include "common/logging/log.h" +#include "common/string_util.h" #include "core/core.h" #include "core/crypto/aes_util.h" #include "core/crypto/key_manager.h" @@ -378,8 +379,9 @@ std::vector<Ticket> GetTicketblob(const FileUtil::IOFile& ticket_save) { template <size_t size> static std::array<u8, size> operator^(const std::array<u8, size>& lhs, const std::array<u8, size>& rhs) { - std::array<u8, size> out{}; - std::transform(lhs.begin(), lhs.end(), rhs.begin(), out.begin(), std::bit_xor<>()); + std::array<u8, size> out; + std::transform(lhs.begin(), lhs.end(), rhs.begin(), out.begin(), + [](u8 lhs, u8 rhs) { return u8(lhs ^ rhs); }); return out; } @@ -396,7 +398,7 @@ static std::array<u8, target_size> MGF1(const std::array<u8, in_size>& seed) { while (out.size() < target_size) { out.resize(out.size() + 0x20); seed_exp[in_size + 3] = static_cast<u8>(i); - mbedtls_sha256(seed_exp.data(), seed_exp.size(), out.data() + 
out.size() - 0x20, 0); + mbedtls_sha256_ret(seed_exp.data(), seed_exp.size(), out.data() + out.size() - 0x20, 0); ++i; } @@ -538,7 +540,7 @@ void KeyManager::LoadFromFile(const std::string& filename, bool is_title_keys) { Key128 key = Common::HexStringToArray<16>(out[1]); s128_keys[{S128KeyType::Titlekey, rights_id[1], rights_id[0]}] = key; } else { - std::transform(out[0].begin(), out[0].end(), out[0].begin(), ::tolower); + out[0] = Common::ToLower(out[0]); if (s128_file_id.find(out[0]) != s128_file_id.end()) { const auto index = s128_file_id.at(out[0]); Key128 key = Common::HexStringToArray<16>(out[1]); @@ -668,23 +670,27 @@ void KeyManager::WriteKeyToFile(KeyCategory category, std::string_view keyname, const std::array<u8, Size>& key) { const std::string yuzu_keys_dir = FileUtil::GetUserPath(FileUtil::UserPath::KeysDir); std::string filename = "title.keys_autogenerated"; - if (category == KeyCategory::Standard) + if (category == KeyCategory::Standard) { filename = dev_mode ? "dev.keys_autogenerated" : "prod.keys_autogenerated"; - else if (category == KeyCategory::Console) + } else if (category == KeyCategory::Console) { filename = "console.keys_autogenerated"; - const auto add_info_text = !FileUtil::Exists(yuzu_keys_dir + DIR_SEP + filename); - FileUtil::CreateFullPath(yuzu_keys_dir + DIR_SEP + filename); - std::ofstream file(yuzu_keys_dir + DIR_SEP + filename, std::ios::app); - if (!file.is_open()) + } + + const auto path = yuzu_keys_dir + DIR_SEP + filename; + const auto add_info_text = !FileUtil::Exists(path); + FileUtil::CreateFullPath(path); + FileUtil::IOFile file{path, "a"}; + if (!file.IsOpen()) { return; + } if (add_info_text) { - file - << "# This file is autogenerated by Yuzu\n" - << "# It serves to store keys that were automatically generated from the normal keys\n" - << "# If you are experiencing issues involving keys, it may help to delete this file\n"; + file.WriteString( + "# This file is autogenerated by Yuzu\n" + "# It serves to store keys that 
were automatically generated from the normal keys\n" + "# If you are experiencing issues involving keys, it may help to delete this file\n"); } - file << fmt::format("\n{} = {}", keyname, Common::HexToString(key)); + file.WriteString(fmt::format("\n{} = {}", keyname, Common::HexToString(key))); AttemptLoadKeyFile(yuzu_keys_dir, yuzu_keys_dir, filename, category == KeyCategory::Title); } @@ -944,12 +950,10 @@ void KeyManager::DeriveETicket(PartitionDataManager& data) { return; } - Key128 rsa_oaep_kek{}; - std::transform(seed3.begin(), seed3.end(), mask0.begin(), rsa_oaep_kek.begin(), - std::bit_xor<>()); - - if (rsa_oaep_kek == Key128{}) + const Key128 rsa_oaep_kek = seed3 ^ mask0; + if (rsa_oaep_kek == Key128{}) { return; + } SetKey(S128KeyType::Source, rsa_oaep_kek, static_cast<u64>(SourceKeyType::RSAOaepKekGeneration)); diff --git a/src/core/crypto/partition_data_manager.cpp b/src/core/crypto/partition_data_manager.cpp index 594cd82c5..d64302f2e 100644 --- a/src/core/crypto/partition_data_manager.cpp +++ b/src/core/crypto/partition_data_manager.cpp @@ -161,7 +161,7 @@ std::array<u8, key_size> FindKeyFromHex(const std::vector<u8>& binary, std::array<u8, 0x20> temp{}; for (size_t i = 0; i < binary.size() - key_size; ++i) { - mbedtls_sha256(binary.data() + i, key_size, temp.data(), 0); + mbedtls_sha256_ret(binary.data() + i, key_size, temp.data(), 0); if (temp != hash) continue; @@ -189,7 +189,7 @@ static std::array<Key128, 0x20> FindEncryptedMasterKeyFromHex(const std::vector< AESCipher<Key128> cipher(key, Mode::ECB); for (size_t i = 0; i < binary.size() - 0x10; ++i) { cipher.Transcode(binary.data() + i, dec_temp.size(), dec_temp.data(), Op::Decrypt); - mbedtls_sha256(dec_temp.data(), dec_temp.size(), temp.data(), 0); + mbedtls_sha256_ret(dec_temp.data(), dec_temp.size(), temp.data(), 0); for (size_t k = 0; k < out.size(); ++k) { if (temp == master_key_hashes[k]) { @@ -204,11 +204,12 @@ static std::array<Key128, 0x20> FindEncryptedMasterKeyFromHex(const 
std::vector< FileSys::VirtualFile FindFileInDirWithNames(const FileSys::VirtualDir& dir, const std::string& name) { - auto upper = name; - std::transform(upper.begin(), upper.end(), upper.begin(), [](u8 c) { return std::toupper(c); }); + const auto upper = Common::ToUpper(name); + for (const auto& fname : {name, name + ".bin", upper, upper + ".BIN"}) { - if (dir->GetFile(fname) != nullptr) + if (dir->GetFile(fname) != nullptr) { return dir->GetFile(fname); + } } return nullptr; diff --git a/src/core/file_sys/directory.h b/src/core/file_sys/directory.h index 7b5c509fb..0d73eecc9 100644 --- a/src/core/file_sys/directory.h +++ b/src/core/file_sys/directory.h @@ -15,7 +15,7 @@ namespace FileSys { -enum EntryType : u8 { +enum class EntryType : u8 { Directory = 0, File = 1, }; diff --git a/src/core/file_sys/kernel_executable.cpp b/src/core/file_sys/kernel_executable.cpp index 371300684..76313679d 100644 --- a/src/core/file_sys/kernel_executable.cpp +++ b/src/core/file_sys/kernel_executable.cpp @@ -147,7 +147,7 @@ std::vector<u32> KIP::GetKernelCapabilities() const { } s32 KIP::GetMainThreadPriority() const { - return header.main_thread_priority; + return static_cast<s32>(header.main_thread_priority); } u32 KIP::GetMainThreadStackSize() const { diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp index df0ecb15c..e226e9711 100644 --- a/src/core/file_sys/patch_manager.cpp +++ b/src/core/file_sys/patch_manager.cpp @@ -76,7 +76,7 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const { const auto& disabled = Settings::values.disabled_addons[title_id]; const auto update_disabled = - std::find(disabled.begin(), disabled.end(), "Update") != disabled.end(); + std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend(); // Game Updates const auto update_tid = GetUpdateTitleID(title_id); @@ -127,7 +127,7 @@ std::vector<VirtualFile> PatchManager::CollectPatches(const std::vector<VirtualD std::vector<VirtualFile> out; 
out.reserve(patch_dirs.size()); for (const auto& subdir : patch_dirs) { - if (std::find(disabled.begin(), disabled.end(), subdir->GetName()) != disabled.end()) + if (std::find(disabled.cbegin(), disabled.cend(), subdir->GetName()) != disabled.cend()) continue; auto exefs_dir = subdir->GetSubdirectory("exefs"); @@ -284,12 +284,17 @@ std::vector<Memory::CheatEntry> PatchManager::CreateCheatList( return {}; } + const auto& disabled = Settings::values.disabled_addons[title_id]; auto patch_dirs = load_dir->GetSubdirectories(); std::sort(patch_dirs.begin(), patch_dirs.end(), [](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); }); std::vector<Memory::CheatEntry> out; for (const auto& subdir : patch_dirs) { + if (std::find(disabled.cbegin(), disabled.cend(), subdir->GetName()) != disabled.cend()) { + continue; + } + auto cheats_dir = subdir->GetSubdirectory("cheats"); if (cheats_dir != nullptr) { auto res = ReadCheatFileFromFolder(system, title_id, build_id_, cheats_dir, true); @@ -331,8 +336,9 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t layers.reserve(patch_dirs.size() + 1); layers_ext.reserve(patch_dirs.size() + 1); for (const auto& subdir : patch_dirs) { - if (std::find(disabled.begin(), disabled.end(), subdir->GetName()) != disabled.end()) + if (std::find(disabled.cbegin(), disabled.cend(), subdir->GetName()) != disabled.cend()) { continue; + } auto romfs_dir = subdir->GetSubdirectory("romfs"); if (romfs_dir != nullptr) @@ -381,7 +387,7 @@ VirtualFile PatchManager::PatchRomFS(VirtualFile romfs, u64 ivfc_offset, Content const auto& disabled = Settings::values.disabled_addons[title_id]; const auto update_disabled = - std::find(disabled.begin(), disabled.end(), "Update") != disabled.end(); + std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend(); if (!update_disabled && update != nullptr) { const auto new_nca = std::make_shared<NCA>(update, romfs, ivfc_offset); @@ -431,7 +437,7 @@ 
std::map<std::string, std::string, std::less<>> PatchManager::GetPatchVersionNam auto [nacp, discard_icon_file] = update.GetControlMetadata(); const auto update_disabled = - std::find(disabled.begin(), disabled.end(), "Update") != disabled.end(); + std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend(); const auto update_label = update_disabled ? "[D] Update" : "Update"; if (nacp != nullptr) { diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp index 7310b3602..1d6c30962 100644 --- a/src/core/file_sys/program_metadata.cpp +++ b/src/core/file_sys/program_metadata.cpp @@ -52,14 +52,14 @@ Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) { } void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, - u8 main_thread_prio, u8 main_thread_core, + s32 main_thread_prio, u32 main_thread_core, u32 main_thread_stack_size, u64 title_id, u64 filesystem_permissions, KernelCapabilityDescriptors capabilities) { npdm_header.has_64_bit_instructions.Assign(is_64_bit); npdm_header.address_space_type.Assign(address_space); - npdm_header.main_thread_priority = main_thread_prio; - npdm_header.main_thread_cpu = main_thread_core; + npdm_header.main_thread_priority = static_cast<u8>(main_thread_prio); + npdm_header.main_thread_cpu = static_cast<u8>(main_thread_core); npdm_header.main_stack_size = main_thread_stack_size; aci_header.title_id = title_id; aci_file_access.permissions = filesystem_permissions; diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h index 88ec97d85..f8759a396 100644 --- a/src/core/file_sys/program_metadata.h +++ b/src/core/file_sys/program_metadata.h @@ -47,8 +47,8 @@ public: Loader::ResultStatus Load(VirtualFile file); // Load from parameters instead of NPDM file, used for KIP - void LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, u8 main_thread_prio, - u8 main_thread_core, u32 main_thread_stack_size, u64 
title_id, + void LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, s32 main_thread_prio, + u32 main_thread_core, u32 main_thread_stack_size, u64 title_id, u64 filesystem_permissions, KernelCapabilityDescriptors capabilities); bool Is64BitProgram() const; diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp index ac3fbd849..6e9cf67ef 100644 --- a/src/core/file_sys/registered_cache.cpp +++ b/src/core/file_sys/registered_cache.cpp @@ -62,7 +62,7 @@ static std::string GetRelativePathFromNcaID(const std::array<u8, 16>& nca_id, bo Common::HexToString(nca_id, second_hex_upper)); Core::Crypto::SHA256Hash hash{}; - mbedtls_sha256(nca_id.data(), nca_id.size(), hash.data(), 0); + mbedtls_sha256_ret(nca_id.data(), nca_id.size(), hash.data(), 0); return fmt::format(cnmt_suffix ? "/000000{:02X}/{}.cnmt.nca" : "/000000{:02X}/{}.nca", hash[0], Common::HexToString(nca_id, second_hex_upper)); } @@ -141,7 +141,7 @@ bool PlaceholderCache::Create(const NcaID& id, u64 size) const { } Core::Crypto::SHA256Hash hash{}; - mbedtls_sha256(id.data(), id.size(), hash.data(), 0); + mbedtls_sha256_ret(id.data(), id.size(), hash.data(), 0); const auto dirname = fmt::format("000000{:02X}", hash[0]); const auto dir2 = GetOrCreateDirectoryRelative(dir, dirname); @@ -165,7 +165,7 @@ bool PlaceholderCache::Delete(const NcaID& id) const { } Core::Crypto::SHA256Hash hash{}; - mbedtls_sha256(id.data(), id.size(), hash.data(), 0); + mbedtls_sha256_ret(id.data(), id.size(), hash.data(), 0); const auto dirname = fmt::format("000000{:02X}", hash[0]); const auto dir2 = GetOrCreateDirectoryRelative(dir, dirname); @@ -603,7 +603,7 @@ InstallResult RegisteredCache::InstallEntry(const NCA& nca, TitleType type, OptionalHeader opt_header{0, 0}; ContentRecord c_rec{{}, {}, {}, GetCRTypeFromNCAType(nca.GetType()), {}}; const auto& data = nca.GetBaseFile()->ReadBytes(0x100000); - mbedtls_sha256(data.data(), data.size(), c_rec.hash.data(), 0); + 
mbedtls_sha256_ret(data.data(), data.size(), c_rec.hash.data(), 0); memcpy(&c_rec.nca_id, &c_rec.hash, 16); const CNMT new_cnmt(header, opt_header, {c_rec}, {}); if (!RawInstallYuzuMeta(new_cnmt)) @@ -626,7 +626,7 @@ InstallResult RegisteredCache::RawInstallNCA(const NCA& nca, const VfsCopyFuncti id = *override_id; } else { const auto& data = in->ReadBytes(0x100000); - mbedtls_sha256(data.data(), data.size(), hash.data(), 0); + mbedtls_sha256_ret(data.data(), data.size(), hash.data(), 0); memcpy(id.data(), hash.data(), 16); } diff --git a/src/core/file_sys/romfs.cpp b/src/core/file_sys/romfs.cpp index ebbdf081e..c909d1ce4 100644 --- a/src/core/file_sys/romfs.cpp +++ b/src/core/file_sys/romfs.cpp @@ -2,6 +2,8 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include <memory> + #include "common/common_types.h" #include "common/swap.h" #include "core/file_sys/fsmitm_romfsbuild.h" @@ -12,7 +14,7 @@ #include "core/file_sys/vfs_vector.h" namespace FileSys { - +namespace { constexpr u32 ROMFS_ENTRY_EMPTY = 0xFFFFFFFF; struct TableLocation { @@ -51,7 +53,7 @@ struct FileEntry { static_assert(sizeof(FileEntry) == 0x20, "FileEntry has incorrect size."); template <typename Entry> -static std::pair<Entry, std::string> GetEntry(const VirtualFile& file, std::size_t offset) { +std::pair<Entry, std::string> GetEntry(const VirtualFile& file, std::size_t offset) { Entry entry{}; if (file->ReadObject(&entry, offset) != sizeof(Entry)) return {}; @@ -99,6 +101,7 @@ void ProcessDirectory(VirtualFile file, std::size_t dir_offset, std::size_t file this_dir_offset = entry.first.sibling; } } +} // Anonymous namespace VirtualDir ExtractRomFS(VirtualFile file, RomFSExtractionType type) { RomFSHeader header{}; diff --git a/src/core/file_sys/romfs.h b/src/core/file_sys/romfs.h index 1c89be8a4..2fd07ed04 100644 --- a/src/core/file_sys/romfs.h +++ b/src/core/file_sys/romfs.h @@ -5,10 +5,6 @@ #pragma once #include <array> -#include <map> -#include 
"common/common_funcs.h" -#include "common/common_types.h" -#include "common/swap.h" #include "core/file_sys/vfs.h" namespace FileSys { diff --git a/src/core/file_sys/romfs_factory.cpp b/src/core/file_sys/romfs_factory.cpp index 4bd2e6183..418a39a7e 100644 --- a/src/core/file_sys/romfs_factory.cpp +++ b/src/core/file_sys/romfs_factory.cpp @@ -71,12 +71,12 @@ ResultVal<VirtualFile> RomFSFactory::Open(u64 title_id, StorageId storage, if (res == nullptr) { // TODO(DarkLordZach): Find the right error code to use here - return ResultCode(-1); + return RESULT_UNKNOWN; } const auto romfs = res->GetRomFS(); if (romfs == nullptr) { // TODO(DarkLordZach): Find the right error code to use here - return ResultCode(-1); + return RESULT_UNKNOWN; } return MakeResult<VirtualFile>(romfs); } diff --git a/src/core/file_sys/savedata_factory.cpp b/src/core/file_sys/savedata_factory.cpp index e2a7eaf7b..f3def93ab 100644 --- a/src/core/file_sys/savedata_factory.cpp +++ b/src/core/file_sys/savedata_factory.cpp @@ -90,7 +90,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space, // Return an error if the save data doesn't actually exist. if (out == nullptr) { // TODO(DarkLordZach): Find out correct error code. - return ResultCode(-1); + return RESULT_UNKNOWN; } return MakeResult<VirtualDir>(std::move(out)); @@ -111,7 +111,7 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space, // Return an error if the save data doesn't actually exist. if (out == nullptr) { // TODO(Subv): Find out correct error code. 
- return ResultCode(-1); + return RESULT_UNKNOWN; } return MakeResult<VirtualDir>(std::move(out)); diff --git a/src/core/file_sys/vfs_libzip.cpp b/src/core/file_sys/vfs_libzip.cpp index 8bdaa7e4a..11d1978ea 100644 --- a/src/core/file_sys/vfs_libzip.cpp +++ b/src/core/file_sys/vfs_libzip.cpp @@ -27,7 +27,7 @@ VirtualDir ExtractZIP(VirtualFile file) { std::shared_ptr<VectorVfsDirectory> out = std::make_shared<VectorVfsDirectory>(); - const auto num_entries = zip_get_num_entries(zip.get(), 0); + const auto num_entries = static_cast<std::size_t>(zip_get_num_entries(zip.get(), 0)); zip_stat_t stat{}; zip_stat_init(&stat); diff --git a/src/core/file_sys/xts_archive.cpp b/src/core/file_sys/xts_archive.cpp index 4bc5cb2ee..86e06ccb9 100644 --- a/src/core/file_sys/xts_archive.cpp +++ b/src/core/file_sys/xts_archive.cpp @@ -7,12 +7,13 @@ #include <cstring> #include <regex> #include <string> + #include <mbedtls/md.h> #include <mbedtls/sha256.h> -#include "common/assert.h" + #include "common/file_util.h" #include "common/hex_util.h" -#include "common/logging/log.h" +#include "common/string_util.h" #include "core/crypto/aes_util.h" #include "core/crypto/xts_encryption_layer.h" #include "core/file_sys/partition_filesystem.h" @@ -53,18 +54,15 @@ NAX::NAX(VirtualFile file_) : header(std::make_unique<NAXHeader>()), file(std::m return; } - std::string two_dir = match[1]; - std::string nca_id = match[2]; - std::transform(two_dir.begin(), two_dir.end(), two_dir.begin(), ::toupper); - std::transform(nca_id.begin(), nca_id.end(), nca_id.begin(), ::tolower); - + const std::string two_dir = Common::ToUpper(match[1]); + const std::string nca_id = Common::ToLower(match[2]); status = Parse(fmt::format("/registered/{}/{}.nca", two_dir, nca_id)); } NAX::NAX(VirtualFile file_, std::array<u8, 0x10> nca_id) : header(std::make_unique<NAXHeader>()), file(std::move(file_)) { Core::Crypto::SHA256Hash hash{}; - mbedtls_sha256(nca_id.data(), nca_id.size(), hash.data(), 0); + 
mbedtls_sha256_ret(nca_id.data(), nca_id.size(), hash.data(), 0); status = Parse(fmt::format("/registered/000000{:02X}/{}.nca", hash[0], Common::HexToString(nca_id, false))); } @@ -93,8 +91,7 @@ Loader::ResultStatus NAX::Parse(std::string_view path) { std::size_t i = 0; for (; i < sd_keys.size(); ++i) { std::array<Core::Crypto::Key128, 2> nax_keys{}; - if (!CalculateHMAC256(nax_keys.data(), sd_keys[i].data(), 0x10, std::string(path).c_str(), - path.size())) { + if (!CalculateHMAC256(nax_keys.data(), sd_keys[i].data(), 0x10, path.data(), path.size())) { return Loader::ResultStatus::ErrorNAXKeyHMACFailed; } diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp index 20bb50868..37cb28848 100644 --- a/src/core/gdbstub/gdbstub.cpp +++ b/src/core/gdbstub/gdbstub.cpp @@ -468,7 +468,8 @@ static u8 ReadByte() { /// Calculate the checksum of the current command buffer. static u8 CalculateChecksum(const u8* buffer, std::size_t length) { - return static_cast<u8>(std::accumulate(buffer, buffer + length, 0, std::plus<u8>())); + return static_cast<u8>(std::accumulate(buffer, buffer + length, u8{0}, + [](u8 lhs, u8 rhs) { return u8(lhs + rhs); })); } /** @@ -507,8 +508,9 @@ static void RemoveBreakpoint(BreakpointType type, VAddr addr) { bp->second.len, bp->second.addr, static_cast<int>(type)); if (type == BreakpointType::Execute) { - Memory::WriteBlock(bp->second.addr, bp->second.inst.data(), bp->second.inst.size()); - Core::System::GetInstance().InvalidateCpuInstructionCaches(); + auto& system = Core::System::GetInstance(); + system.Memory().WriteBlock(bp->second.addr, bp->second.inst.data(), bp->second.inst.size()); + system.InvalidateCpuInstructionCaches(); } p.erase(addr); } @@ -968,12 +970,13 @@ static void ReadMemory() { SendReply("E01"); } - if (!Memory::IsValidVirtualAddress(addr)) { + auto& memory = Core::System::GetInstance().Memory(); + if (!memory.IsValidVirtualAddress(addr)) { return SendReply("E00"); } std::vector<u8> data(len); - 
Memory::ReadBlock(addr, data.data(), len); + memory.ReadBlock(addr, data.data(), len); MemToGdbHex(reply, data.data(), len); reply[len * 2] = '\0'; @@ -983,22 +986,23 @@ static void ReadMemory() { /// Modify location in memory with data received from the gdb client. static void WriteMemory() { auto start_offset = command_buffer + 1; - auto addr_pos = std::find(start_offset, command_buffer + command_length, ','); - VAddr addr = HexToLong(start_offset, static_cast<u64>(addr_pos - start_offset)); + const auto addr_pos = std::find(start_offset, command_buffer + command_length, ','); + const VAddr addr = HexToLong(start_offset, static_cast<u64>(addr_pos - start_offset)); start_offset = addr_pos + 1; - auto len_pos = std::find(start_offset, command_buffer + command_length, ':'); - u64 len = HexToLong(start_offset, static_cast<u64>(len_pos - start_offset)); + const auto len_pos = std::find(start_offset, command_buffer + command_length, ':'); + const u64 len = HexToLong(start_offset, static_cast<u64>(len_pos - start_offset)); - if (!Memory::IsValidVirtualAddress(addr)) { + auto& system = Core::System::GetInstance(); + auto& memory = system.Memory(); + if (!memory.IsValidVirtualAddress(addr)) { return SendReply("E00"); } std::vector<u8> data(len); - GdbHexToMem(data.data(), len_pos + 1, len); - Memory::WriteBlock(addr, data.data(), len); - Core::System::GetInstance().InvalidateCpuInstructionCaches(); + memory.WriteBlock(addr, data.data(), len); + system.InvalidateCpuInstructionCaches(); SendReply("OK"); } @@ -1054,12 +1058,15 @@ static bool CommitBreakpoint(BreakpointType type, VAddr addr, u64 len) { breakpoint.active = true; breakpoint.addr = addr; breakpoint.len = len; - Memory::ReadBlock(addr, breakpoint.inst.data(), breakpoint.inst.size()); + + auto& system = Core::System::GetInstance(); + auto& memory = system.Memory(); + memory.ReadBlock(addr, breakpoint.inst.data(), breakpoint.inst.size()); static constexpr std::array<u8, 4> btrap{0x00, 0x7d, 0x20, 0xd4}; if (type == 
BreakpointType::Execute) { - Memory::WriteBlock(addr, btrap.data(), btrap.size()); - Core::System::GetInstance().InvalidateCpuInstructionCaches(); + memory.WriteBlock(addr, btrap.data(), btrap.size()); + system.InvalidateCpuInstructionCaches(); } p.insert({addr, breakpoint}); diff --git a/src/core/hardware_interrupt_manager.cpp b/src/core/hardware_interrupt_manager.cpp index c2115db2d..c629d9fa1 100644 --- a/src/core/hardware_interrupt_manager.cpp +++ b/src/core/hardware_interrupt_manager.cpp @@ -11,13 +11,12 @@ namespace Core::Hardware { InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) { - gpu_interrupt_event = - system.CoreTiming().RegisterEvent("GPUInterrupt", [this](u64 message, s64) { - auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv"); - const u32 syncpt = static_cast<u32>(message >> 32); - const u32 value = static_cast<u32>(message); - nvdrv->SignalGPUInterruptSyncpt(syncpt, value); - }); + gpu_interrupt_event = Core::Timing::CreateEvent("GPUInterrupt", [this](u64 message, s64) { + auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv"); + const u32 syncpt = static_cast<u32>(message >> 32); + const u32 value = static_cast<u32>(message); + nvdrv->SignalGPUInterruptSyncpt(syncpt, value); + }); } InterruptManager::~InterruptManager() = default; diff --git a/src/core/hardware_interrupt_manager.h b/src/core/hardware_interrupt_manager.h index 494db883a..5fa306ae0 100644 --- a/src/core/hardware_interrupt_manager.h +++ b/src/core/hardware_interrupt_manager.h @@ -4,6 +4,8 @@ #pragma once +#include <memory> + #include "common/common_types.h" namespace Core { @@ -25,7 +27,7 @@ public: private: Core::System& system; - Core::Timing::EventType* gpu_interrupt_event{}; + std::shared_ptr<Core::Timing::EventType> gpu_interrupt_event; }; } // namespace Core::Hardware diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h index 5bb139483..0dc6a4a43 100644 --- 
a/src/core/hle/ipc_helpers.h +++ b/src/core/hle/ipc_helpers.h @@ -19,6 +19,7 @@ #include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/server_session.h" +#include "core/hle/kernel/session.h" #include "core/hle/result.h" namespace IPC { @@ -139,10 +140,9 @@ public: context->AddDomainObject(std::move(iface)); } else { auto& kernel = Core::System::GetInstance().Kernel(); - auto [server, client] = - Kernel::ServerSession::CreateSessionPair(kernel, iface->GetServiceName()); - iface->ClientConnected(server); + auto [client, server] = Kernel::Session::Create(kernel, iface->GetServiceName()); context->AddMoveObject(std::move(client)); + iface->ClientConnected(std::move(server)); } } @@ -203,10 +203,10 @@ public: void PushRaw(const T& value); template <typename... O> - void PushMoveObjects(Kernel::SharedPtr<O>... pointers); + void PushMoveObjects(std::shared_ptr<O>... pointers); template <typename... O> - void PushCopyObjects(Kernel::SharedPtr<O>... pointers); + void PushCopyObjects(std::shared_ptr<O>... pointers); private: u32 normal_params_size{}; @@ -298,7 +298,7 @@ void ResponseBuilder::Push(const First& first_value, const Other&... other_value } template <typename... O> -inline void ResponseBuilder::PushCopyObjects(Kernel::SharedPtr<O>... pointers) { +inline void ResponseBuilder::PushCopyObjects(std::shared_ptr<O>... pointers) { auto objects = {pointers...}; for (auto& object : objects) { context->AddCopyObject(std::move(object)); @@ -306,7 +306,7 @@ inline void ResponseBuilder::PushCopyObjects(Kernel::SharedPtr<O>... pointers) { } template <typename... O> -inline void ResponseBuilder::PushMoveObjects(Kernel::SharedPtr<O>... pointers) { +inline void ResponseBuilder::PushMoveObjects(std::shared_ptr<O>... 
pointers) { auto objects = {pointers...}; for (auto& object : objects) { context->AddMoveObject(std::move(object)); @@ -357,10 +357,10 @@ public: T PopRaw(); template <typename T> - Kernel::SharedPtr<T> GetMoveObject(std::size_t index); + std::shared_ptr<T> GetMoveObject(std::size_t index); template <typename T> - Kernel::SharedPtr<T> GetCopyObject(std::size_t index); + std::shared_ptr<T> GetCopyObject(std::size_t index); template <class T> std::shared_ptr<T> PopIpcInterface() { @@ -465,12 +465,12 @@ void RequestParser::Pop(First& first_value, Other&... other_values) { } template <typename T> -Kernel::SharedPtr<T> RequestParser::GetMoveObject(std::size_t index) { +std::shared_ptr<T> RequestParser::GetMoveObject(std::size_t index) { return context->GetMoveObject<T>(index); } template <typename T> -Kernel::SharedPtr<T> RequestParser::GetCopyObject(std::size_t index) { +std::shared_ptr<T> RequestParser::GetCopyObject(std::size_t index) { return context->GetCopyObject<T>(index); } diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index de0a9064e..db189c8e3 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -11,18 +11,16 @@ #include "core/core_cpu.h" #include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/errors.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/process.h" #include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/result.h" #include "core/memory.h" namespace Kernel { -namespace { + // Wake up num_to_wake (or all) threads in a vector. -void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) { - auto& system = Core::System::GetInstance(); +void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, + s32 num_to_wake) { // Only process up to 'target' threads, unless 'target' is <= 0, in which case process // them all. 
std::size_t last = waiting_threads.size(); @@ -34,12 +32,12 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_ for (std::size_t i = 0; i < last; i++) { ASSERT(waiting_threads[i]->GetStatus() == ThreadStatus::WaitArb); waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS); + RemoveThread(waiting_threads[i]); waiting_threads[i]->SetArbiterWaitAddress(0); waiting_threads[i]->ResumeFromWait(); system.PrepareReschedule(waiting_threads[i]->GetProcessorID()); } } -} // Anonymous namespace AddressArbiter::AddressArbiter(Core::System& system) : system{system} {} AddressArbiter::~AddressArbiter() = default; @@ -59,35 +57,41 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v } ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { - const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); + const std::vector<std::shared_ptr<Thread>> waiting_threads = + GetThreadsWaitingOnAddress(address); WakeThreads(waiting_threads, num_to_wake); return RESULT_SUCCESS; } ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake) { + auto& memory = system.Memory(); + // Ensure that we can write to the address. - if (!Memory::IsValidVirtualAddress(address)) { + if (!memory.IsValidVirtualAddress(address)) { return ERR_INVALID_ADDRESS_STATE; } - if (static_cast<s32>(Memory::Read32(address)) != value) { + if (static_cast<s32>(memory.Read32(address)) != value) { return ERR_INVALID_STATE; } - Memory::Write32(address, static_cast<u32>(value + 1)); + memory.Write32(address, static_cast<u32>(value + 1)); return SignalToAddressOnly(address, num_to_wake); } ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake) { + auto& memory = system.Memory(); + // Ensure that we can write to the address. 
- if (!Memory::IsValidVirtualAddress(address)) { + if (!memory.IsValidVirtualAddress(address)) { return ERR_INVALID_ADDRESS_STATE; } // Get threads waiting on the address. - const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address); + const std::vector<std::shared_ptr<Thread>> waiting_threads = + GetThreadsWaitingOnAddress(address); // Determine the modified value depending on the waiting count. s32 updated_value; @@ -107,11 +111,11 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a } } - if (static_cast<s32>(Memory::Read32(address)) != value) { + if (static_cast<s32>(memory.Read32(address)) != value) { return ERR_INVALID_STATE; } - Memory::Write32(address, static_cast<u32>(updated_value)); + memory.Write32(address, static_cast<u32>(updated_value)); WakeThreads(waiting_threads, num_to_wake); return RESULT_SUCCESS; } @@ -132,18 +136,20 @@ ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, bool should_decrement) { + auto& memory = system.Memory(); + // Ensure that we can read the address. - if (!Memory::IsValidVirtualAddress(address)) { + if (!memory.IsValidVirtualAddress(address)) { return ERR_INVALID_ADDRESS_STATE; } - const s32 cur_value = static_cast<s32>(Memory::Read32(address)); + const s32 cur_value = static_cast<s32>(memory.Read32(address)); if (cur_value >= value) { return ERR_INVALID_STATE; } if (should_decrement) { - Memory::Write32(address, static_cast<u32>(cur_value - 1)); + memory.Write32(address, static_cast<u32>(cur_value - 1)); } // Short-circuit without rescheduling, if timeout is zero. @@ -155,15 +161,19 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 } ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { + auto& memory = system.Memory(); + // Ensure that we can read the address. 
- if (!Memory::IsValidVirtualAddress(address)) { + if (!memory.IsValidVirtualAddress(address)) { return ERR_INVALID_ADDRESS_STATE; } + // Only wait for the address if equal. - if (static_cast<s32>(Memory::Read32(address)) != value) { + if (static_cast<s32>(memory.Read32(address)) != value) { return ERR_INVALID_STATE; } - // Short-circuit without rescheduling, if timeout is zero. + + // Short-circuit without rescheduling if timeout is zero. if (timeout == 0) { return RESULT_TIMEOUT; } @@ -172,36 +182,62 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t } ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) { - SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread(); + Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); current_thread->SetArbiterWaitAddress(address); + InsertThread(SharedFrom(current_thread)); current_thread->SetStatus(ThreadStatus::WaitArb); current_thread->InvalidateWakeupCallback(); - current_thread->WakeAfterDelay(timeout); system.PrepareReschedule(current_thread->GetProcessorID()); return RESULT_TIMEOUT; } -std::vector<SharedPtr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) const { - - // Retrieve all threads that are waiting for this address. 
- std::vector<SharedPtr<Thread>> threads; - const auto& scheduler = system.GlobalScheduler(); - const auto& thread_list = scheduler.GetThreadList(); +void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) { + ASSERT(thread->GetStatus() == ThreadStatus::WaitArb); + RemoveThread(thread); + thread->SetArbiterWaitAddress(0); +} - for (const auto& thread : thread_list) { - if (thread->GetArbiterWaitAddress() == address) { - threads.push_back(thread); +void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) { + const VAddr arb_addr = thread->GetArbiterWaitAddress(); + std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr]; + auto it = thread_list.begin(); + while (it != thread_list.end()) { + const std::shared_ptr<Thread>& current_thread = *it; + if (current_thread->GetPriority() >= thread->GetPriority()) { + thread_list.insert(it, thread); + return; } + ++it; } + thread_list.push_back(std::move(thread)); +} - // Sort them by priority, such that the highest priority ones come first. 
- std::sort(threads.begin(), threads.end(), - [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) { - return lhs->GetPriority() < rhs->GetPriority(); - }); +void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) { + const VAddr arb_addr = thread->GetArbiterWaitAddress(); + std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr]; + auto it = thread_list.begin(); + while (it != thread_list.end()) { + const std::shared_ptr<Thread>& current_thread = *it; + if (current_thread.get() == thread.get()) { + thread_list.erase(it); + return; + } + ++it; + } + UNREACHABLE(); +} - return threads; +std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) { + std::vector<std::shared_ptr<Thread>> result; + std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[address]; + auto it = thread_list.begin(); + while (it != thread_list.end()) { + std::shared_ptr<Thread> current_thread = *it; + result.push_back(std::move(current_thread)); + ++it; + } + return result; } } // namespace Kernel diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h index ed0d0e69f..386983e54 100644 --- a/src/core/hle/kernel/address_arbiter.h +++ b/src/core/hle/kernel/address_arbiter.h @@ -4,10 +4,12 @@ #pragma once +#include <list> +#include <memory> +#include <unordered_map> #include <vector> #include "common/common_types.h" -#include "core/hle/kernel/object.h" union ResultCode; @@ -48,6 +50,9 @@ public: /// Waits on an address with a particular arbitration type. ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns); + /// Removes a thread from the container and resets its address arbiter adress to 0 + void HandleWakeupThread(std::shared_ptr<Thread> thread); + private: /// Signals an address being waited on. 
ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake); @@ -71,8 +76,20 @@ private: // Waits on the given address with a timeout in nanoseconds ResultCode WaitForAddressImpl(VAddr address, s64 timeout); + /// Wake up num_to_wake (or all) threads in a vector. + void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake); + + /// Insert a thread into the address arbiter container + void InsertThread(std::shared_ptr<Thread> thread); + + /// Removes a thread from the address arbiter container + void RemoveThread(std::shared_ptr<Thread> thread); + // Gets the threads waiting on an address. - std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const; + std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address); + + /// List of threads waiting for a address arbiter + std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads; Core::System& system; }; diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp index 744b1697d..5498fd313 100644 --- a/src/core/hle/kernel/client_port.cpp +++ b/src/core/hle/kernel/client_port.cpp @@ -8,39 +8,35 @@ #include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/server_port.h" -#include "core/hle/kernel/server_session.h" +#include "core/hle/kernel/session.h" namespace Kernel { ClientPort::ClientPort(KernelCore& kernel) : Object{kernel} {} ClientPort::~ClientPort() = default; -SharedPtr<ServerPort> ClientPort::GetServerPort() const { +std::shared_ptr<ServerPort> ClientPort::GetServerPort() const { return server_port; } -ResultVal<SharedPtr<ClientSession>> ClientPort::Connect() { - // Note: Threads do not wait for the server endpoint to call - // AcceptSession before returning from this call. 
- +ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() { if (active_sessions >= max_sessions) { return ERR_MAX_CONNECTIONS_REACHED; } active_sessions++; - // Create a new session pair, let the created sessions inherit the parent port's HLE handler. - auto [server, client] = ServerSession::CreateSessionPair(kernel, server_port->GetName(), this); + auto [client, server] = Kernel::Session::Create(kernel, name); if (server_port->HasHLEHandler()) { - server_port->GetHLEHandler()->ClientConnected(server); + server_port->GetHLEHandler()->ClientConnected(std::move(server)); } else { - server_port->AppendPendingSession(server); + server_port->AppendPendingSession(std::move(server)); } // Wake the threads waiting on the ServerPort server_port->WakeupAllWaitingThreads(); - return MakeResult(client); + return MakeResult(std::move(client)); } void ClientPort::ConnectionClosed() { diff --git a/src/core/hle/kernel/client_port.h b/src/core/hle/kernel/client_port.h index 4921ad4f0..9762bbf0d 100644 --- a/src/core/hle/kernel/client_port.h +++ b/src/core/hle/kernel/client_port.h @@ -4,7 +4,9 @@ #pragma once +#include <memory> #include <string> + #include "common/common_types.h" #include "core/hle/kernel/object.h" #include "core/hle/result.h" @@ -17,6 +19,9 @@ class ServerPort; class ClientPort final : public Object { public: + explicit ClientPort(KernelCore& kernel); + ~ClientPort() override; + friend class ServerPort; std::string GetTypeName() const override { return "ClientPort"; @@ -30,7 +35,7 @@ public: return HANDLE_TYPE; } - SharedPtr<ServerPort> GetServerPort() const; + std::shared_ptr<ServerPort> GetServerPort() const; /** * Creates a new Session pair, adds the created ServerSession to the associated ServerPort's @@ -38,7 +43,7 @@ public: * waiting on it to awake. * @returns ClientSession The client endpoint of the created Session pair, or error code. 
*/ - ResultVal<SharedPtr<ClientSession>> Connect(); + ResultVal<std::shared_ptr<ClientSession>> Connect(); /** * Signifies that a previously active connection has been closed, @@ -47,10 +52,7 @@ public: void ConnectionClosed(); private: - explicit ClientPort(KernelCore& kernel); - ~ClientPort() override; - - SharedPtr<ServerPort> server_port; ///< ServerPort associated with this client port. + std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port. u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have u32 active_sessions = 0; ///< Number of currently open sessions to this port std::string name; ///< Name of client port (optional) diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp index c17baa50a..4669a14ad 100644 --- a/src/core/hle/kernel/client_session.cpp +++ b/src/core/hle/kernel/client_session.cpp @@ -1,4 +1,4 @@ -// Copyright 2016 Citra Emulator Project +// Copyright 2019 yuzu emulator team // Licensed under GPLv2 or any later version // Refer to the license.txt file included. @@ -12,29 +12,44 @@ namespace Kernel { -ClientSession::ClientSession(KernelCore& kernel) : Object{kernel} {} +ClientSession::ClientSession(KernelCore& kernel) : WaitObject{kernel} {} + ClientSession::~ClientSession() { // This destructor will be called automatically when the last ClientSession handle is closed by // the emulated application. - - // A local reference to the ServerSession is necessary to guarantee it - // will be kept alive until after ClientDisconnected() returns. 
- SharedPtr<ServerSession> server = parent->server; - if (server) { - server->ClientDisconnected(); + if (parent->Server()) { + parent->Server()->ClientDisconnected(); } +} + +bool ClientSession::ShouldWait(const Thread* thread) const { + UNIMPLEMENTED(); + return {}; +} - parent->client = nullptr; +void ClientSession::Acquire(Thread* thread) { + UNIMPLEMENTED(); } -ResultCode ClientSession::SendSyncRequest(SharedPtr<Thread> thread) { +ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kernel, + std::shared_ptr<Session> parent, + std::string name) { + std::shared_ptr<ClientSession> client_session{std::make_shared<ClientSession>(kernel)}; + + client_session->name = std::move(name); + client_session->parent = std::move(parent); + + return MakeResult(std::move(client_session)); +} + +ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory) { // Keep ServerSession alive until we're done working with it. - SharedPtr<ServerSession> server = parent->server; - if (server == nullptr) + if (!parent->Server()) { return ERR_SESSION_CLOSED_BY_REMOTE; + } // Signal the server session that new data is available - return server->HandleSyncRequest(std::move(thread)); + return parent->Server()->HandleSyncRequest(std::move(thread), memory); } } // namespace Kernel diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h index 09cdff588..b4289a9a8 100644 --- a/src/core/hle/kernel/client_session.h +++ b/src/core/hle/kernel/client_session.h @@ -1,4 +1,4 @@ -// Copyright 2016 Citra Emulator Project +// Copyright 2019 yuzu emulator team // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
@@ -6,20 +6,28 @@ #include <memory> #include <string> -#include "core/hle/kernel/object.h" + +#include "core/hle/kernel/wait_object.h" +#include "core/hle/result.h" union ResultCode; +namespace Memory { +class Memory; +} + namespace Kernel { class KernelCore; class Session; -class ServerSession; class Thread; -class ClientSession final : public Object { +class ClientSession final : public WaitObject { public: - friend class ServerSession; + explicit ClientSession(KernelCore& kernel); + ~ClientSession() override; + + friend class Session; std::string GetTypeName() const override { return "ClientSession"; @@ -34,11 +42,16 @@ public: return HANDLE_TYPE; } - ResultCode SendSyncRequest(SharedPtr<Thread> thread); + ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory); + + bool ShouldWait(const Thread* thread) const override; + + void Acquire(Thread* thread) override; private: - explicit ClientSession(KernelCore& kernel); - ~ClientSession() override; + static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel, + std::shared_ptr<Session> parent, + std::string name = "Unknown"); /// The parent session, which links to the server endpoint. 
std::shared_ptr<Session> parent; diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp index 2cc5d536b..e441a27fc 100644 --- a/src/core/hle/kernel/handle_table.cpp +++ b/src/core/hle/kernel/handle_table.cpp @@ -44,7 +44,7 @@ ResultCode HandleTable::SetSize(s32 handle_table_size) { return RESULT_SUCCESS; } -ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) { +ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) { DEBUG_ASSERT(obj != nullptr); const u16 slot = next_free_slot; @@ -70,7 +70,7 @@ ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) { } ResultVal<Handle> HandleTable::Duplicate(Handle handle) { - SharedPtr<Object> object = GetGeneric(handle); + std::shared_ptr<Object> object = GetGeneric(handle); if (object == nullptr) { LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle); return ERR_INVALID_HANDLE; @@ -99,11 +99,11 @@ bool HandleTable::IsValid(Handle handle) const { return slot < table_size && objects[slot] != nullptr && generations[slot] == generation; } -SharedPtr<Object> HandleTable::GetGeneric(Handle handle) const { +std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const { if (handle == CurrentThread) { - return GetCurrentThread(); + return SharedFrom(GetCurrentThread()); } else if (handle == CurrentProcess) { - return Core::System::GetInstance().CurrentProcess(); + return SharedFrom(Core::System::GetInstance().CurrentProcess()); } if (!IsValid(handle)) { diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h index 44901391b..8029660ed 100644 --- a/src/core/hle/kernel/handle_table.h +++ b/src/core/hle/kernel/handle_table.h @@ -6,6 +6,8 @@ #include <array> #include <cstddef> +#include <memory> + #include "common/common_types.h" #include "core/hle/kernel/object.h" #include "core/hle/result.h" @@ -68,7 +70,7 @@ public: * @return The created Handle or one of the following errors: * - `ERR_HANDLE_TABLE_FULL`: the maximum 
number of handles has been exceeded. */ - ResultVal<Handle> Create(SharedPtr<Object> obj); + ResultVal<Handle> Create(std::shared_ptr<Object> obj); /** * Returns a new handle that points to the same object as the passed in handle. @@ -92,7 +94,7 @@ public: * Looks up a handle. * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid. */ - SharedPtr<Object> GetGeneric(Handle handle) const; + std::shared_ptr<Object> GetGeneric(Handle handle) const; /** * Looks up a handle while verifying its type. @@ -100,7 +102,7 @@ public: * type differs from the requested one. */ template <class T> - SharedPtr<T> Get(Handle handle) const { + std::shared_ptr<T> Get(Handle handle) const { return DynamicObjectCast<T>(GetGeneric(handle)); } @@ -109,7 +111,7 @@ public: private: /// Stores the Object referenced by the handle or null if the slot is empty. - std::array<SharedPtr<Object>, MAX_COUNT> objects; + std::array<std::shared_ptr<Object>, MAX_COUNT> objects; /** * The value of `next_generation` when the handle was created, used to check for validity. 
For diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index a7b5849b0..2db28dcf0 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -32,23 +32,25 @@ SessionRequestHandler::SessionRequestHandler() = default; SessionRequestHandler::~SessionRequestHandler() = default; -void SessionRequestHandler::ClientConnected(SharedPtr<ServerSession> server_session) { +void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) { server_session->SetHleHandler(shared_from_this()); connected_sessions.push_back(std::move(server_session)); } -void SessionRequestHandler::ClientDisconnected(const SharedPtr<ServerSession>& server_session) { +void SessionRequestHandler::ClientDisconnected( + const std::shared_ptr<ServerSession>& server_session) { server_session->SetHleHandler(nullptr); boost::range::remove_erase(connected_sessions, server_session); } -SharedPtr<WritableEvent> HLERequestContext::SleepClientThread( +std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread( const std::string& reason, u64 timeout, WakeupCallback&& callback, - SharedPtr<WritableEvent> writable_event) { + std::shared_ptr<WritableEvent> writable_event) { // Put the client thread to sleep until the wait event is signaled or the timeout expires. 
- thread->SetWakeupCallback([context = *this, callback]( - ThreadWakeupReason reason, SharedPtr<Thread> thread, - SharedPtr<WaitObject> object, std::size_t index) mutable -> bool { + thread->SetWakeupCallback([context = *this, callback](ThreadWakeupReason reason, + std::shared_ptr<Thread> thread, + std::shared_ptr<WaitObject> object, + std::size_t index) mutable -> bool { ASSERT(thread->GetStatus() == ThreadStatus::WaitHLEEvent); callback(thread, context, reason); context.WriteToOutgoingCommandBuffer(*thread); @@ -72,11 +74,13 @@ SharedPtr<WritableEvent> HLERequestContext::SleepClientThread( thread->WakeAfterDelay(timeout); } + is_thread_waiting = true; + return writable_event; } -HLERequestContext::HLERequestContext(SharedPtr<Kernel::ServerSession> server_session, - SharedPtr<Thread> thread) +HLERequestContext::HLERequestContext(std::shared_ptr<Kernel::ServerSession> server_session, + std::shared_ptr<Thread> thread) : server_session(std::move(server_session)), thread(std::move(thread)) { cmd_buf[0] = 0; } @@ -212,10 +216,11 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTabl ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) { auto& owner_process = *thread.GetOwnerProcess(); auto& handle_table = owner_process.GetHandleTable(); + auto& memory = Core::System::GetInstance().Memory(); std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmdbuf; - Memory::ReadBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(), - dst_cmdbuf.size() * sizeof(u32)); + memory.ReadBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(), + dst_cmdbuf.size() * sizeof(u32)); // The header was already built in the internal command buffer. Attempt to parse it to verify // the integrity and then copy it over to the target command buffer. @@ -271,8 +276,8 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) { } // Copy the translated command buffer back into the thread's command buffer area. 
- Memory::WriteBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(), - dst_cmdbuf.size() * sizeof(u32)); + memory.WriteBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(), + dst_cmdbuf.size() * sizeof(u32)); return RESULT_SUCCESS; } @@ -280,15 +285,14 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) { std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const { std::vector<u8> buffer; const bool is_buffer_a{BufferDescriptorA().size() && BufferDescriptorA()[buffer_index].Size()}; + auto& memory = Core::System::GetInstance().Memory(); if (is_buffer_a) { buffer.resize(BufferDescriptorA()[buffer_index].Size()); - Memory::ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), - buffer.size()); + memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size()); } else { buffer.resize(BufferDescriptorX()[buffer_index].Size()); - Memory::ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), - buffer.size()); + memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size()); } return buffer; @@ -309,10 +313,11 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size, size = buffer_size; // TODO(bunnei): This needs to be HW tested } + auto& memory = Core::System::GetInstance().Memory(); if (is_buffer_b) { - Memory::WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size); + memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size); } else { - Memory::WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size); + memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size); } return size; diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index ccf5e56aa..050ad8fd7 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -5,6 +5,7 @@ #pragma once #include <array> +#include <functional> #include 
<memory> #include <optional> #include <string> @@ -60,20 +61,20 @@ public: * associated ServerSession alive for the duration of the connection. * @param server_session Owning pointer to the ServerSession associated with the connection. */ - void ClientConnected(SharedPtr<ServerSession> server_session); + void ClientConnected(std::shared_ptr<ServerSession> server_session); /** * Signals that a client has just disconnected from this HLE handler and releases the * associated ServerSession. * @param server_session ServerSession associated with the connection. */ - void ClientDisconnected(const SharedPtr<ServerSession>& server_session); + void ClientDisconnected(const std::shared_ptr<ServerSession>& server_session); protected: /// List of sessions that are connected to this handler. /// A ServerSession whose server endpoint is an HLE implementation is kept alive by this list /// for the duration of the connection. - std::vector<SharedPtr<ServerSession>> connected_sessions; + std::vector<std::shared_ptr<ServerSession>> connected_sessions; }; /** @@ -97,7 +98,8 @@ protected: */ class HLERequestContext { public: - explicit HLERequestContext(SharedPtr<ServerSession> session, SharedPtr<Thread> thread); + explicit HLERequestContext(std::shared_ptr<ServerSession> session, + std::shared_ptr<Thread> thread); ~HLERequestContext(); /// Returns a pointer to the IPC command buffer for this request. @@ -109,12 +111,12 @@ public: * Returns the session through which this request was made. This can be used as a map key to * access per-client data on services. 
*/ - const SharedPtr<Kernel::ServerSession>& Session() const { + const std::shared_ptr<Kernel::ServerSession>& Session() const { return server_session; } - using WakeupCallback = std::function<void(SharedPtr<Thread> thread, HLERequestContext& context, - ThreadWakeupReason reason)>; + using WakeupCallback = std::function<void( + std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>; /** * Puts the specified guest thread to sleep until the returned event is signaled or until the @@ -129,9 +131,9 @@ public: * created. * @returns Event that when signaled will resume the thread and call the callback function. */ - SharedPtr<WritableEvent> SleepClientThread(const std::string& reason, u64 timeout, - WakeupCallback&& callback, - SharedPtr<WritableEvent> writable_event = nullptr); + std::shared_ptr<WritableEvent> SleepClientThread( + const std::string& reason, u64 timeout, WakeupCallback&& callback, + std::shared_ptr<WritableEvent> writable_event = nullptr); /// Populates this context with data from the requesting process/thread. 
ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table, @@ -209,20 +211,20 @@ public: std::size_t GetWriteBufferSize(int buffer_index = 0) const; template <typename T> - SharedPtr<T> GetCopyObject(std::size_t index) { + std::shared_ptr<T> GetCopyObject(std::size_t index) { return DynamicObjectCast<T>(copy_objects.at(index)); } template <typename T> - SharedPtr<T> GetMoveObject(std::size_t index) { + std::shared_ptr<T> GetMoveObject(std::size_t index) { return DynamicObjectCast<T>(move_objects.at(index)); } - void AddMoveObject(SharedPtr<Object> object) { + void AddMoveObject(std::shared_ptr<Object> object) { move_objects.emplace_back(std::move(object)); } - void AddCopyObject(SharedPtr<Object> object) { + void AddCopyObject(std::shared_ptr<Object> object) { copy_objects.emplace_back(std::move(object)); } @@ -262,15 +264,27 @@ public: std::string Description() const; + Thread& GetThread() { + return *thread; + } + + const Thread& GetThread() const { + return *thread; + } + + bool IsThreadWaiting() const { + return is_thread_waiting; + } + private: void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming); std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf; - SharedPtr<Kernel::ServerSession> server_session; - SharedPtr<Thread> thread; + std::shared_ptr<Kernel::ServerSession> server_session; + std::shared_ptr<Thread> thread; // TODO(yuriks): Check common usage of this and optimize size accordingly - boost::container::small_vector<SharedPtr<Object>, 8> move_objects; - boost::container::small_vector<SharedPtr<Object>, 8> copy_objects; + boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects; + boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects; boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects; std::optional<IPC::CommandHeader> command_header; @@ -288,6 +302,7 @@ private: u32_le command{}; 
std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers; + bool is_thread_waiting{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index f94ac150d..1d0783bd3 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -13,7 +13,6 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/core_timing_util.h" -#include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" @@ -40,7 +39,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_ // Lock the global kernel mutex when we enter the kernel HLE. std::lock_guard lock{HLE::g_hle_lock}; - SharedPtr<Thread> thread = + std::shared_ptr<Thread> thread = system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle); if (thread == nullptr) { LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle); @@ -53,7 +52,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_ thread->GetStatus() == ThreadStatus::WaitHLEEvent) { // Remove the thread from each of its waiting objects' waitlists for (const auto& object : thread->GetWaitObjects()) { - object->RemoveWaitingThread(thread.get()); + object->RemoveWaitingThread(thread); } thread->ClearWaitObjects(); @@ -64,8 +63,11 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_ } else if (thread->GetStatus() == ThreadStatus::WaitMutex || thread->GetStatus() == ThreadStatus::WaitCondVar) { thread->SetMutexWaitAddress(0); - thread->SetCondVarWaitAddress(0); thread->SetWaitHandle(0); + if (thread->GetStatus() == ThreadStatus::WaitCondVar) { + thread->GetOwnerProcess()->RemoveConditionVariableThread(thread); + thread->SetCondVarWaitAddress(0); + } auto* const lock_owner = thread->GetLockOwner(); // Threads waking up by timeout from WaitProcessWideKey do 
not perform priority inheritance @@ -76,9 +78,9 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_ } } - if (thread->GetArbiterWaitAddress() != 0) { - ASSERT(thread->GetStatus() == ThreadStatus::WaitArb); - thread->SetArbiterWaitAddress(0); + if (thread->GetStatus() == ThreadStatus::WaitArb) { + auto& address_arbiter = thread->GetOwnerProcess()->GetAddressArbiter(); + address_arbiter.HandleWakeupThread(thread); } if (resume) { @@ -136,12 +138,12 @@ struct KernelCore::Impl { void InitializeThreads() { thread_wakeup_event_type = - system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback); + Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback); } void InitializePreemption() { - preemption_event = system.CoreTiming().RegisterEvent( - "PreemptionCallback", [this](u64 userdata, s64 cycles_late) { + preemption_event = + Core::Timing::CreateEvent("PreemptionCallback", [this](u64 userdata, s64 cycles_late) { global_scheduler.PreemptThreads(); s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10)); system.CoreTiming().ScheduleEvent(time_interval, preemption_event); @@ -151,20 +153,31 @@ struct KernelCore::Impl { system.CoreTiming().ScheduleEvent(time_interval, preemption_event); } + void MakeCurrentProcess(Process* process) { + current_process = process; + + if (process == nullptr) { + return; + } + + system.Memory().SetCurrentPageTable(*process); + } + std::atomic<u32> next_object_id{0}; std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin}; std::atomic<u64> next_user_process_id{Process::ProcessIDMin}; std::atomic<u64> next_thread_id{1}; // Lists all processes that exist in the current session. 
- std::vector<SharedPtr<Process>> process_list; + std::vector<std::shared_ptr<Process>> process_list; Process* current_process = nullptr; Kernel::GlobalScheduler global_scheduler; - SharedPtr<ResourceLimit> system_resource_limit; + std::shared_ptr<ResourceLimit> system_resource_limit; + + std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type; + std::shared_ptr<Core::Timing::EventType> preemption_event; - Core::Timing::EventType* thread_wakeup_event_type = nullptr; - Core::Timing::EventType* preemption_event = nullptr; // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future, // allowing us to simply use a pool index or similar. Kernel::HandleTable thread_wakeup_callback_handle_table; @@ -190,26 +203,21 @@ void KernelCore::Shutdown() { impl->Shutdown(); } -SharedPtr<ResourceLimit> KernelCore::GetSystemResourceLimit() const { +std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const { return impl->system_resource_limit; } -SharedPtr<Thread> KernelCore::RetrieveThreadFromWakeupCallbackHandleTable(Handle handle) const { +std::shared_ptr<Thread> KernelCore::RetrieveThreadFromWakeupCallbackHandleTable( + Handle handle) const { return impl->thread_wakeup_callback_handle_table.Get<Thread>(handle); } -void KernelCore::AppendNewProcess(SharedPtr<Process> process) { +void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) { impl->process_list.push_back(std::move(process)); } void KernelCore::MakeCurrentProcess(Process* process) { - impl->current_process = process; - - if (process == nullptr) { - return; - } - - Memory::SetCurrentPageTable(*process); + impl->MakeCurrentProcess(process); } Process* KernelCore::CurrentProcess() { @@ -220,7 +228,7 @@ const Process* KernelCore::CurrentProcess() const { return impl->current_process; } -const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const { +const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const { return 
impl->process_list; } @@ -232,7 +240,7 @@ const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const { return impl->global_scheduler; } -void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) { +void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) { impl->named_ports.emplace(std::move(name), std::move(port)); } @@ -265,7 +273,7 @@ u64 KernelCore::CreateNewUserProcessID() { return impl->next_user_process_id++; } -Core::Timing::EventType* KernelCore::ThreadWakeupCallbackEventType() const { +const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const { return impl->thread_wakeup_event_type; } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index c4397fc77..3bf0068ed 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -4,8 +4,10 @@ #pragma once +#include <memory> #include <string> #include <unordered_map> +#include <vector> #include "core/hle/kernel/object.h" namespace Core { @@ -30,7 +32,7 @@ class Thread; /// Represents a single instance of the kernel. class KernelCore { private: - using NamedPortTable = std::unordered_map<std::string, SharedPtr<ClientPort>>; + using NamedPortTable = std::unordered_map<std::string, std::shared_ptr<ClientPort>>; public: /// Constructs an instance of the kernel using the given System @@ -56,13 +58,13 @@ public: void Shutdown(); /// Retrieves a shared pointer to the system resource limit instance. - SharedPtr<ResourceLimit> GetSystemResourceLimit() const; + std::shared_ptr<ResourceLimit> GetSystemResourceLimit() const; /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table. - SharedPtr<Thread> RetrieveThreadFromWakeupCallbackHandleTable(Handle handle) const; + std::shared_ptr<Thread> RetrieveThreadFromWakeupCallbackHandleTable(Handle handle) const; /// Adds the given shared pointer to an internal list of active processes. 
- void AppendNewProcess(SharedPtr<Process> process); + void AppendNewProcess(std::shared_ptr<Process> process); /// Makes the given process the new current process. void MakeCurrentProcess(Process* process); @@ -74,7 +76,7 @@ public: const Process* CurrentProcess() const; /// Retrieves the list of processes. - const std::vector<SharedPtr<Process>>& GetProcessList() const; + const std::vector<std::shared_ptr<Process>>& GetProcessList() const; /// Gets the sole instance of the global scheduler Kernel::GlobalScheduler& GlobalScheduler(); @@ -83,7 +85,7 @@ public: const Kernel::GlobalScheduler& GlobalScheduler() const; /// Adds a port to the named port table - void AddNamedPort(std::string name, SharedPtr<ClientPort> port); + void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port); /// Finds a port within the named port table with the given name. NamedPortTable::iterator FindNamedPort(const std::string& name); @@ -112,7 +114,7 @@ private: u64 CreateNewThreadID(); /// Retrieves the event type used for thread wakeup callbacks. - Core::Timing::EventType* ThreadWakeupCallbackEventType() const; + const std::shared_ptr<Core::Timing::EventType>& ThreadWakeupCallbackEventType() const; /// Provides a reference to the thread wakeup callback handle table. Kernel::HandleTable& ThreadWakeupCallbackHandleTable(); diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 663d0f4b6..eff4e45b0 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp @@ -2,6 +2,7 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include <memory> #include <utility> #include <vector> @@ -22,10 +23,10 @@ namespace Kernel { /// Returns the number of threads that are waiting for a mutex, and the highest priority one among /// those. 
-static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread( - const SharedPtr<Thread>& current_thread, VAddr mutex_addr) { +static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread( + const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) { - SharedPtr<Thread> highest_priority_thread; + std::shared_ptr<Thread> highest_priority_thread; u32 num_waiters = 0; for (const auto& thread : current_thread->GetMutexWaitingThreads()) { @@ -45,14 +46,14 @@ static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread( } /// Update the mutex owner field of all threads waiting on the mutex to point to the new owner. -static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_thread, - SharedPtr<Thread> new_owner) { +static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread, + std::shared_ptr<Thread> new_owner) { const auto threads = current_thread->GetMutexWaitingThreads(); for (const auto& thread : threads) { if (thread->GetMutexWaitAddress() != mutex_addr) continue; - ASSERT(thread->GetLockOwner() == current_thread); + ASSERT(thread->GetLockOwner() == current_thread.get()); current_thread->RemoveMutexWaiter(thread); if (new_owner != thread) new_owner->AddMutexWaiter(thread); @@ -70,15 +71,16 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, } const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - Thread* const current_thread = system.CurrentScheduler().GetCurrentThread(); - SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle); - SharedPtr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle); + std::shared_ptr<Thread> current_thread = + SharedFrom(system.CurrentScheduler().GetCurrentThread()); + std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle); + std::shared_ptr<Thread> requesting_thread = 
handle_table.Get<Thread>(requesting_thread_handle); // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another // thread. ASSERT(requesting_thread == current_thread); - const u32 addr_value = Memory::Read32(address); + const u32 addr_value = system.Memory().Read32(address); // If the mutex isn't being held, just return success. if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) { @@ -110,12 +112,13 @@ ResultCode Mutex::Release(VAddr address) { return ERR_INVALID_ADDRESS; } - auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); + std::shared_ptr<Thread> current_thread = + SharedFrom(system.CurrentScheduler().GetCurrentThread()); auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address); // There are no more threads waiting for the mutex, release it completely. if (thread == nullptr) { - Memory::Write32(address, 0); + system.Memory().Write32(address, 0); return RESULT_SUCCESS; } @@ -130,7 +133,7 @@ ResultCode Mutex::Release(VAddr address) { } // Grant the mutex to the next waiting thread and resume it. 
- Memory::Write32(address, mutex_value); + system.Memory().Write32(address, mutex_value); ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); thread->ResumeFromWait(); diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp index 10431e94c..2c571792b 100644 --- a/src/core/hle/kernel/object.cpp +++ b/src/core/hle/kernel/object.cpp @@ -27,6 +27,7 @@ bool Object::IsWaitable() const { case HandleType::ResourceLimit: case HandleType::ClientPort: case HandleType::ClientSession: + case HandleType::Session: return false; } diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h index a6faeb83b..e3391e2af 100644 --- a/src/core/hle/kernel/object.h +++ b/src/core/hle/kernel/object.h @@ -5,10 +5,9 @@ #pragma once #include <atomic> +#include <memory> #include <string> -#include <boost/smart_ptr/intrusive_ptr.hpp> - #include "common/common_types.h" namespace Kernel { @@ -30,9 +29,10 @@ enum class HandleType : u32 { ServerPort, ClientSession, ServerSession, + Session, }; -class Object : NonCopyable { +class Object : NonCopyable, public std::enable_shared_from_this<Object> { public: explicit Object(KernelCore& kernel); virtual ~Object(); @@ -61,35 +61,24 @@ protected: KernelCore& kernel; private: - friend void intrusive_ptr_add_ref(Object*); - friend void intrusive_ptr_release(Object*); - - std::atomic<u32> ref_count{0}; std::atomic<u32> object_id{0}; }; -// Special functions used by boost::instrusive_ptr to do automatic ref-counting -inline void intrusive_ptr_add_ref(Object* object) { - object->ref_count.fetch_add(1, std::memory_order_relaxed); -} - -inline void intrusive_ptr_release(Object* object) { - if (object->ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) { - delete object; - } -} - template <typename T> -using SharedPtr = boost::intrusive_ptr<T>; +std::shared_ptr<T> SharedFrom(T* raw) { + if (raw == nullptr) + return nullptr; + return std::static_pointer_cast<T>(raw->shared_from_this()); +} /** * Attempts to downcast the 
given Object pointer to a pointer to T. * @return Derived pointer to the object, or `nullptr` if `object` isn't of type T. */ template <typename T> -inline SharedPtr<T> DynamicObjectCast(SharedPtr<Object> object) { +inline std::shared_ptr<T> DynamicObjectCast(std::shared_ptr<Object> object) { if (object != nullptr && object->GetHandleType() == T::HANDLE_TYPE) { - return boost::static_pointer_cast<T>(object); + return std::static_pointer_cast<T>(object); } return nullptr; } diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 12a900bcc..12ea4ebe3 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -38,7 +38,7 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) { auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, owner_process.GetIdealCore(), stack_top, owner_process); - SharedPtr<Thread> thread = std::move(thread_res).Unwrap(); + std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap(); // Register 1 must be a handle to the main thread const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap(); @@ -100,10 +100,10 @@ private: std::bitset<num_slot_entries> is_slot_used; }; -SharedPtr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) { +std::shared_ptr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) { auto& kernel = system.Kernel(); - SharedPtr<Process> process(new Process(system)); + std::shared_ptr<Process> process = std::make_shared<Process>(system); process->name = std::move(name); process->resource_limit = kernel.GetSystemResourceLimit(); process->status = ProcessStatus::Created; @@ -121,7 +121,7 @@ SharedPtr<Process> Process::Create(Core::System& system, std::string name, Proce return process; } -SharedPtr<ResourceLimit> Process::GetResourceLimit() const { +std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const { return resource_limit; } 
@@ -142,6 +142,49 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); } +void Process::InsertConditionVariableThread(std::shared_ptr<Thread> thread) { + VAddr cond_var_addr = thread->GetCondVarWaitAddress(); + std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr]; + auto it = thread_list.begin(); + while (it != thread_list.end()) { + const std::shared_ptr<Thread> current_thread = *it; + if (current_thread->GetPriority() > thread->GetPriority()) { + thread_list.insert(it, thread); + return; + } + ++it; + } + thread_list.push_back(thread); +} + +void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) { + VAddr cond_var_addr = thread->GetCondVarWaitAddress(); + std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr]; + auto it = thread_list.begin(); + while (it != thread_list.end()) { + const std::shared_ptr<Thread> current_thread = *it; + if (current_thread.get() == thread.get()) { + thread_list.erase(it); + return; + } + ++it; + } + UNREACHABLE(); +} + +std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads( + const VAddr cond_var_addr) { + std::vector<std::shared_ptr<Thread>> result{}; + std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr]; + auto it = thread_list.begin(); + while (it != thread_list.end()) { + std::shared_ptr<Thread> current_thread = *it; + result.push_back(current_thread); + ++it; + } + return result; +} + void Process::RegisterThread(const Thread* thread) { thread_list.push_back(thread); } @@ -197,12 +240,12 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) { void Process::PrepareForTermination() { ChangeStatus(ProcessStatus::Exiting); - const auto stop_threads = [this](const std::vector<SharedPtr<Thread>>& thread_list) { + const auto stop_threads = [this](const std::vector<std::shared_ptr<Thread>>& thread_list) { for (auto& 
thread : thread_list) { if (thread->GetOwnerProcess() != this) continue; - if (thread == system.CurrentScheduler().GetCurrentThread()) + if (thread.get() == system.CurrentScheduler().GetCurrentThread()) continue; // TODO(Subv): When are the other running/ready threads terminated? diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index c2df451f3..3483fa19d 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -8,6 +8,7 @@ #include <cstddef> #include <list> #include <string> +#include <unordered_map> #include <vector> #include "common/common_types.h" #include "core/hle/kernel/address_arbiter.h" @@ -61,6 +62,9 @@ enum class ProcessStatus { class Process final : public WaitObject { public: + explicit Process(Core::System& system); + ~Process() override; + enum : u64 { /// Lowest allowed process ID for a kernel initial process. InitialKIPIDMin = 1, @@ -81,7 +85,8 @@ public: static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; - static SharedPtr<Process> Create(Core::System& system, std::string name, ProcessType type); + static std::shared_ptr<Process> Create(Core::System& system, std::string name, + ProcessType type); std::string GetTypeName() const override { return "Process"; @@ -156,7 +161,7 @@ public: } /// Gets the resource limit descriptor for this process - SharedPtr<ResourceLimit> GetResourceLimit() const; + std::shared_ptr<ResourceLimit> GetResourceLimit() const; /// Gets the ideal CPU core ID for this process u8 GetIdealCore() const { @@ -232,6 +237,15 @@ public: return thread_list; } + /// Insert a thread into the condition variable wait container + void InsertConditionVariableThread(std::shared_ptr<Thread> thread); + + /// Remove a thread from the condition variable wait container + void RemoveConditionVariableThread(std::shared_ptr<Thread> thread); + + /// Obtain all condition variable threads waiting for some address + std::vector<std::shared_ptr<Thread>> GetConditionVariableThreads(VAddr 
cond_var_addr); + /// Registers a thread as being created under this process, /// adding it to this process' thread list. void RegisterThread(const Thread* thread); @@ -287,9 +301,6 @@ public: void FreeTLSRegion(VAddr tls_address); private: - explicit Process(Core::System& system); - ~Process() override; - /// Checks if the specified thread should wait until this process is available. bool ShouldWait(const Thread* thread) const override; @@ -328,7 +339,7 @@ private: u32 system_resource_size = 0; /// Resource limit descriptor for this process - SharedPtr<ResourceLimit> resource_limit; + std::shared_ptr<ResourceLimit> resource_limit; /// The ideal CPU core for this process, threads are scheduled on this core by default. u8 ideal_core = 0; @@ -375,6 +386,9 @@ private: /// List of threads that are running with this process as their owner. std::list<const Thread*> thread_list; + /// List of threads waiting for a condition variable + std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads; + /// System context Core::System& system; diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp index 173f69915..b53423462 100644 --- a/src/core/hle/kernel/resource_limit.cpp +++ b/src/core/hle/kernel/resource_limit.cpp @@ -16,8 +16,8 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) { ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {} ResourceLimit::~ResourceLimit() = default; -SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) { - return new ResourceLimit(kernel); +std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) { + return std::make_shared<ResourceLimit>(kernel); } s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const { diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h index 2613a6bb5..53b89e621 100644 --- a/src/core/hle/kernel/resource_limit.h +++ b/src/core/hle/kernel/resource_limit.h @@ -5,6 
+5,8 @@ #pragma once #include <array> +#include <memory> + #include "common/common_types.h" #include "core/hle/kernel/object.h" @@ -31,8 +33,11 @@ constexpr bool IsValidResourceType(ResourceType type) { class ResourceLimit final : public Object { public: + explicit ResourceLimit(KernelCore& kernel); + ~ResourceLimit() override; + /// Creates a resource limit object. - static SharedPtr<ResourceLimit> Create(KernelCore& kernel); + static std::shared_ptr<ResourceLimit> Create(KernelCore& kernel); std::string GetTypeName() const override { return "ResourceLimit"; @@ -76,9 +81,6 @@ public: ResultCode SetLimitValue(ResourceType resource, s64 value); private: - explicit ResourceLimit(KernelCore& kernel); - ~ResourceLimit() override; - // TODO(Subv): Increment resource limit current values in their respective Kernel::T::Create // functions // diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 0e2dbf13e..d36fcd7d9 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -26,27 +26,27 @@ GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {} GlobalScheduler::~GlobalScheduler() = default; -void GlobalScheduler::AddThread(SharedPtr<Thread> thread) { +void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) { thread_list.push_back(std::move(thread)); } -void GlobalScheduler::RemoveThread(const Thread* thread) { +void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) { thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), thread_list.end()); } -void GlobalScheduler::UnloadThread(s32 core) { +void GlobalScheduler::UnloadThread(std::size_t core) { Scheduler& sched = system.Scheduler(core); sched.UnloadThread(); } -void GlobalScheduler::SelectThread(u32 core) { +void GlobalScheduler::SelectThread(std::size_t core) { const auto update_thread = [](Thread* thread, Scheduler& sched) { - if (thread != sched.selected_thread) { + if (thread != 
sched.selected_thread.get()) { if (thread == nullptr) { ++sched.idle_selection_count; } - sched.selected_thread = thread; + sched.selected_thread = SharedFrom(thread); } sched.is_context_switch_pending = sched.selected_thread != sched.current_thread; std::atomic_thread_fence(std::memory_order_seq_cst); @@ -77,9 +77,9 @@ void GlobalScheduler::SelectThread(u32 core) { // if we got a suggested thread, select it, else do a second pass. if (winner && winner->GetPriority() > 2) { if (winner->IsRunning()) { - UnloadThread(winner->GetProcessorID()); + UnloadThread(static_cast<u32>(winner->GetProcessorID())); } - TransferToCore(winner->GetPriority(), core, winner); + TransferToCore(winner->GetPriority(), static_cast<s32>(core), winner); update_thread(winner, sched); return; } @@ -91,9 +91,9 @@ void GlobalScheduler::SelectThread(u32 core) { Thread* thread_on_core = scheduled_queue[src_core].front(); Thread* to_change = *it; if (thread_on_core->IsRunning() || to_change->IsRunning()) { - UnloadThread(src_core); + UnloadThread(static_cast<u32>(src_core)); } - TransferToCore(thread_on_core->GetPriority(), core, thread_on_core); + TransferToCore(thread_on_core->GetPriority(), static_cast<s32>(core), thread_on_core); current_thread = thread_on_core; break; } @@ -154,9 +154,9 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { if (winner != nullptr) { if (winner != yielding_thread) { if (winner->IsRunning()) { - UnloadThread(winner->GetProcessorID()); + UnloadThread(static_cast<u32>(winner->GetProcessorID())); } - TransferToCore(winner->GetPriority(), core_id, winner); + TransferToCore(winner->GetPriority(), s32(core_id), winner); } } else { winner = next_thread; @@ -196,9 +196,9 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread if (winner != nullptr) { if (winner != yielding_thread) { if (winner->IsRunning()) { - UnloadThread(winner->GetProcessorID()); + UnloadThread(static_cast<u32>(winner->GetProcessorID())); } - 
TransferToCore(winner->GetPriority(), core_id, winner); + TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner); } } else { winner = yielding_thread; @@ -248,7 +248,7 @@ void GlobalScheduler::PreemptThreads() { if (winner != nullptr) { if (winner->IsRunning()) { - UnloadThread(winner->GetProcessorID()); + UnloadThread(static_cast<u32>(winner->GetProcessorID())); } TransferToCore(winner->GetPriority(), s32(core_id), winner); current_thread = @@ -281,7 +281,7 @@ void GlobalScheduler::PreemptThreads() { if (winner != nullptr) { if (winner->IsRunning()) { - UnloadThread(winner->GetProcessorID()); + UnloadThread(static_cast<u32>(winner->GetProcessorID())); } TransferToCore(winner->GetPriority(), s32(core_id), winner); current_thread = winner; @@ -292,30 +292,30 @@ void GlobalScheduler::PreemptThreads() { } } -void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) { +void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) { suggested_queue[core].add(thread, priority); } -void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) { +void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) { suggested_queue[core].remove(thread, priority); } -void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) { +void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) { ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); scheduled_queue[core].add(thread, priority); } -void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) { +void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) { ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); scheduled_queue[core].add(thread, priority, false); } -void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) { +void GlobalScheduler::Reschedule(u32 priority, std::size_t core, 
Thread* thread) { scheduled_queue[core].remove(thread, priority); scheduled_queue[core].add(thread, priority); } -void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) { +void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) { scheduled_queue[core].remove(thread, priority); } @@ -327,14 +327,14 @@ void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* } thread->SetProcessorID(destination_core); if (source_core >= 0) { - Unschedule(priority, source_core, thread); + Unschedule(priority, static_cast<u32>(source_core), thread); } if (destination_core >= 0) { - Unsuggest(priority, destination_core, thread); - Schedule(priority, destination_core, thread); + Unsuggest(priority, static_cast<u32>(destination_core), thread); + Schedule(priority, static_cast<u32>(destination_core), thread); } if (source_core >= 0) { - Suggest(priority, source_core, thread); + Suggest(priority, static_cast<u32>(source_core), thread); } } @@ -357,7 +357,7 @@ void GlobalScheduler::Shutdown() { thread_list.clear(); } -Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id) +Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, std::size_t core_id) : system(system), cpu_core(cpu_core), core_id(core_id) {} Scheduler::~Scheduler() = default; @@ -446,7 +446,7 @@ void Scheduler::SwitchContext() { // Cancel any outstanding wakeup events for this thread new_thread->CancelWakeupTimer(); - current_thread = new_thread; + current_thread = SharedFrom(new_thread); new_thread->SetStatus(ThreadStatus::Running); new_thread->SetIsRunning(true); @@ -458,7 +458,6 @@ void Scheduler::SwitchContext() { cpu_core.LoadContext(new_thread->GetContext()); cpu_core.SetTlsAddress(new_thread->GetTLSAddress()); cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0()); - cpu_core.ClearExclusiveState(); } else { current_thread = nullptr; // Note: We do not reset the current process and current page table when idling 
because diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index f2d6311b8..14b77960a 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h @@ -4,11 +4,12 @@ #pragma once -#include <mutex> +#include <atomic> +#include <memory> #include <vector> + #include "common/common_types.h" #include "common/multi_level_queue.h" -#include "core/hle/kernel/object.h" #include "core/hle/kernel/thread.h" namespace Core { @@ -28,13 +29,13 @@ public: ~GlobalScheduler(); /// Adds a new thread to the scheduler - void AddThread(SharedPtr<Thread> thread); + void AddThread(std::shared_ptr<Thread> thread); /// Removes a thread from the scheduler - void RemoveThread(const Thread* thread); + void RemoveThread(std::shared_ptr<Thread> thread); /// Returns a list of all threads managed by the scheduler - const std::vector<SharedPtr<Thread>>& GetThreadList() const { + const std::vector<std::shared_ptr<Thread>>& GetThreadList() const { return thread_list; } @@ -42,41 +43,34 @@ public: * Add a thread to the suggested queue of a cpu core. Suggested threads may be * picked if no thread is scheduled to run on the core. */ - void Suggest(u32 priority, u32 core, Thread* thread); + void Suggest(u32 priority, std::size_t core, Thread* thread); /** * Remove a thread to the suggested queue of a cpu core. Suggested threads may be * picked if no thread is scheduled to run on the core. */ - void Unsuggest(u32 priority, u32 core, Thread* thread); + void Unsuggest(u32 priority, std::size_t core, Thread* thread); /** * Add a thread to the scheduling queue of a cpu core. The thread is added at the * back the queue in its priority level. */ - void Schedule(u32 priority, u32 core, Thread* thread); + void Schedule(u32 priority, std::size_t core, Thread* thread); /** * Add a thread to the scheduling queue of a cpu core. The thread is added at the * front the queue in its priority level. 
*/ - void SchedulePrepend(u32 priority, u32 core, Thread* thread); + void SchedulePrepend(u32 priority, std::size_t core, Thread* thread); /// Reschedule an already scheduled thread based on a new priority - void Reschedule(u32 priority, u32 core, Thread* thread); + void Reschedule(u32 priority, std::size_t core, Thread* thread); /// Unschedules a thread. - void Unschedule(u32 priority, u32 core, Thread* thread); - - /** - * Transfers a thread into an specific core. If the destination_core is -1 - * it will be unscheduled from its source code and added into its suggested - * queue. - */ - void TransferToCore(u32 priority, s32 destination_core, Thread* thread); + void Unschedule(u32 priority, std::size_t core, Thread* thread); /// Selects a core and forces it to unload its current thread's context - void UnloadThread(s32 core); + void UnloadThread(std::size_t core); /** * Takes care of selecting the new scheduled thread in three steps: @@ -90,9 +84,9 @@ public: * 3. Third is no suggested thread is found, we do a second pass and pick a running * thread in another core and swap it with its current thread. */ - void SelectThread(u32 core); + void SelectThread(std::size_t core); - bool HaveReadyThreads(u32 core_id) const { + bool HaveReadyThreads(std::size_t core_id) const { return !scheduled_queue[core_id].empty(); } @@ -145,6 +139,13 @@ public: void Shutdown(); private: + /** + * Transfers a thread into an specific core. If the destination_core is -1 + * it will be unscheduled from its source code and added into its suggested + * queue. + */ + void TransferToCore(u32 priority, s32 destination_core, Thread* thread); + bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner); static constexpr u32 min_regular_priority = 2; @@ -157,13 +158,13 @@ private: std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62}; /// Lists all thread ids that aren't deleted/etc. 
- std::vector<SharedPtr<Thread>> thread_list; + std::vector<std::shared_ptr<Thread>> thread_list; Core::System& system; }; class Scheduler final { public: - explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id); + explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, std::size_t core_id); ~Scheduler(); /// Returns whether there are any threads that are ready to run. @@ -213,14 +214,14 @@ private: */ void UpdateLastContextSwitchTime(Thread* thread, Process* process); - SharedPtr<Thread> current_thread = nullptr; - SharedPtr<Thread> selected_thread = nullptr; + std::shared_ptr<Thread> current_thread = nullptr; + std::shared_ptr<Thread> selected_thread = nullptr; Core::System& system; Core::ARM_Interface& cpu_core; u64 last_context_switch_time = 0; u64 idle_selection_count = 0; - const u32 core_id; + const std::size_t core_id; bool is_context_switch_pending = false; }; diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp index 02e7c60e6..a4ccfa35e 100644 --- a/src/core/hle/kernel/server_port.cpp +++ b/src/core/hle/kernel/server_port.cpp @@ -16,7 +16,7 @@ namespace Kernel { ServerPort::ServerPort(KernelCore& kernel) : WaitObject{kernel} {} ServerPort::~ServerPort() = default; -ResultVal<SharedPtr<ServerSession>> ServerPort::Accept() { +ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() { if (pending_sessions.empty()) { return ERR_NOT_FOUND; } @@ -26,7 +26,7 @@ ResultVal<SharedPtr<ServerSession>> ServerPort::Accept() { return MakeResult(std::move(session)); } -void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session) { +void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) { pending_sessions.push_back(std::move(pending_session)); } @@ -41,8 +41,8 @@ void ServerPort::Acquire(Thread* thread) { ServerPort::PortPair ServerPort::CreatePortPair(KernelCore& kernel, u32 max_sessions, std::string name) { - SharedPtr<ServerPort> 
server_port(new ServerPort(kernel)); - SharedPtr<ClientPort> client_port(new ClientPort(kernel)); + std::shared_ptr<ServerPort> server_port = std::make_shared<ServerPort>(kernel); + std::shared_ptr<ClientPort> client_port = std::make_shared<ClientPort>(kernel); server_port->name = name + "_Server"; client_port->name = name + "_Client"; diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h index dc88a1ebd..8be8a75ea 100644 --- a/src/core/hle/kernel/server_port.h +++ b/src/core/hle/kernel/server_port.h @@ -22,8 +22,11 @@ class SessionRequestHandler; class ServerPort final : public WaitObject { public: + explicit ServerPort(KernelCore& kernel); + ~ServerPort() override; + using HLEHandler = std::shared_ptr<SessionRequestHandler>; - using PortPair = std::pair<SharedPtr<ServerPort>, SharedPtr<ClientPort>>; + using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>; /** * Creates a pair of ServerPort and an associated ClientPort. @@ -52,7 +55,7 @@ public: * Accepts a pending incoming connection on this port. If there are no pending sessions, will * return ERR_NO_PENDING_SESSIONS. */ - ResultVal<SharedPtr<ServerSession>> Accept(); + ResultVal<std::shared_ptr<ServerSession>> Accept(); /// Whether or not this server port has an HLE handler available. bool HasHLEHandler() const { @@ -74,17 +77,14 @@ public: /// Appends a ServerSession to the collection of ServerSessions /// waiting to be accepted by this port. 
- void AppendPendingSession(SharedPtr<ServerSession> pending_session); + void AppendPendingSession(std::shared_ptr<ServerSession> pending_session); bool ShouldWait(const Thread* thread) const override; void Acquire(Thread* thread) override; private: - explicit ServerPort(KernelCore& kernel); - ~ServerPort() override; - /// ServerSessions waiting to be accepted by the port - std::vector<SharedPtr<ServerSession>> pending_sessions; + std::vector<std::shared_ptr<ServerSession>> pending_sessions; /// This session's HLE request handler template (optional) /// ServerSessions created from this port inherit a reference to this handler. diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 30b2bfb5a..7825e1ec4 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -1,4 +1,4 @@ -// Copyright 2016 Citra Emulator Project +// Copyright 2019 yuzu emulator team // Licensed under GPLv2 or any later version // Refer to the license.txt file included. @@ -9,6 +9,7 @@ #include "common/common_types.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/core_timing.h" #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/client_session.h" @@ -19,35 +20,32 @@ #include "core/hle/kernel/server_session.h" #include "core/hle/kernel/session.h" #include "core/hle/kernel/thread.h" +#include "core/memory.h" namespace Kernel { ServerSession::ServerSession(KernelCore& kernel) : WaitObject{kernel} {} -ServerSession::~ServerSession() { - // This destructor will be called automatically when the last ServerSession handle is closed by - // the emulated application. +ServerSession::~ServerSession() = default; - // Decrease the port's connection count. 
- if (parent->port) { - parent->port->ConnectionClosed(); - } - - parent->server = nullptr; -} +ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel, + std::shared_ptr<Session> parent, + std::string name) { + std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)}; -ResultVal<SharedPtr<ServerSession>> ServerSession::Create(KernelCore& kernel, std::string name) { - SharedPtr<ServerSession> server_session(new ServerSession(kernel)); + session->request_event = Core::Timing::CreateEvent( + name, [session](u64 userdata, s64 cycles_late) { session->CompleteSyncRequest(); }); + session->name = std::move(name); + session->parent = std::move(parent); - server_session->name = std::move(name); - server_session->parent = nullptr; - - return MakeResult(std::move(server_session)); + return MakeResult(std::move(session)); } bool ServerSession::ShouldWait(const Thread* thread) const { // Closed sessions should never wait, an error will be returned from svcReplyAndReceive. - if (parent->client == nullptr) + if (!parent->Client()) { return false; + } + // Wait if we have no pending requests, or if we're currently handling a request. return pending_requesting_threads.empty() || currently_handling != nullptr; } @@ -69,7 +67,7 @@ void ServerSession::ClientDisconnected() { if (handler) { // Note that after this returns, this server session's hle_handler is // invalidated (set to null). 
- handler->ClientDisconnected(this); + handler->ClientDisconnected(SharedFrom(this)); } // Clean up the list of client threads with pending requests, they are unneeded now that the @@ -126,13 +124,21 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con return RESULT_SUCCESS; } -ResultCode ServerSession::HandleSyncRequest(SharedPtr<Thread> thread) { - // The ServerSession received a sync request, this means that there's new data available - // from its ClientSession, so wake up any threads that may be waiting on a svcReplyAndReceive or - // similar. - Kernel::HLERequestContext context(this, thread); - u32* cmd_buf = (u32*)Memory::GetPointer(thread->GetTLSAddress()); - context.PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); +ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory) { + u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; + std::shared_ptr<Kernel::HLERequestContext> context{ + std::make_shared<Kernel::HLERequestContext>(SharedFrom(this), std::move(thread))}; + + context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); + request_queue.Push(std::move(context)); + + return RESULT_SUCCESS; +} + +ResultCode ServerSession::CompleteSyncRequest() { + ASSERT(!request_queue.Empty()); + + auto& context = *request_queue.Front(); ResultCode result = RESULT_SUCCESS; // If the session has been converted to a domain, handle the domain request @@ -144,61 +150,27 @@ ResultCode ServerSession::HandleSyncRequest(SharedPtr<Thread> thread) { result = hle_handler->HandleSyncRequest(context); } - if (thread->GetStatus() == ThreadStatus::Running) { - // Put the thread to sleep until the server replies, it will be awoken in - // svcReplyAndReceive for LLE servers. 
- thread->SetStatus(ThreadStatus::WaitIPC); - - if (hle_handler != nullptr) { - // For HLE services, we put the request threads to sleep for a short duration to - // simulate IPC overhead, but only if the HLE handler didn't put the thread to sleep for - // other reasons like an async callback. The IPC overhead is needed to prevent - // starvation when a thread only does sync requests to HLE services while a - // lower-priority thread is waiting to run. - - // This delay was approximated in a homebrew application by measuring the average time - // it takes for svcSendSyncRequest to return when performing the SetLcdForceBlack IPC - // request to the GSP:GPU service in a n3DS with firmware 11.6. The measured values have - // a high variance and vary between models. - static constexpr u64 IPCDelayNanoseconds = 39000; - thread->WakeAfterDelay(IPCDelayNanoseconds); - } else { - // Add the thread to the list of threads that have issued a sync request with this - // server. - pending_requesting_threads.push_back(std::move(thread)); - } - } - - // If this ServerSession does not have an HLE implementation, just wake up the threads waiting - // on it. 
- WakeupAllWaitingThreads(); - - // Handle scenario when ConvertToDomain command was issued, as we must do the conversion at the - // end of the command such that only commands following this one are handled as domains if (convert_to_domain) { ASSERT_MSG(IsSession(), "ServerSession is already a domain instance."); domain_request_handlers = {hle_handler}; convert_to_domain = false; } - return result; -} - -ServerSession::SessionPair ServerSession::CreateSessionPair(KernelCore& kernel, - const std::string& name, - SharedPtr<ClientPort> port) { - auto server_session = ServerSession::Create(kernel, name + "_Server").Unwrap(); - SharedPtr<ClientSession> client_session(new ClientSession(kernel)); - client_session->name = name + "_Client"; + // Some service requests require the thread to block + if (!context.IsThreadWaiting()) { + context.GetThread().ResumeFromWait(); + context.GetThread().SetWaitSynchronizationResult(result); + } - std::shared_ptr<Session> parent(new Session); - parent->client = client_session.get(); - parent->server = server_session.get(); - parent->port = std::move(port); + request_queue.Pop(); - client_session->parent = parent; - server_session->parent = parent; + return result; +} - return std::make_pair(std::move(server_session), std::move(client_session)); +ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread, + Memory::Memory& memory) { + Core::System::GetInstance().CoreTiming().ScheduleEvent(20000, request_event, {}); + return QueueSyncRequest(std::move(thread), memory); } + } // namespace Kernel diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h index 738df30f8..d6e48109e 100644 --- a/src/core/hle/kernel/server_session.h +++ b/src/core/hle/kernel/server_session.h @@ -1,4 +1,4 @@ -// Copyright 2014 Citra Emulator Project +// Copyright 2019 yuzu emulator team // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
@@ -9,17 +9,22 @@ #include <utility> #include <vector> -#include "core/hle/kernel/object.h" +#include "common/threadsafe_queue.h" #include "core/hle/kernel/wait_object.h" #include "core/hle/result.h" +namespace Memory { +class Memory; +} + +namespace Core::Timing { +struct EventType; +} + namespace Kernel { -class ClientPort; -class ClientSession; class HLERequestContext; class KernelCore; -class ServerSession; class Session; class SessionRequestHandler; class Thread; @@ -38,6 +43,15 @@ class Thread; */ class ServerSession final : public WaitObject { public: + explicit ServerSession(KernelCore& kernel); + ~ServerSession() override; + + friend class Session; + + static ResultVal<std::shared_ptr<ServerSession>> Create(KernelCore& kernel, + std::shared_ptr<Session> parent, + std::string name = "Unknown"); + std::string GetTypeName() const override { return "ServerSession"; } @@ -59,18 +73,6 @@ public: return parent.get(); } - using SessionPair = std::pair<SharedPtr<ServerSession>, SharedPtr<ClientSession>>; - - /** - * Creates a pair of ServerSession and an associated ClientSession. - * @param kernel The kernal instance to create the session pair under. - * @param name Optional name of the ports. - * @param client_port Optional The ClientPort that spawned this session. - * @return The created session tuple - */ - static SessionPair CreateSessionPair(KernelCore& kernel, const std::string& name = "Unknown", - SharedPtr<ClientPort> client_port = nullptr); - /** * Sets the HLE handler for the session. This handler will be called to service IPC requests * instead of the regular IPC machinery. (The regular IPC machinery is currently not @@ -82,10 +84,13 @@ public: /** * Handle a sync request from the emulated application. + * * @param thread Thread that initiated the request. + * @param memory Memory context to handle the sync request under. + * * @returns ResultCode from the operation. 
*/ - ResultCode HandleSyncRequest(SharedPtr<Thread> thread); + ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory); bool ShouldWait(const Thread* thread) const override; @@ -118,18 +123,11 @@ public: } private: - explicit ServerSession(KernelCore& kernel); - ~ServerSession() override; + /// Queues a sync request from the emulated application. + ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory); - /** - * Creates a server session. The server session can have an optional HLE handler, - * which will be invoked to handle the IPC requests that this session receives. - * @param kernel The kernel instance to create this server session under. - * @param name Optional name of the server session. - * @return The created server session - */ - static ResultVal<SharedPtr<ServerSession>> Create(KernelCore& kernel, - std::string name = "Unknown"); + /// Completes a sync request from the emulated application. + ResultCode CompleteSyncRequest(); /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an /// object handle. @@ -147,18 +145,24 @@ private: /// List of threads that are pending a response after a sync request. This list is processed in /// a LIFO manner, thus, the last request will be dispatched first. /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test. - std::vector<SharedPtr<Thread>> pending_requesting_threads; + std::vector<std::shared_ptr<Thread>> pending_requesting_threads; /// Thread whose request is currently being handled. A request is considered "handled" when a /// response is sent via svcReplyAndReceive. /// TODO(Subv): Find a better name for this. 
- SharedPtr<Thread> currently_handling; + std::shared_ptr<Thread> currently_handling; /// When set to True, converts the session to a domain at the end of the command bool convert_to_domain{}; /// The name of this session (optional) std::string name; + + /// Core timing event used to schedule the service request at some point in the future + std::shared_ptr<Core::Timing::EventType> request_event; + + /// Queue of scheduled service requests + Common::MPSCQueue<std::shared_ptr<Kernel::HLERequestContext>> request_queue; }; } // namespace Kernel diff --git a/src/core/hle/kernel/session.cpp b/src/core/hle/kernel/session.cpp index 642914744..dee6e2b72 100644 --- a/src/core/hle/kernel/session.cpp +++ b/src/core/hle/kernel/session.cpp @@ -1,12 +1,36 @@ -// Copyright 2015 Citra Emulator Project +// Copyright 2019 yuzu emulator team // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include "common/assert.h" +#include "core/hle/kernel/client_session.h" +#include "core/hle/kernel/server_session.h" #include "core/hle/kernel/session.h" -#include "core/hle/kernel/thread.h" namespace Kernel { -Session::Session() {} -Session::~Session() {} +Session::Session(KernelCore& kernel) : WaitObject{kernel} {} +Session::~Session() = default; + +Session::SessionPair Session::Create(KernelCore& kernel, std::string name) { + auto session{std::make_shared<Session>(kernel)}; + auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()}; + auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()}; + + session->name = std::move(name); + session->client = client_session; + session->server = server_session; + + return std::make_pair(std::move(client_session), std::move(server_session)); +} + +bool Session::ShouldWait(const Thread* thread) const { + UNIMPLEMENTED(); + return {}; +} + +void Session::Acquire(Thread* thread) { + UNIMPLEMENTED(); +} + } // namespace Kernel diff --git 
a/src/core/hle/kernel/session.h b/src/core/hle/kernel/session.h index 7a551f5e4..15a5ac15f 100644 --- a/src/core/hle/kernel/session.h +++ b/src/core/hle/kernel/session.h @@ -1,27 +1,64 @@ -// Copyright 2018 yuzu emulator team +// Copyright 2019 yuzu emulator team // Licensed under GPLv2 or any later version // Refer to the license.txt file included. #pragma once -#include "core/hle/kernel/object.h" +#include <memory> +#include <string> +#include <utility> + +#include "core/hle/kernel/wait_object.h" namespace Kernel { class ClientSession; -class ClientPort; class ServerSession; /** * Parent structure to link the client and server endpoints of a session with their associated - * client port. The client port need not exist, as is the case for portless sessions like the - * FS File and Directory sessions. When one of the endpoints of a session is destroyed, its - * corresponding field in this structure will be set to nullptr. + * client port. */ -class Session final { +class Session final : public WaitObject { public: - ClientSession* client = nullptr; ///< The client endpoint of the session. - ServerSession* server = nullptr; ///< The server endpoint of the session. - SharedPtr<ClientPort> port; ///< The port that this session is associated with (optional). 
+ explicit Session(KernelCore& kernel); + ~Session() override; + + using SessionPair = std::pair<std::shared_ptr<ClientSession>, std::shared_ptr<ServerSession>>; + + static SessionPair Create(KernelCore& kernel, std::string name = "Unknown"); + + std::string GetName() const override { + return name; + } + + static constexpr HandleType HANDLE_TYPE = HandleType::Session; + HandleType GetHandleType() const override { + return HANDLE_TYPE; + } + + bool ShouldWait(const Thread* thread) const override; + + void Acquire(Thread* thread) override; + + std::shared_ptr<ClientSession> Client() { + if (auto result{client.lock()}) { + return result; + } + return {}; + } + + std::shared_ptr<ServerSession> Server() { + if (auto result{server.lock()}) { + return result; + } + return {}; + } + +private: + std::string name; + std::weak_ptr<ClientSession> client; + std::weak_ptr<ServerSession> server; }; + } // namespace Kernel diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp index a815c4eea..afb2e3fc2 100644 --- a/src/core/hle/kernel/shared_memory.cpp +++ b/src/core/hle/kernel/shared_memory.cpp @@ -15,11 +15,12 @@ namespace Kernel { SharedMemory::SharedMemory(KernelCore& kernel) : Object{kernel} {} SharedMemory::~SharedMemory() = default; -SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_process, u64 size, - MemoryPermission permissions, - MemoryPermission other_permissions, VAddr address, - MemoryRegion region, std::string name) { - SharedPtr<SharedMemory> shared_memory(new SharedMemory(kernel)); +std::shared_ptr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_process, + u64 size, MemoryPermission permissions, + MemoryPermission other_permissions, + VAddr address, MemoryRegion region, + std::string name) { + std::shared_ptr<SharedMemory> shared_memory = std::make_shared<SharedMemory>(kernel); shared_memory->owner_process = owner_process; shared_memory->name = std::move(name); @@ -58,10 
+59,10 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_ return shared_memory; } -SharedPtr<SharedMemory> SharedMemory::CreateForApplet( +std::shared_ptr<SharedMemory> SharedMemory::CreateForApplet( KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset, u64 size, MemoryPermission permissions, MemoryPermission other_permissions, std::string name) { - SharedPtr<SharedMemory> shared_memory(new SharedMemory(kernel)); + std::shared_ptr<SharedMemory> shared_memory = std::make_shared<SharedMemory>(kernel); shared_memory->owner_process = nullptr; shared_memory->name = std::move(name); diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h index 01ca6dcd2..014951d82 100644 --- a/src/core/hle/kernel/shared_memory.h +++ b/src/core/hle/kernel/shared_memory.h @@ -6,7 +6,6 @@ #include <memory> #include <string> -#include <vector> #include "common/common_types.h" #include "core/hle/kernel/object.h" @@ -33,6 +32,9 @@ enum class MemoryPermission : u32 { class SharedMemory final : public Object { public: + explicit SharedMemory(KernelCore& kernel); + ~SharedMemory() override; + /** * Creates a shared memory object. * @param kernel The kernel instance to create a shared memory instance under. @@ -46,11 +48,12 @@ public: * linear heap. * @param name Optional object name, used for debugging purposes. 
*/ - static SharedPtr<SharedMemory> Create(KernelCore& kernel, Process* owner_process, u64 size, - MemoryPermission permissions, - MemoryPermission other_permissions, VAddr address = 0, - MemoryRegion region = MemoryRegion::BASE, - std::string name = "Unknown"); + static std::shared_ptr<SharedMemory> Create(KernelCore& kernel, Process* owner_process, + u64 size, MemoryPermission permissions, + MemoryPermission other_permissions, + VAddr address = 0, + MemoryRegion region = MemoryRegion::BASE, + std::string name = "Unknown"); /** * Creates a shared memory object from a block of memory managed by an HLE applet. @@ -63,7 +66,7 @@ public: * block. * @param name Optional object name, used for debugging purposes. */ - static SharedPtr<SharedMemory> CreateForApplet( + static std::shared_ptr<SharedMemory> CreateForApplet( KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset, u64 size, MemoryPermission permissions, MemoryPermission other_permissions, std::string name = "Unknown Applet"); @@ -130,9 +133,6 @@ public: const u8* GetPointer(std::size_t offset = 0) const; private: - explicit SharedMemory(KernelCore& kernel); - ~SharedMemory() override; - /// Backing memory for this shared memory block. std::shared_ptr<PhysicalMemory> backing_block; /// Offset into the backing block for this shared memory. 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index c63a9ba8b..dbcdb0b88 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -17,6 +17,7 @@ #include "core/core.h" #include "core/core_cpu.h" #include "core/core_timing.h" +#include "core/core_timing_util.h" #include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/client_session.h" @@ -331,7 +332,9 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad /// Connect to an OS service given the port name, returns the handle to the port to out static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, VAddr port_name_address) { - if (!Memory::IsValidVirtualAddress(port_name_address)) { + auto& memory = system.Memory(); + + if (!memory.IsValidVirtualAddress(port_name_address)) { LOG_ERROR(Kernel_SVC, "Port Name Address is not a valid virtual address, port_name_address=0x{:016X}", port_name_address); @@ -340,7 +343,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, static constexpr std::size_t PortNameMaxLength = 11; // Read 1 char beyond the max allowed port name to detect names that are too long. 
- std::string port_name = Memory::ReadCString(port_name_address, PortNameMaxLength + 1); + const std::string port_name = memory.ReadCString(port_name_address, PortNameMaxLength + 1); if (port_name.size() > PortNameMaxLength) { LOG_ERROR(Kernel_SVC, "Port name is too long, expected {} but got {}", PortNameMaxLength, port_name.size()); @@ -358,7 +361,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, auto client_port = it->second; - SharedPtr<ClientSession> client_session; + std::shared_ptr<ClientSession> client_session; CASCADE_RESULT(client_session, client_port->Connect()); // Return the client session @@ -370,7 +373,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, /// Makes a blocking IPC call to an OS service. static ResultCode SendSyncRequest(Core::System& system, Handle handle) { const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - SharedPtr<ClientSession> session = handle_table.Get<ClientSession>(handle); + std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle); if (!session) { LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle); return ERR_INVALID_HANDLE; @@ -378,11 +381,12 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); - system.PrepareReschedule(); + auto thread = system.CurrentScheduler().GetCurrentThread(); + thread->InvalidateWakeupCallback(); + thread->SetStatus(ThreadStatus::WaitIPC); + system.PrepareReschedule(thread->GetProcessorID()); - // TODO(Subv): svcSendSyncRequest should put the caller thread to sleep while the server - // responds and cause a reschedule. - return session->SendSyncRequest(system.CurrentScheduler().GetCurrentThread()); + return session->SendSyncRequest(SharedFrom(thread), system.Memory()); } /// Get the ID for the specified thread. 
@@ -390,7 +394,7 @@ static ResultCode GetThreadId(Core::System& system, u64* thread_id, Handle threa LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle); const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); + const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); if (!thread) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", thread_handle); return ERR_INVALID_HANDLE; @@ -405,13 +409,13 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle); const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const SharedPtr<Process> process = handle_table.Get<Process>(handle); + const std::shared_ptr<Process> process = handle_table.Get<Process>(handle); if (process) { *process_id = process->GetProcessID(); return RESULT_SUCCESS; } - const SharedPtr<Thread> thread = handle_table.Get<Thread>(handle); + const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle); if (thread) { const Process* const owner_process = thread->GetOwnerProcess(); if (!owner_process) { @@ -430,8 +434,8 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han } /// Default thread wakeup callback for WaitSynchronization -static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread, - SharedPtr<WaitObject> object, std::size_t index) { +static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, + std::shared_ptr<WaitObject> object, std::size_t index) { ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch); if (reason == ThreadWakeupReason::Timeout) { @@ -451,7 +455,8 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, 
nano_seconds={}", handles_address, handle_count, nano_seconds); - if (!Memory::IsValidVirtualAddress(handles_address)) { + auto& memory = system.Memory(); + if (!memory.IsValidVirtualAddress(handles_address)) { LOG_ERROR(Kernel_SVC, "Handle address is not a valid virtual address, handle_address=0x{:016X}", handles_address); @@ -473,7 +478,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); for (u64 i = 0; i < handle_count; ++i) { - const Handle handle = Memory::Read32(handles_address + i * sizeof(Handle)); + const Handle handle = memory.Read32(handles_address + i * sizeof(Handle)); const auto object = handle_table.Get<WaitObject>(handle); if (object == nullptr) { @@ -505,8 +510,13 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr return RESULT_TIMEOUT; } + if (thread->IsSyncCancelled()) { + thread->SetSyncCancelled(false); + return ERR_SYNCHRONIZATION_CANCELED; + } + for (auto& object : objects) { - object->AddWaitingThread(thread); + object->AddWaitingThread(SharedFrom(thread)); } thread->SetWaitObjects(std::move(objects)); @@ -526,7 +536,7 @@ static ResultCode CancelSynchronization(Core::System& system, Handle thread_hand LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle); const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); + std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); if (!thread) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}", thread_handle); @@ -610,13 +620,15 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { return; } + auto& memory = system.Memory(); + // This typically is an error code so we're going to assume this is the case if (sz == sizeof(u32)) { - LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", 
Memory::Read32(addr)); + LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", memory.Read32(addr)); } else { // We don't know what's in here so we'll hexdump it debug_buffer.resize(sz); - Memory::ReadBlock(addr, debug_buffer.data(), sz); + memory.ReadBlock(addr, debug_buffer.data(), sz); std::string hexdump; for (std::size_t i = 0; i < debug_buffer.size(); i++) { hexdump += fmt::format("{:02X} ", debug_buffer[i]); @@ -706,7 +718,7 @@ static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr addre } std::string str(len, '\0'); - Memory::ReadBlock(address, str.data(), str.size()); + system.Memory().ReadBlock(address, str.data(), str.size()); LOG_DEBUG(Debug_Emulated, "{}", str); } @@ -935,7 +947,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha const auto& core_timing = system.CoreTiming(); const auto& scheduler = system.CurrentScheduler(); const auto* const current_thread = scheduler.GetCurrentThread(); - const bool same_thread = current_thread == thread; + const bool same_thread = current_thread == thread.get(); const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks(); u64 out_ticks = 0; @@ -1045,7 +1057,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act } const auto* current_process = system.Kernel().CurrentProcess(); - const SharedPtr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle); + const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle); if (!thread) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); return ERR_INVALID_HANDLE; @@ -1061,7 +1073,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act return ERR_INVALID_HANDLE; } - if (thread == system.CurrentScheduler().GetCurrentThread()) { + if (thread.get() == system.CurrentScheduler().GetCurrentThread()) { LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); 
return ERR_BUSY; } @@ -1077,7 +1089,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H LOG_DEBUG(Kernel_SVC, "called, context=0x{:08X}, thread=0x{:X}", thread_context, handle); const auto* current_process = system.Kernel().CurrentProcess(); - const SharedPtr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle); + const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle); if (!thread) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); return ERR_INVALID_HANDLE; @@ -1093,7 +1105,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H return ERR_INVALID_HANDLE; } - if (thread == system.CurrentScheduler().GetCurrentThread()) { + if (thread.get() == system.CurrentScheduler().GetCurrentThread()) { LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); return ERR_BUSY; } @@ -1109,7 +1121,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H std::fill(ctx.vector_registers.begin() + 16, ctx.vector_registers.end(), u128{}); } - Memory::WriteBlock(thread_context, &ctx, sizeof(ctx)); + system.Memory().WriteBlock(thread_context, &ctx, sizeof(ctx)); return RESULT_SUCCESS; } @@ -1118,7 +1130,7 @@ static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle LOG_TRACE(Kernel_SVC, "called"); const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const SharedPtr<Thread> thread = handle_table.Get<Thread>(handle); + const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle); if (!thread) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); return ERR_INVALID_HANDLE; @@ -1142,7 +1154,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri const auto* const current_process = system.Kernel().CurrentProcess(); - SharedPtr<Thread> thread = 
current_process->GetHandleTable().Get<Thread>(handle); + std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle); if (!thread) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); return ERR_INVALID_HANDLE; @@ -1262,27 +1274,28 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add VAddr address) { LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address); const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - SharedPtr<Process> process = handle_table.Get<Process>(process_handle); + std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle); if (!process) { LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}", process_handle); return ERR_INVALID_HANDLE; } + auto& memory = system.Memory(); const auto& vm_manager = process->VMManager(); const MemoryInfo memory_info = vm_manager.QueryMemory(address); - Memory::Write64(memory_info_address, memory_info.base_address); - Memory::Write64(memory_info_address + 8, memory_info.size); - Memory::Write32(memory_info_address + 16, memory_info.state); - Memory::Write32(memory_info_address + 20, memory_info.attributes); - Memory::Write32(memory_info_address + 24, memory_info.permission); - Memory::Write32(memory_info_address + 32, memory_info.ipc_ref_count); - Memory::Write32(memory_info_address + 28, memory_info.device_ref_count); - Memory::Write32(memory_info_address + 36, 0); + memory.Write64(memory_info_address, memory_info.base_address); + memory.Write64(memory_info_address + 8, memory_info.size); + memory.Write32(memory_info_address + 16, memory_info.state); + memory.Write32(memory_info_address + 20, memory_info.attributes); + memory.Write32(memory_info_address + 24, memory_info.permission); + memory.Write32(memory_info_address + 32, memory_info.ipc_ref_count); + memory.Write32(memory_info_address + 28, memory_info.device_ref_count); + 
memory.Write32(memory_info_address + 36, 0); // Page info appears to be currently unused by the kernel and is always set to zero. - Memory::Write32(page_info_address, 0); + memory.Write32(page_info_address, 0); return RESULT_SUCCESS; } @@ -1490,7 +1503,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e } auto& kernel = system.Kernel(); - CASCADE_RESULT(SharedPtr<Thread> thread, + CASCADE_RESULT(std::shared_ptr<Thread> thread, Thread::Create(kernel, "", entry_point, priority, arg, processor_id, stack_top, *current_process)); @@ -1516,7 +1529,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) { LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle); const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); + const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); if (!thread) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}", thread_handle); @@ -1540,7 +1553,7 @@ static void ExitThread(Core::System& system) { auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); current_thread->Stop(); - system.GlobalScheduler().RemoveThread(current_thread); + system.GlobalScheduler().RemoveThread(SharedFrom(current_thread)); system.PrepareReschedule(); } @@ -1612,7 +1625,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add auto* const current_process = system.Kernel().CurrentProcess(); const auto& handle_table = current_process->GetHandleTable(); - SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); + std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); ASSERT(thread); const auto release_result = current_process->GetMutex().Release(mutex_addr); @@ -1620,12 +1633,13 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add return release_result; } - 
SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread(); + Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); current_thread->SetCondVarWaitAddress(condition_variable_addr); current_thread->SetMutexWaitAddress(mutex_addr); current_thread->SetWaitHandle(thread_handle); current_thread->SetStatus(ThreadStatus::WaitCondVar); current_thread->InvalidateWakeupCallback(); + current_process->InsertConditionVariableThread(SharedFrom(current_thread)); current_thread->WakeAfterDelay(nano_seconds); @@ -1636,50 +1650,35 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add } /// Signal process wide key -static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, - s32 target) { +static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) { LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}", condition_variable_addr, target); ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); // Retrieve a list of all threads that are waiting for this condition variable. - std::vector<SharedPtr<Thread>> waiting_threads; - const auto& scheduler = system.GlobalScheduler(); - const auto& thread_list = scheduler.GetThreadList(); - - for (const auto& thread : thread_list) { - if (thread->GetCondVarWaitAddress() == condition_variable_addr) { - waiting_threads.push_back(thread); - } - } - - // Sort them by priority, such that the highest priority ones come first. 
- std::sort(waiting_threads.begin(), waiting_threads.end(), - [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) { - return lhs->GetPriority() < rhs->GetPriority(); - }); + auto* const current_process = system.Kernel().CurrentProcess(); + std::vector<std::shared_ptr<Thread>> waiting_threads = + current_process->GetConditionVariableThreads(condition_variable_addr); - // Only process up to 'target' threads, unless 'target' is -1, in which case process + // Only process up to 'target' threads, unless 'target' is less equal 0, in which case process // them all. std::size_t last = waiting_threads.size(); - if (target != -1) + if (target > 0) last = std::min(waiting_threads.size(), static_cast<std::size_t>(target)); - // If there are no threads waiting on this condition variable, just exit - if (last == 0) - return RESULT_SUCCESS; - for (std::size_t index = 0; index < last; ++index) { auto& thread = waiting_threads[index]; ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); // liberate Cond Var Thread. + current_process->RemoveConditionVariableThread(thread); thread->SetCondVarWaitAddress(0); const std::size_t current_core = system.CurrentCoreIndex(); auto& monitor = system.Monitor(); + auto& memory = system.Memory(); // Atomically read the value of the mutex. u32 mutex_val = 0; @@ -1689,7 +1688,7 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var monitor.SetExclusive(current_core, mutex_address); // If the mutex is not yet acquired, acquire it. 
- mutex_val = Memory::Read32(mutex_address); + mutex_val = memory.Read32(mutex_address); if (mutex_val != 0) { update_val = mutex_val | Mutex::MutexHasWaitersFlag; @@ -1726,8 +1725,6 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var system.PrepareReschedule(thread->GetProcessorID()); } } - - return RESULT_SUCCESS; } // Wait for an address (via Address Arbiter) @@ -1781,12 +1778,25 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake); } +static void KernelDebug([[maybe_unused]] Core::System& system, + [[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1, + [[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) { + // Intentionally do nothing, as this does nothing in released kernel binaries. +} + +static void ChangeKernelTraceState([[maybe_unused]] Core::System& system, + [[maybe_unused]] u32 trace_state) { + // Intentionally do nothing, as this does nothing in released kernel binaries. +} + /// This returns the total CPU ticks elapsed since the CPU was powered-on static u64 GetSystemTick(Core::System& system) { LOG_TRACE(Kernel_SVC, "called"); auto& core_timing = system.CoreTiming(); - const u64 result{core_timing.GetTicks()}; + + // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) + const u64 result{Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks())}; // Advance time to defeat dumb games that busy-wait for the frame to end. 
core_timing.AddTicks(400); @@ -1975,7 +1985,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); + const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); if (!thread) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}", thread_handle); @@ -2034,7 +2044,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, } const auto& handle_table = current_process->GetHandleTable(); - const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); + const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); if (!thread) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}", thread_handle); @@ -2290,12 +2300,13 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes, return ERR_INVALID_ADDRESS_STATE; } + auto& memory = system.Memory(); const auto& process_list = kernel.GetProcessList(); const auto num_processes = process_list.size(); const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes); for (std::size_t i = 0; i < copy_amount; ++i) { - Memory::Write64(out_process_ids, process_list[i]->GetProcessID()); + memory.Write64(out_process_ids, process_list[i]->GetProcessID()); out_process_ids += sizeof(u64); } @@ -2329,13 +2340,14 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd return ERR_INVALID_ADDRESS_STATE; } + auto& memory = system.Memory(); const auto& thread_list = current_process->GetThreadList(); const auto num_threads = thread_list.size(); const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads); auto list_iter = thread_list.cbegin(); for (std::size_t i = 0; i < copy_amount; ++i, 
++list_iter) { - Memory::Write64(out_thread_ids, (*list_iter)->GetThreadID()); + memory.Write64(out_thread_ids, (*list_iter)->GetThreadID()); out_thread_ids += sizeof(u64); } @@ -2414,8 +2426,8 @@ static const FunctionDef SVC_Table[] = { {0x39, nullptr, "Unknown"}, {0x3A, nullptr, "Unknown"}, {0x3B, nullptr, "Unknown"}, - {0x3C, nullptr, "DumpInfo"}, - {0x3D, nullptr, "DumpInfoNew"}, + {0x3C, SvcWrap<KernelDebug>, "KernelDebug"}, + {0x3D, SvcWrap<ChangeKernelTraceState>, "ChangeKernelTraceState"}, {0x3E, nullptr, "Unknown"}, {0x3F, nullptr, "Unknown"}, {0x40, nullptr, "CreateSession"}, diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index c2d8d0dc3..29a2cfa9d 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -112,11 +112,6 @@ void SvcWrap(Core::System& system) { FuncReturn(system, retval); } -template <ResultCode func(Core::System&, u64, s32)> -void SvcWrap(Core::System& system) { - FuncReturn(system, func(system, Param(system, 0), static_cast<s32>(Param(system, 1))).raw); -} - template <ResultCode func(Core::System&, u64, u32)> void SvcWrap(Core::System& system) { FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1))).raw); @@ -311,11 +306,27 @@ void SvcWrap(Core::System& system) { func(system); } +template <void func(Core::System&, u32)> +void SvcWrap(Core::System& system) { + func(system, static_cast<u32>(Param(system, 0))); +} + +template <void func(Core::System&, u32, u64, u64, u64)> +void SvcWrap(Core::System& system) { + func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2), + Param(system, 3)); +} + template <void func(Core::System&, s64)> void SvcWrap(Core::System& system) { func(system, static_cast<s64>(Param(system, 0))); } +template <void func(Core::System&, u64, s32)> +void SvcWrap(Core::System& system) { + func(system, Param(system, 0), static_cast<s32>(Param(system, 1))); +} + template <void func(Core::System&, u64, u64)> void 
SvcWrap(Core::System& system) { func(system, Param(system, 0), Param(system, 1)); diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 962530d2d..e84e5ce0d 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -50,7 +50,7 @@ void Thread::Stop() { // Clean up any dangling references in objects that this thread was waiting for for (auto& wait_object : wait_objects) { - wait_object->RemoveWaitingThread(this); + wait_object->RemoveWaitingThread(SharedFrom(this)); } wait_objects.clear(); @@ -77,18 +77,6 @@ void Thread::CancelWakeupTimer() { callback_handle); } -static std::optional<s32> GetNextProcessorId(u64 mask) { - for (s32 index = 0; index < Core::NUM_CPU_CORES; ++index) { - if (mask & (1ULL << index)) { - if (!Core::System::GetInstance().Scheduler(index).GetCurrentThread()) { - // Core is enabled and not running any threads, use this one - return index; - } - } - } - return {}; -} - void Thread::ResumeFromWait() { ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects"); @@ -132,8 +120,11 @@ void Thread::ResumeFromWait() { } void Thread::CancelWait() { - ASSERT(GetStatus() == ThreadStatus::WaitSynch); - ClearWaitObjects(); + if (GetSchedulingStatus() != ThreadSchedStatus::Paused) { + is_sync_cancelled = true; + return; + } + is_sync_cancelled = false; SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED); ResumeFromWait(); } @@ -156,9 +147,10 @@ static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, VAdd context.fpcr = 0x03C00000; } -ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name, VAddr entry_point, - u32 priority, u64 arg, s32 processor_id, - VAddr stack_top, Process& owner_process) { +ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name, + VAddr entry_point, u32 priority, u64 arg, + s32 processor_id, VAddr stack_top, + Process& owner_process) { // Check if priority is in ranged. 
Lowest priority -> highest priority id. if (priority > THREADPRIO_LOWEST) { LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority); @@ -170,14 +162,14 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name return ERR_INVALID_PROCESSOR_ID; } - if (!Memory::IsValidVirtualAddress(owner_process, entry_point)) { + auto& system = Core::System::GetInstance(); + if (!system.Memory().IsValidVirtualAddress(owner_process, entry_point)) { LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); // TODO (bunnei): Find the correct error code to use here - return ResultCode(-1); + return RESULT_UNKNOWN; } - auto& system = Core::System::GetInstance(); - SharedPtr<Thread> thread(new Thread(kernel)); + std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel); thread->thread_id = kernel.CreateNewThreadID(); thread->status = ThreadStatus::Dormant; @@ -206,7 +198,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name // to initialize the context ResetThreadContext(thread->context, stack_top, entry_point, arg); - return MakeResult<SharedPtr<Thread>>(std::move(thread)); + return MakeResult<std::shared_ptr<Thread>>(std::move(thread)); } void Thread::SetPriority(u32 priority) { @@ -224,7 +216,7 @@ void Thread::SetWaitSynchronizationOutput(s32 output) { context.cpu_registers[1] = output; } -s32 Thread::GetWaitObjectIndex(const WaitObject* object) const { +s32 Thread::GetWaitObjectIndex(std::shared_ptr<WaitObject> object) const { ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything"); const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object); return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1); @@ -264,8 +256,8 @@ void Thread::SetStatus(ThreadStatus new_status) { status = new_status; } -void Thread::AddMutexWaiter(SharedPtr<Thread> thread) { - if (thread->lock_owner == this) { +void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) 
{ + if (thread->lock_owner.get() == this) { // If the thread is already waiting for this thread to release the mutex, ensure that the // waiters list is consistent and return without doing anything. const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); @@ -285,13 +277,13 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) { wait_mutex_threads.begin(), wait_mutex_threads.end(), [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); }); wait_mutex_threads.insert(insertion_point, thread); - thread->lock_owner = this; + thread->lock_owner = SharedFrom(this); UpdatePriority(); } -void Thread::RemoveMutexWaiter(SharedPtr<Thread> thread) { - ASSERT(thread->lock_owner == this); +void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) { + ASSERT(thread->lock_owner.get() == this); // Ensure that the thread is in the list of mutex waiters const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); @@ -318,16 +310,24 @@ void Thread::UpdatePriority() { return; } + if (GetStatus() == ThreadStatus::WaitCondVar) { + owner_process->RemoveConditionVariableThread(SharedFrom(this)); + } + SetCurrentPriority(new_priority); + if (GetStatus() == ThreadStatus::WaitCondVar) { + owner_process->InsertConditionVariableThread(SharedFrom(this)); + } + if (!lock_owner) { return; } // Ensure that the thread is within the correct location in the waiting list. auto old_owner = lock_owner; - lock_owner->RemoveMutexWaiter(this); - old_owner->AddMutexWaiter(this); + lock_owner->RemoveMutexWaiter(SharedFrom(this)); + old_owner->AddMutexWaiter(SharedFrom(this)); // Recursively update the priority of the thread that depends on the priority of this one. 
lock_owner->UpdatePriority(); @@ -340,11 +340,11 @@ void Thread::ChangeCore(u32 core, u64 mask) { bool Thread::AllWaitObjectsReady() const { return std::none_of( wait_objects.begin(), wait_objects.end(), - [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); }); + [this](const std::shared_ptr<WaitObject>& object) { return object->ShouldWait(this); }); } -bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread, - SharedPtr<WaitObject> object, std::size_t index) { +bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, + std::shared_ptr<WaitObject> object, std::size_t index) { ASSERT(wakeup_callback); return wakeup_callback(reason, std::move(thread), std::move(object), index); } @@ -401,7 +401,7 @@ void Thread::SetCurrentPriority(u32 new_priority) { ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { const auto HighestSetCore = [](u64 mask, u32 max_cores) { - for (s32 core = max_cores - 1; core >= 0; core--) { + for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) { if (((mask >> core) & 1) != 0) { return core; } @@ -425,7 +425,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { if (old_affinity_mask != new_affinity_mask) { const s32 old_core = processor_id; if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { - if (ideal_core < 0) { + if (static_cast<s32>(ideal_core) < 0) { processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES); } else { processor_id = ideal_core; @@ -447,23 +447,23 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) { ThreadSchedStatus::Runnable) { // In this case the thread was running, now it's pausing/exitting if (processor_id >= 0) { - scheduler.Unschedule(current_priority, processor_id, this); + scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this); } - for (s32 core = 0; core < 
GlobalScheduler::NUM_CPU_CORES; core++) { - if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { - scheduler.Unsuggest(current_priority, static_cast<u32>(core), this); + for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { + if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) { + scheduler.Unsuggest(current_priority, core, this); } } } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) { // The thread is now set to running from being stopped if (processor_id >= 0) { - scheduler.Schedule(current_priority, processor_id, this); + scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this); } - for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { - if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { - scheduler.Suggest(current_priority, static_cast<u32>(core), this); + for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { + if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) { + scheduler.Suggest(current_priority, core, this); } } } @@ -477,11 +477,11 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) { } auto& scheduler = Core::System::GetInstance().GlobalScheduler(); if (processor_id >= 0) { - scheduler.Unschedule(old_priority, processor_id, this); + scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this); } for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { - if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { + if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) { scheduler.Unsuggest(old_priority, core, this); } } @@ -491,14 +491,14 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) { if (processor_id >= 0) { if (current_thread == this) { - scheduler.SchedulePrepend(current_priority, processor_id, this); + scheduler.SchedulePrepend(current_priority, static_cast<u32>(processor_id), this); } else { - 
scheduler.Schedule(current_priority, processor_id, this); + scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this); } } for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { - if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { + if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) { scheduler.Suggest(current_priority, core, this); } } @@ -515,7 +515,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) { for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { if (((old_affinity_mask >> core) & 1) != 0) { - if (core == old_core) { + if (core == static_cast<u32>(old_core)) { scheduler.Unschedule(current_priority, core, this); } else { scheduler.Unsuggest(current_priority, core, this); @@ -525,7 +525,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) { for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { if (((affinity_mask >> core) & 1) != 0) { - if (core == processor_id) { + if (core == static_cast<u32>(processor_id)) { scheduler.Schedule(current_priority, core, this); } else { scheduler.Suggest(current_priority, core, this); diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index c9870873d..3bcf9e137 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -97,14 +97,18 @@ enum class ThreadSchedMasks : u32 { class Thread final : public WaitObject { public: - using MutexWaitingThreads = std::vector<SharedPtr<Thread>>; + explicit Thread(KernelCore& kernel); + ~Thread() override; + + using MutexWaitingThreads = std::vector<std::shared_ptr<Thread>>; using ThreadContext = Core::ARM_Interface::ThreadContext; - using ThreadWaitObjects = std::vector<SharedPtr<WaitObject>>; + using ThreadWaitObjects = std::vector<std::shared_ptr<WaitObject>>; - using WakeupCallback = std::function<bool(ThreadWakeupReason reason, SharedPtr<Thread> thread, - SharedPtr<WaitObject> 
object, std::size_t index)>; + using WakeupCallback = + std::function<bool(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, + std::shared_ptr<WaitObject> object, std::size_t index)>; /** * Creates and returns a new thread. The new thread is immediately scheduled @@ -118,10 +122,10 @@ public: * @param owner_process The parent process for the thread * @return A shared pointer to the newly created thread */ - static ResultVal<SharedPtr<Thread>> Create(KernelCore& kernel, std::string name, - VAddr entry_point, u32 priority, u64 arg, - s32 processor_id, VAddr stack_top, - Process& owner_process); + static ResultVal<std::shared_ptr<Thread>> Create(KernelCore& kernel, std::string name, + VAddr entry_point, u32 priority, u64 arg, + s32 processor_id, VAddr stack_top, + Process& owner_process); std::string GetName() const override { return name; @@ -166,10 +170,10 @@ public: void SetPriority(u32 priority); /// Adds a thread to the list of threads that are waiting for a lock held by this thread. - void AddMutexWaiter(SharedPtr<Thread> thread); + void AddMutexWaiter(std::shared_ptr<Thread> thread); /// Removes a thread from the list of threads that are waiting for a lock held by this thread. - void RemoveMutexWaiter(SharedPtr<Thread> thread); + void RemoveMutexWaiter(std::shared_ptr<Thread> thread); /// Recalculates the current priority taking into account priority inheritance. void UpdatePriority(); @@ -229,7 +233,7 @@ public: * * @param object Object to query the index of. 
*/ - s32 GetWaitObjectIndex(const WaitObject* object) const; + s32 GetWaitObjectIndex(std::shared_ptr<WaitObject> object) const; /** * Stops a thread, invalidating it from further use @@ -320,7 +324,7 @@ public: void ClearWaitObjects() { for (const auto& waiting_object : wait_objects) { - waiting_object->RemoveWaitingThread(this); + waiting_object->RemoveWaitingThread(SharedFrom(this)); } wait_objects.clear(); } @@ -336,7 +340,7 @@ public: return lock_owner.get(); } - void SetLockOwner(SharedPtr<Thread> owner) { + void SetLockOwner(std::shared_ptr<Thread> owner) { lock_owner = std::move(owner); } @@ -390,8 +394,8 @@ public: * @pre A valid wakeup callback has been set. Violating this precondition * will cause an assertion to trigger. */ - bool InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread, - SharedPtr<WaitObject> object, std::size_t index); + bool InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, + std::shared_ptr<WaitObject> object, std::size_t index); u32 GetIdealCore() const { return ideal_core; @@ -440,10 +444,15 @@ public: is_running = value; } -private: - explicit Thread(KernelCore& kernel); - ~Thread() override; + bool IsSyncCancelled() const { + return is_sync_cancelled; + } + void SetSyncCancelled(bool value) { + is_sync_cancelled = value; + } + +private: void SetSchedulingStatus(ThreadSchedStatus new_status); void SetCurrentPriority(u32 new_priority); ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask); @@ -491,7 +500,7 @@ private: MutexWaitingThreads wait_mutex_threads; /// Thread that owns the lock that this thread is waiting for. 
- SharedPtr<Thread> lock_owner; + std::shared_ptr<Thread> lock_owner; /// If waiting on a ConditionVariable, this is the ConditionVariable address VAddr condvar_wait_address = 0; @@ -524,6 +533,7 @@ private: u32 scheduling_state = 0; bool is_running = false; + bool is_sync_cancelled = false; std::string name; }; diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp index 1113c815e..f0e73f57b 100644 --- a/src/core/hle/kernel/transfer_memory.cpp +++ b/src/core/hle/kernel/transfer_memory.cpp @@ -14,9 +14,9 @@ namespace Kernel { TransferMemory::TransferMemory(KernelCore& kernel) : Object{kernel} {} TransferMemory::~TransferMemory() = default; -SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address, u64 size, - MemoryPermission permissions) { - SharedPtr<TransferMemory> transfer_memory{new TransferMemory(kernel)}; +std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address, + u64 size, MemoryPermission permissions) { + std::shared_ptr<TransferMemory> transfer_memory{std::make_shared<TransferMemory>(kernel)}; transfer_memory->base_address = base_address; transfer_memory->memory_size = size; diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h index 6be9dc094..0a6e15d18 100644 --- a/src/core/hle/kernel/transfer_memory.h +++ b/src/core/hle/kernel/transfer_memory.h @@ -5,7 +5,6 @@ #pragma once #include <memory> -#include <vector> #include "core/hle/kernel/object.h" #include "core/hle/kernel/physical_memory.h" @@ -27,10 +26,13 @@ enum class MemoryPermission : u32; /// class TransferMemory final : public Object { public: + explicit TransferMemory(KernelCore& kernel); + ~TransferMemory() override; + static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory; - static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, u64 size, - MemoryPermission permissions); + static std::shared_ptr<TransferMemory> 
Create(KernelCore& kernel, VAddr base_address, u64 size, + MemoryPermission permissions); TransferMemory(const TransferMemory&) = delete; TransferMemory& operator=(const TransferMemory&) = delete; @@ -79,9 +81,6 @@ public: ResultCode UnmapMemory(VAddr address, u64 size); private: - explicit TransferMemory(KernelCore& kernel); - ~TransferMemory() override; - /// Memory block backing this instance. std::shared_ptr<PhysicalMemory> backing_block; diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index c7af87073..a9a20ef76 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp @@ -16,7 +16,6 @@ #include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/vm_manager.h" #include "core/memory.h" -#include "core/memory_setup.h" namespace Kernel { namespace { @@ -167,7 +166,7 @@ ResultVal<VAddr> VMManager::FindFreeRegion(VAddr begin, VAddr end, u64 size) con if (vma_handle == vma_map.cend()) { // TODO(Subv): Find the correct error code here. 
- return ResultCode(-1); + return RESULT_UNKNOWN; } const VAddr target = std::max(begin, vma_handle->second.base); @@ -786,19 +785,21 @@ void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryAre } void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) { + auto& memory = system.Memory(); + switch (vma.type) { case VMAType::Free: - Memory::UnmapRegion(page_table, vma.base, vma.size); + memory.UnmapRegion(page_table, vma.base, vma.size); break; case VMAType::AllocatedMemoryBlock: - Memory::MapMemoryRegion(page_table, vma.base, vma.size, - vma.backing_block->data() + vma.offset); + memory.MapMemoryRegion(page_table, vma.base, vma.size, + vma.backing_block->data() + vma.offset); break; case VMAType::BackingMemory: - Memory::MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory); + memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory); break; case VMAType::MMIO: - Memory::MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler); + memory.MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler); break; } } diff --git a/src/core/hle/kernel/wait_object.cpp b/src/core/hle/kernel/wait_object.cpp index c00cef062..745f2c4e8 100644 --- a/src/core/hle/kernel/wait_object.cpp +++ b/src/core/hle/kernel/wait_object.cpp @@ -18,13 +18,13 @@ namespace Kernel { WaitObject::WaitObject(KernelCore& kernel) : Object{kernel} {} WaitObject::~WaitObject() = default; -void WaitObject::AddWaitingThread(SharedPtr<Thread> thread) { +void WaitObject::AddWaitingThread(std::shared_ptr<Thread> thread) { auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread); if (itr == waiting_threads.end()) waiting_threads.push_back(std::move(thread)); } -void WaitObject::RemoveWaitingThread(Thread* thread) { +void WaitObject::RemoveWaitingThread(std::shared_ptr<Thread> thread) { auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread); // If a thread passed multiple handles to the same object, // 
the kernel might attempt to remove the thread from the object's @@ -33,7 +33,7 @@ void WaitObject::RemoveWaitingThread(Thread* thread) { waiting_threads.erase(itr); } -SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() const { +std::shared_ptr<Thread> WaitObject::GetHighestPriorityReadyThread() const { Thread* candidate = nullptr; u32 candidate_priority = THREADPRIO_LOWEST + 1; @@ -64,10 +64,10 @@ SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() const { } } - return candidate; + return SharedFrom(candidate); } -void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) { +void WaitObject::WakeupWaitingThread(std::shared_ptr<Thread> thread) { ASSERT(!ShouldWait(thread.get())); if (!thread) { @@ -83,7 +83,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) { Acquire(thread.get()); } - const std::size_t index = thread->GetWaitObjectIndex(this); + const std::size_t index = thread->GetWaitObjectIndex(SharedFrom(this)); thread->ClearWaitObjects(); @@ -91,7 +91,8 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) { bool resume = true; if (thread->HasWakeupCallback()) { - resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Signal, thread, this, index); + resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Signal, thread, SharedFrom(this), + index); } if (resume) { thread->ResumeFromWait(); @@ -105,7 +106,7 @@ void WaitObject::WakeupAllWaitingThreads() { } } -const std::vector<SharedPtr<Thread>>& WaitObject::GetWaitingThreads() const { +const std::vector<std::shared_ptr<Thread>>& WaitObject::GetWaitingThreads() const { return waiting_threads; } diff --git a/src/core/hle/kernel/wait_object.h b/src/core/hle/kernel/wait_object.h index 3271a30a7..9a17958a4 100644 --- a/src/core/hle/kernel/wait_object.h +++ b/src/core/hle/kernel/wait_object.h @@ -4,8 +4,9 @@ #pragma once +#include <memory> #include <vector> -#include <boost/smart_ptr/intrusive_ptr.hpp> + #include "core/hle/kernel/object.h" namespace Kernel { 
@@ -33,13 +34,13 @@ public: * Add a thread to wait on this object * @param thread Pointer to thread to add */ - void AddWaitingThread(SharedPtr<Thread> thread); + void AddWaitingThread(std::shared_ptr<Thread> thread); /** * Removes a thread from waiting on this object (e.g. if it was resumed already) * @param thread Pointer to thread to remove */ - void RemoveWaitingThread(Thread* thread); + void RemoveWaitingThread(std::shared_ptr<Thread> thread); /** * Wake up all threads waiting on this object that can be awoken, in priority order, @@ -51,24 +52,24 @@ public: * Wakes up a single thread waiting on this object. * @param thread Thread that is waiting on this object to wakeup. */ - void WakeupWaitingThread(SharedPtr<Thread> thread); + void WakeupWaitingThread(std::shared_ptr<Thread> thread); /// Obtains the highest priority thread that is ready to run from this object's waiting list. - SharedPtr<Thread> GetHighestPriorityReadyThread() const; + std::shared_ptr<Thread> GetHighestPriorityReadyThread() const; /// Get a const reference to the waiting threads list for debug use - const std::vector<SharedPtr<Thread>>& GetWaitingThreads() const; + const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const; private: /// Threads waiting for this object to become available - std::vector<SharedPtr<Thread>> waiting_threads; + std::vector<std::shared_ptr<Thread>> waiting_threads; }; // Specialization of DynamicObjectCast for WaitObjects template <> -inline SharedPtr<WaitObject> DynamicObjectCast<WaitObject>(SharedPtr<Object> object) { +inline std::shared_ptr<WaitObject> DynamicObjectCast<WaitObject>(std::shared_ptr<Object> object) { if (object != nullptr && object->IsWaitable()) { - return boost::static_pointer_cast<WaitObject>(object); + return std::static_pointer_cast<WaitObject>(object); } return nullptr; } diff --git a/src/core/hle/kernel/writable_event.cpp b/src/core/hle/kernel/writable_event.cpp index c783a34ee..c9332e3e1 100644 --- 
a/src/core/hle/kernel/writable_event.cpp +++ b/src/core/hle/kernel/writable_event.cpp @@ -16,8 +16,8 @@ WritableEvent::WritableEvent(KernelCore& kernel) : Object{kernel} {} WritableEvent::~WritableEvent() = default; EventPair WritableEvent::CreateEventPair(KernelCore& kernel, std::string name) { - SharedPtr<WritableEvent> writable_event(new WritableEvent(kernel)); - SharedPtr<ReadableEvent> readable_event(new ReadableEvent(kernel)); + std::shared_ptr<WritableEvent> writable_event(new WritableEvent(kernel)); + std::shared_ptr<ReadableEvent> readable_event(new ReadableEvent(kernel)); writable_event->name = name + ":Writable"; writable_event->readable = readable_event; @@ -27,7 +27,7 @@ EventPair WritableEvent::CreateEventPair(KernelCore& kernel, std::string name) { return {std::move(readable_event), std::move(writable_event)}; } -SharedPtr<ReadableEvent> WritableEvent::GetReadableEvent() const { +std::shared_ptr<ReadableEvent> WritableEvent::GetReadableEvent() const { return readable; } diff --git a/src/core/hle/kernel/writable_event.h b/src/core/hle/kernel/writable_event.h index f46cf1dd8..6189cf65c 100644 --- a/src/core/hle/kernel/writable_event.h +++ b/src/core/hle/kernel/writable_event.h @@ -4,6 +4,8 @@ #pragma once +#include <memory> + #include "core/hle/kernel/object.h" namespace Kernel { @@ -13,8 +15,8 @@ class ReadableEvent; class WritableEvent; struct EventPair { - SharedPtr<ReadableEvent> readable; - SharedPtr<WritableEvent> writable; + std::shared_ptr<ReadableEvent> readable; + std::shared_ptr<WritableEvent> writable; }; class WritableEvent final : public Object { @@ -40,7 +42,7 @@ public: return HANDLE_TYPE; } - SharedPtr<ReadableEvent> GetReadableEvent() const; + std::shared_ptr<ReadableEvent> GetReadableEvent() const; void Signal(); void Clear(); @@ -49,7 +51,7 @@ public: private: explicit WritableEvent(KernelCore& kernel); - SharedPtr<ReadableEvent> readable; + std::shared_ptr<ReadableEvent> readable; std::string name; ///< Name of event (optional) }; 
diff --git a/src/core/hle/result.h b/src/core/hle/result.h index 8a3701151..450f61fea 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h @@ -147,6 +147,14 @@ constexpr bool operator!=(const ResultCode& a, const ResultCode& b) { constexpr ResultCode RESULT_SUCCESS(0); /** + * Placeholder result code used for unknown error codes. + * + * @note This should only be used when a particular error code + * is not known yet. + */ +constexpr ResultCode RESULT_UNKNOWN(UINT32_MAX); + +/** * This is an optional value type. It holds a `ResultCode` and, if that code is a success code, * also holds a result of type `T`. If the code is an error code then trying to access the inner * value fails, thus ensuring that the ResultCode of functions is always checked properly before @@ -183,7 +191,7 @@ class ResultVal { public: /// Constructs an empty `ResultVal` with the given error code. The code must not be a success /// code. - ResultVal(ResultCode error_code = ResultCode(-1)) : result_code(error_code) { + ResultVal(ResultCode error_code = RESULT_UNKNOWN) : result_code(error_code) { ASSERT(error_code.IsError()); } diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp index 0c0f7ed6e..7e3e311fb 100644 --- a/src/core/hle/service/acc/acc.cpp +++ b/src/core/hle/service/acc/acc.cpp @@ -84,7 +84,7 @@ protected: LOG_ERROR(Service_ACC, "Failed to get profile base and data for user={}", user_id.Format()); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultCode(-1)); // TODO(ogniK): Get actual error code + rb.Push(RESULT_UNKNOWN); // TODO(ogniK): Get actual error code } } @@ -98,7 +98,7 @@ protected: } else { LOG_ERROR(Service_ACC, "Failed to get profile base for user={}", user_id.Format()); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultCode(-1)); // TODO(ogniK): Get actual error code + rb.Push(RESULT_UNKNOWN); // TODO(ogniK): Get actual error code } } @@ -442,7 +442,7 @@ void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContex 
const auto user_list = profile_manager->GetAllUsers(); if (std::all_of(user_list.begin(), user_list.end(), [](const auto& user) { return user.uuid == Common::INVALID_UUID; })) { - rb.Push(ResultCode(-1)); // TODO(ogniK): Find the correct error code + rb.Push(RESULT_UNKNOWN); // TODO(ogniK): Find the correct error code rb.PushRaw<u128>(Common::INVALID_UUID); return; } diff --git a/src/core/hle/service/acc/acc_su.cpp b/src/core/hle/service/acc/acc_su.cpp index 0d1663657..b941c260b 100644 --- a/src/core/hle/service/acc/acc_su.cpp +++ b/src/core/hle/service/acc/acc_su.cpp @@ -28,6 +28,7 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p {103, nullptr, "GetBaasUserAvailabilityChangeNotifier"}, {104, nullptr, "GetProfileUpdateNotifier"}, {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, + {106, nullptr, "GetProfileSyncNotifier"}, {110, nullptr, "StoreSaveDataThumbnail"}, {111, nullptr, "ClearSaveDataThumbnail"}, {112, nullptr, "LoadSaveDataThumbnail"}, @@ -44,6 +45,8 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p {205, &ACC_SU::GetProfileEditor, "GetProfileEditor"}, {206, nullptr, "CompleteUserRegistrationForcibly"}, {210, nullptr, "CreateFloatingRegistrationRequest"}, + {211, nullptr, "CreateProcedureToRegisterUserWithNintendoAccount"}, + {212, nullptr, "ResumeProcedureToRegisterUserWithNintendoAccount"}, {230, nullptr, "AuthenticateServiceAsync"}, {250, nullptr, "GetBaasAccountAdministrator"}, {290, nullptr, "ProxyProcedureForGuestLoginWithNintendoAccount"}, diff --git a/src/core/hle/service/acc/acc_u1.cpp b/src/core/hle/service/acc/acc_u1.cpp index 6520b3968..858e91dde 100644 --- a/src/core/hle/service/acc/acc_u1.cpp +++ b/src/core/hle/service/acc/acc_u1.cpp @@ -28,6 +28,7 @@ ACC_U1::ACC_U1(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p {103, nullptr, "GetProfileUpdateNotifier"}, {104, nullptr, "CheckNetworkServiceAvailabilityAsync"}, {105, nullptr, 
"GetBaasUserAvailabilityChangeNotifier"}, + {106, nullptr, "GetProfileSyncNotifier"}, {110, nullptr, "StoreSaveDataThumbnail"}, {111, nullptr, "ClearSaveDataThumbnail"}, {112, nullptr, "LoadSaveDataThumbnail"}, diff --git a/src/core/hle/service/acc/profile_manager.cpp b/src/core/hle/service/acc/profile_manager.cpp index 8f9986326..3e756e59e 100644 --- a/src/core/hle/service/acc/profile_manager.cpp +++ b/src/core/hle/service/acc/profile_manager.cpp @@ -31,8 +31,8 @@ struct ProfileDataRaw { static_assert(sizeof(ProfileDataRaw) == 0x650, "ProfileDataRaw has incorrect size."); // TODO(ogniK): Get actual error codes -constexpr ResultCode ERROR_TOO_MANY_USERS(ErrorModule::Account, -1); -constexpr ResultCode ERROR_USER_ALREADY_EXISTS(ErrorModule::Account, -2); +constexpr ResultCode ERROR_TOO_MANY_USERS(ErrorModule::Account, u32(-1)); +constexpr ResultCode ERROR_USER_ALREADY_EXISTS(ErrorModule::Account, u32(-2)); constexpr ResultCode ERROR_ARGUMENT_IS_NULL(ErrorModule::Account, 20); constexpr char ACC_SAVE_AVATORS_BASE_PATH[] = "/system/save/8000000000000010/su/avators/"; diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index ba54b3040..95aa5d23d 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp @@ -229,7 +229,15 @@ IDebugFunctions::IDebugFunctions() : ServiceFramework{"IDebugFunctions"} { {20, nullptr, "InvalidateTransitionLayer"}, {30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"}, {40, nullptr, "GetAppletResourceUsageInfo"}, - {41, nullptr, "SetCpuBoostModeForApplet"}, + {100, nullptr, "SetCpuBoostModeForApplet"}, + {110, nullptr, "PushToAppletBoundChannelForDebug"}, + {111, nullptr, "TryPopFromAppletBoundChannelForDebug"}, + {120, nullptr, "AlarmSettingNotificationEnableAppEventReserve"}, + {121, nullptr, "AlarmSettingNotificationDisableAppEventReserve"}, + {122, nullptr, "AlarmSettingNotificationPushAppEventNotify"}, + {130, nullptr, "FriendInvitationSetApplicationParameter"}, + {131, 
nullptr, "FriendInvitationClearApplicationParameter"}, + {132, nullptr, "FriendInvitationPushApplicationParameter"}, }; // clang-format on @@ -278,10 +286,12 @@ ISelfController::ISelfController(Core::System& system, {69, &ISelfController::IsAutoSleepDisabled, "IsAutoSleepDisabled"}, {70, nullptr, "ReportMultimediaError"}, {71, nullptr, "GetCurrentIlluminanceEx"}, + {72, nullptr, "SetInputDetectionPolicy"}, {80, nullptr, "SetWirelessPriorityMode"}, {90, &ISelfController::GetAccumulatedSuspendedTickValue, "GetAccumulatedSuspendedTickValue"}, {91, &ISelfController::GetAccumulatedSuspendedTickChangedEvent, "GetAccumulatedSuspendedTickChangedEvent"}, {100, nullptr, "SetAlbumImageTakenNotificationEnabled"}, + {110, nullptr, "SetApplicationAlbumUserData"}, {1000, nullptr, "GetDebugStorageChannel"}, }; // clang-format on @@ -531,12 +541,11 @@ AppletMessageQueue::AppletMessageQueue(Kernel::KernelCore& kernel) { AppletMessageQueue::~AppletMessageQueue() = default; -const Kernel::SharedPtr<Kernel::ReadableEvent>& AppletMessageQueue::GetMesssageRecieveEvent() - const { +const std::shared_ptr<Kernel::ReadableEvent>& AppletMessageQueue::GetMesssageRecieveEvent() const { return on_new_message.readable; } -const Kernel::SharedPtr<Kernel::ReadableEvent>& AppletMessageQueue::GetOperationModeChangedEvent() +const std::shared_ptr<Kernel::ReadableEvent>& AppletMessageQueue::GetOperationModeChangedEvent() const { return on_operation_mode_changed.readable; } @@ -613,6 +622,7 @@ ICommonStateGetter::ICommonStateGetter(Core::System& system, {90, nullptr, "SetPerformanceConfigurationChangedNotification"}, {91, nullptr, "GetCurrentPerformanceConfiguration"}, {200, nullptr, "GetOperationModeSystemInfo"}, + {300, nullptr, "GetSettingsPlatformRegion"}, }; // clang-format on @@ -991,7 +1001,7 @@ void ILibraryAppletCreator::CreateLibraryApplet(Kernel::HLERequestContext& ctx) LOG_ERROR(Service_AM, "Applet doesn't exist! 
applet_id={}", static_cast<u32>(applet_id)); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); return; } @@ -1027,7 +1037,7 @@ void ILibraryAppletCreator::CreateTransferMemoryStorage(Kernel::HLERequestContex if (transfer_mem == nullptr) { LOG_ERROR(Service_AM, "shared_mem is a nullpr for handle={:08X}", handle); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); return; } @@ -1076,11 +1086,18 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_) {100, &IApplicationFunctions::InitializeApplicationCopyrightFrameBuffer, "InitializeApplicationCopyrightFrameBuffer"}, {101, &IApplicationFunctions::SetApplicationCopyrightImage, "SetApplicationCopyrightImage"}, {102, &IApplicationFunctions::SetApplicationCopyrightVisibility, "SetApplicationCopyrightVisibility"}, - {110, nullptr, "QueryApplicationPlayStatistics"}, + {110, &IApplicationFunctions::QueryApplicationPlayStatistics, "QueryApplicationPlayStatistics"}, + {111, &IApplicationFunctions::QueryApplicationPlayStatisticsByUid, "QueryApplicationPlayStatisticsByUid"}, {120, nullptr, "ExecuteProgram"}, {121, nullptr, "ClearUserChannel"}, {122, nullptr, "UnpopToUserChannel"}, {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"}, + {140, nullptr, "GetFriendInvitationStorageChannelEvent"}, + {141, nullptr, "TryPopFromFriendInvitationStorageChannel"}, + {150, nullptr, "GetNotificationStorageChannelEvent"}, + {151, nullptr, "TryPopFromNotificationStorageChannel"}, + {160, nullptr, "GetHealthWarningDisappearedSystemEvent"}, + {170, nullptr, "SetHdcpAuthenticationActivated"}, {500, nullptr, "StartContinuousRecordingFlushForDebug"}, {1000, nullptr, "CreateMovieMaker"}, {1001, nullptr, "PrepareForJit"}, @@ -1335,12 +1352,16 @@ void IApplicationFunctions::GetPseudoDeviceId(Kernel::HLERequestContext& ctx) { } void IApplicationFunctions::ExtendSaveData(Kernel::HLERequestContext& ctx) { + struct Parameters 
{ + FileSys::SaveDataType type; + u128 user_id; + u64 new_normal_size; + u64 new_journal_size; + }; + static_assert(sizeof(Parameters) == 40); + IPC::RequestParser rp{ctx}; - const auto type{rp.PopRaw<FileSys::SaveDataType>()}; - rp.Skip(1, false); - const auto user_id{rp.PopRaw<u128>()}; - const auto new_normal_size{rp.PopRaw<u64>()}; - const auto new_journal_size{rp.PopRaw<u64>()}; + const auto [type, user_id, new_normal_size, new_journal_size] = rp.PopRaw<Parameters>(); LOG_DEBUG(Service_AM, "called with type={:02X}, user_id={:016X}{:016X}, new_normal={:016X}, " @@ -1359,10 +1380,14 @@ void IApplicationFunctions::ExtendSaveData(Kernel::HLERequestContext& ctx) { } void IApplicationFunctions::GetSaveDataSize(Kernel::HLERequestContext& ctx) { + struct Parameters { + FileSys::SaveDataType type; + u128 user_id; + }; + static_assert(sizeof(Parameters) == 24); + IPC::RequestParser rp{ctx}; - const auto type{rp.PopRaw<FileSys::SaveDataType>()}; - rp.Skip(1, false); - const auto user_id{rp.PopRaw<u128>()}; + const auto [type, user_id] = rp.PopRaw<Parameters>(); LOG_DEBUG(Service_AM, "called with type={:02X}, user_id={:016X}{:016X}", static_cast<u8>(type), user_id[1], user_id[0]); @@ -1376,6 +1401,22 @@ void IApplicationFunctions::GetSaveDataSize(Kernel::HLERequestContext& ctx) { rb.Push(size.journal); } +void IApplicationFunctions::QueryApplicationPlayStatistics(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_AM, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(RESULT_SUCCESS); + rb.Push<u32>(0); +} + +void IApplicationFunctions::QueryApplicationPlayStatisticsByUid(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_AM, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(RESULT_SUCCESS); + rb.Push<u32>(0); +} + void IApplicationFunctions::GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_AM, "(STUBBED) called"); @@ -1409,6 +1450,8 @@ IHomeMenuFunctions::IHomeMenuFunctions() : 
ServiceFramework("IHomeMenuFunctions" {30, nullptr, "GetHomeButtonWriterLockAccessor"}, {31, nullptr, "GetWriterLockAccessorEx"}, {100, nullptr, "PopRequestLaunchApplicationForDebug"}, + {110, nullptr, "IsForceTerminateApplicationDisabledForDebug"}, + {200, nullptr, "LaunchDevMenu"}, }; // clang-format on diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h index 2ae9402a8..448817be9 100644 --- a/src/core/hle/service/am/am.h +++ b/src/core/hle/service/am/am.h @@ -54,8 +54,8 @@ public: explicit AppletMessageQueue(Kernel::KernelCore& kernel); ~AppletMessageQueue(); - const Kernel::SharedPtr<Kernel::ReadableEvent>& GetMesssageRecieveEvent() const; - const Kernel::SharedPtr<Kernel::ReadableEvent>& GetOperationModeChangedEvent() const; + const std::shared_ptr<Kernel::ReadableEvent>& GetMesssageRecieveEvent() const; + const std::shared_ptr<Kernel::ReadableEvent>& GetOperationModeChangedEvent() const; void PushMessage(AppletMessage msg); AppletMessage PopMessage(); std::size_t GetMessageCount() const; @@ -255,6 +255,8 @@ private: void InitializeApplicationCopyrightFrameBuffer(Kernel::HLERequestContext& ctx); void SetApplicationCopyrightImage(Kernel::HLERequestContext& ctx); void SetApplicationCopyrightVisibility(Kernel::HLERequestContext& ctx); + void QueryApplicationPlayStatistics(Kernel::HLERequestContext& ctx); + void QueryApplicationPlayStatisticsByUid(Kernel::HLERequestContext& ctx); void GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx); bool launch_popped_application_specific = false; diff --git a/src/core/hle/service/am/applets/applets.cpp b/src/core/hle/service/am/applets/applets.cpp index 673ad1f7f..92f995f8f 100644 --- a/src/core/hle/service/am/applets/applets.cpp +++ b/src/core/hle/service/am/applets/applets.cpp @@ -108,15 +108,15 @@ void AppletDataBroker::SignalStateChanged() const { state_changed_event.writable->Signal(); } -Kernel::SharedPtr<Kernel::ReadableEvent> AppletDataBroker::GetNormalDataEvent() const { 
+std::shared_ptr<Kernel::ReadableEvent> AppletDataBroker::GetNormalDataEvent() const { return pop_out_data_event.readable; } -Kernel::SharedPtr<Kernel::ReadableEvent> AppletDataBroker::GetInteractiveDataEvent() const { +std::shared_ptr<Kernel::ReadableEvent> AppletDataBroker::GetInteractiveDataEvent() const { return pop_interactive_out_data_event.readable; } -Kernel::SharedPtr<Kernel::ReadableEvent> AppletDataBroker::GetStateChangedEvent() const { +std::shared_ptr<Kernel::ReadableEvent> AppletDataBroker::GetStateChangedEvent() const { return state_changed_event.readable; } diff --git a/src/core/hle/service/am/applets/applets.h b/src/core/hle/service/am/applets/applets.h index 226be88b1..16e61fc6f 100644 --- a/src/core/hle/service/am/applets/applets.h +++ b/src/core/hle/service/am/applets/applets.h @@ -86,9 +86,9 @@ public: void SignalStateChanged() const; - Kernel::SharedPtr<Kernel::ReadableEvent> GetNormalDataEvent() const; - Kernel::SharedPtr<Kernel::ReadableEvent> GetInteractiveDataEvent() const; - Kernel::SharedPtr<Kernel::ReadableEvent> GetStateChangedEvent() const; + std::shared_ptr<Kernel::ReadableEvent> GetNormalDataEvent() const; + std::shared_ptr<Kernel::ReadableEvent> GetInteractiveDataEvent() const; + std::shared_ptr<Kernel::ReadableEvent> GetStateChangedEvent() const; private: // Queues are named from applet's perspective diff --git a/src/core/hle/service/am/applets/web_browser.cpp b/src/core/hle/service/am/applets/web_browser.cpp index 32283e819..5546ef6e8 100644 --- a/src/core/hle/service/am/applets/web_browser.cpp +++ b/src/core/hle/service/am/applets/web_browser.cpp @@ -337,7 +337,7 @@ void WebBrowser::ExecuteInternal() { void WebBrowser::InitializeShop() { if (frontend_e_commerce == nullptr) { LOG_ERROR(Service_AM, "Missing ECommerce Applet frontend!"); - status = ResultCode(-1); + status = RESULT_UNKNOWN; return; } @@ -353,7 +353,7 @@ void WebBrowser::InitializeShop() { if (url == args.end()) { LOG_ERROR(Service_AM, "Missing EShop Arguments URL 
for initialization!"); - status = ResultCode(-1); + status = RESULT_UNKNOWN; return; } @@ -366,7 +366,7 @@ void WebBrowser::InitializeShop() { // Less is missing info, More is malformed if (split_query.size() != 2) { LOG_ERROR(Service_AM, "EShop Arguments has more than one question mark, malformed"); - status = ResultCode(-1); + status = RESULT_UNKNOWN; return; } @@ -390,7 +390,7 @@ void WebBrowser::InitializeShop() { if (scene == shop_query.end()) { LOG_ERROR(Service_AM, "No scene parameter was passed via shop query!"); - status = ResultCode(-1); + status = RESULT_UNKNOWN; return; } @@ -406,7 +406,7 @@ void WebBrowser::InitializeShop() { const auto target = target_map.find(scene->second); if (target == target_map.end()) { LOG_ERROR(Service_AM, "Scene for shop query is invalid! (scene={})", scene->second); - status = ResultCode(-1); + status = RESULT_UNKNOWN; return; } @@ -427,7 +427,7 @@ void WebBrowser::InitializeOffline() { if (args.find(WebArgTLVType::DocumentPath) == args.end() || args.find(WebArgTLVType::DocumentKind) == args.end() || args.find(WebArgTLVType::ApplicationID) == args.end()) { - status = ResultCode(-1); + status = RESULT_UNKNOWN; LOG_ERROR(Service_AM, "Missing necessary parameters for initialization!"); } @@ -476,7 +476,7 @@ void WebBrowser::InitializeOffline() { offline_romfs = GetApplicationRomFS(system, title_id, type); if (offline_romfs == nullptr) { - status = ResultCode(-1); + status = RESULT_UNKNOWN; LOG_ERROR(Service_AM, "Failed to find offline data for request!"); } @@ -496,7 +496,7 @@ void WebBrowser::ExecuteShop() { const auto check_optional_parameter = [this](const auto& p) { if (!p.has_value()) { LOG_ERROR(Service_AM, "Missing one or more necessary parameters for execution!"); - status = ResultCode(-1); + status = RESULT_UNKNOWN; return false; } diff --git a/src/core/hle/service/am/idle.cpp b/src/core/hle/service/am/idle.cpp index f814fe2c0..d256d57c8 100644 --- a/src/core/hle/service/am/idle.cpp +++ 
b/src/core/hle/service/am/idle.cpp @@ -10,7 +10,7 @@ IdleSys::IdleSys() : ServiceFramework{"idle:sys"} { // clang-format off static const FunctionInfo functions[] = { {0, nullptr, "GetAutoPowerDownEvent"}, - {1, nullptr, "Unknown1"}, + {1, nullptr, "IsAutoPowerDownRequested"}, {2, nullptr, "Unknown2"}, {3, nullptr, "SetHandlingContext"}, {4, nullptr, "LoadAndApplySettings"}, diff --git a/src/core/hle/service/am/omm.cpp b/src/core/hle/service/am/omm.cpp index 6ab3fb906..37389ccda 100644 --- a/src/core/hle/service/am/omm.cpp +++ b/src/core/hle/service/am/omm.cpp @@ -35,6 +35,8 @@ OMM::OMM() : ServiceFramework{"omm"} { {23, nullptr, "GetHdcpState"}, {24, nullptr, "ShowCardUpdateProcessing"}, {25, nullptr, "SetApplicationCecSettingsAndNotifyChanged"}, + {26, nullptr, "GetOperationModeSystemInfo"}, + {27, nullptr, "GetAppletFullAwakingSystemEvent"}, }; // clang-format on diff --git a/src/core/hle/service/aoc/aoc_u.cpp b/src/core/hle/service/aoc/aoc_u.cpp index f36ccbc49..4227a4adf 100644 --- a/src/core/hle/service/aoc/aoc_u.cpp +++ b/src/core/hle/service/aoc/aoc_u.cpp @@ -61,6 +61,7 @@ AOC_U::AOC_U(Core::System& system) {7, &AOC_U::PrepareAddOnContent, "PrepareAddOnContent"}, {8, &AOC_U::GetAddOnContentListChangedEvent, "GetAddOnContentListChangedEvent"}, {100, nullptr, "CreateEcPurchasedEventManager"}, + {101, nullptr, "CreatePermanentEcPurchasedEventManager"}, }; // clang-format on @@ -131,7 +132,7 @@ void AOC_U::ListAddOnContent(Kernel::HLERequestContext& ctx) { if (out.size() < offset) { IPC::ResponseBuilder rb{ctx, 2}; // TODO(DarkLordZach): Find the correct error code. 
- rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); return; } diff --git a/src/core/hle/service/audio/audctl.cpp b/src/core/hle/service/audio/audctl.cpp index 6a01d4d29..9e08e5346 100644 --- a/src/core/hle/service/audio/audctl.cpp +++ b/src/core/hle/service/audio/audctl.cpp @@ -38,6 +38,7 @@ AudCtl::AudCtl() : ServiceFramework{"audctl"} { {24, nullptr, "GetSystemOutputMasterVolume"}, {25, nullptr, "GetAudioVolumeDataForPlayReport"}, {26, nullptr, "UpdateHeadphoneSettings"}, + {27, nullptr, "SetVolumeMappingTableForDev"}, }; // clang-format on diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp index 6a29377e3..4fb2cbc4b 100644 --- a/src/core/hle/service/audio/audout_u.cpp +++ b/src/core/hle/service/audio/audout_u.cpp @@ -43,7 +43,8 @@ public: IAudioOut(Core::System& system, AudoutParams audio_params, AudioCore::AudioOut& audio_core, std::string&& device_name, std::string&& unique_name) : ServiceFramework("IAudioOut"), audio_core(audio_core), - device_name(std::move(device_name)), audio_params(audio_params) { + device_name(std::move(device_name)), + audio_params(audio_params), main_memory{system.Memory()} { // clang-format off static const FunctionInfo functions[] = { {0, &IAudioOut::GetAudioOutState, "GetAudioOutState"}, @@ -137,7 +138,7 @@ private: const u64 tag{rp.Pop<u64>()}; std::vector<s16> samples(audio_buffer.buffer_size / sizeof(s16)); - Memory::ReadBlock(audio_buffer.buffer, samples.data(), audio_buffer.buffer_size); + main_memory.ReadBlock(audio_buffer.buffer, samples.data(), audio_buffer.buffer_size); if (!audio_core.QueueBuffer(stream, tag, std::move(samples))) { IPC::ResponseBuilder rb{ctx, 2}; @@ -209,6 +210,7 @@ private: /// This is the event handle used to check if the audio buffer was released Kernel::EventPair buffer_event; + Memory::Memory& main_memory; }; AudOutU::AudOutU(Core::System& system_) : ServiceFramework("audout:u"), system{system_} { diff --git a/src/core/hle/service/audio/audren_u.cpp 
b/src/core/hle/service/audio/audren_u.cpp index 4ea7ade6e..82a5dbf14 100644 --- a/src/core/hle/service/audio/audren_u.cpp +++ b/src/core/hle/service/audio/audren_u.cpp @@ -49,8 +49,9 @@ public: system_event = Kernel::WritableEvent::CreateEventPair(system.Kernel(), "IAudioRenderer:SystemEvent"); - renderer = std::make_unique<AudioCore::AudioRenderer>( - system.CoreTiming(), audren_params, system_event.writable, instance_number); + renderer = std::make_unique<AudioCore::AudioRenderer>(system.CoreTiming(), system.Memory(), + audren_params, system_event.writable, + instance_number); } private: diff --git a/src/core/hle/service/audio/hwopus.cpp b/src/core/hle/service/audio/hwopus.cpp index cb4a1160d..cb839e4a2 100644 --- a/src/core/hle/service/audio/hwopus.cpp +++ b/src/core/hle/service/audio/hwopus.cpp @@ -80,7 +80,7 @@ private: LOG_ERROR(Audio, "Failed to decode opus data"); IPC::ResponseBuilder rb{ctx, 2}; // TODO(ogniK): Use correct error code - rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); return; } @@ -278,7 +278,7 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { LOG_ERROR(Audio, "Failed to create Opus decoder (error={}).", error); IPC::ResponseBuilder rb{ctx, 2}; // TODO(ogniK): Use correct error code - rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); return; } diff --git a/src/core/hle/service/bcat/backend/backend.cpp b/src/core/hle/service/bcat/backend/backend.cpp index dec0849b8..6f5ea095a 100644 --- a/src/core/hle/service/bcat/backend/backend.cpp +++ b/src/core/hle/service/bcat/backend/backend.cpp @@ -16,7 +16,7 @@ ProgressServiceBackend::ProgressServiceBackend(Kernel::KernelCore& kernel, kernel, std::string("ProgressServiceBackend:UpdateEvent:").append(event_name)); } -Kernel::SharedPtr<Kernel::ReadableEvent> ProgressServiceBackend::GetEvent() const { +std::shared_ptr<Kernel::ReadableEvent> ProgressServiceBackend::GetEvent() const { return event.readable; } diff --git a/src/core/hle/service/bcat/backend/backend.h 
b/src/core/hle/service/bcat/backend/backend.h index ea4b16ad0..48bbbe66f 100644 --- a/src/core/hle/service/bcat/backend/backend.h +++ b/src/core/hle/service/bcat/backend/backend.h @@ -98,7 +98,7 @@ public: private: explicit ProgressServiceBackend(Kernel::KernelCore& kernel, std::string_view event_name); - Kernel::SharedPtr<Kernel::ReadableEvent> GetEvent() const; + std::shared_ptr<Kernel::ReadableEvent> GetEvent() const; DeliveryCacheProgressImpl& GetImpl(); void SignalUpdate() const; diff --git a/src/core/hle/service/bcat/backend/boxcat.cpp b/src/core/hle/service/bcat/backend/boxcat.cpp index 918159e11..67e39a5c4 100644 --- a/src/core/hle/service/bcat/backend/boxcat.cpp +++ b/src/core/hle/service/bcat/backend/boxcat.cpp @@ -114,7 +114,7 @@ void HandleDownloadDisplayResult(const AM::Applets::AppletManager& applet_manage const auto& frontend{applet_manager.GetAppletFrontendSet()}; frontend.error->ShowCustomErrorText( - ResultCode(-1), "There was an error while attempting to use Boxcat.", + RESULT_UNKNOWN, "There was an error while attempting to use Boxcat.", DOWNLOAD_RESULT_LOG_MESSAGES[static_cast<std::size_t>(res)], [] {}); } @@ -255,7 +255,7 @@ private: using Digest = std::array<u8, 0x20>; static Digest DigestFile(std::vector<u8> bytes) { Digest out{}; - mbedtls_sha256(bytes.data(), bytes.size(), out.data(), 0); + mbedtls_sha256_ret(bytes.data(), bytes.size(), out.data(), 0); return out; } diff --git a/src/core/hle/service/bcat/module.cpp b/src/core/hle/service/bcat/module.cpp index 6d9d1527d..7ada67130 100644 --- a/src/core/hle/service/bcat/module.cpp +++ b/src/core/hle/service/bcat/module.cpp @@ -46,7 +46,7 @@ u64 GetCurrentBuildID(const Core::System::CurrentBuildProcessID& id) { BCATDigest DigestFile(const FileSys::VirtualFile& file) { BCATDigest out{}; const auto bytes = file->ReadAllBytes(); - mbedtls_md5(bytes.data(), bytes.size(), out.data()); + mbedtls_md5_ret(bytes.data(), bytes.size(), out.data()); return out; } @@ -87,7 +87,7 @@ struct 
DeliveryCacheDirectoryEntry { class IDeliveryCacheProgressService final : public ServiceFramework<IDeliveryCacheProgressService> { public: - IDeliveryCacheProgressService(Kernel::SharedPtr<Kernel::ReadableEvent> event, + IDeliveryCacheProgressService(std::shared_ptr<Kernel::ReadableEvent> event, const DeliveryCacheProgressImpl& impl) : ServiceFramework{"IDeliveryCacheProgressService"}, event(std::move(event)), impl(impl) { // clang-format off @@ -118,7 +118,7 @@ private: rb.Push(RESULT_SUCCESS); } - Kernel::SharedPtr<Kernel::ReadableEvent> event; + std::shared_ptr<Kernel::ReadableEvent> event; const DeliveryCacheProgressImpl& impl; }; @@ -137,14 +137,20 @@ public: {10200, nullptr, "CancelSyncDeliveryCacheRequest"}, {20100, nullptr, "RequestSyncDeliveryCacheWithApplicationId"}, {20101, nullptr, "RequestSyncDeliveryCacheWithApplicationIdAndDirectoryName"}, + {20300, nullptr, "GetDeliveryCacheStorageUpdateNotifier"}, + {20301, nullptr, "RequestSuspendDeliveryTask"}, + {20400, nullptr, "RegisterSystemApplicationDeliveryTask"}, + {20401, nullptr, "UnregisterSystemApplicationDeliveryTask"}, {30100, &IBcatService::SetPassphrase, "SetPassphrase"}, {30200, nullptr, "RegisterBackgroundDeliveryTask"}, {30201, nullptr, "UnregisterBackgroundDeliveryTask"}, {30202, nullptr, "BlockDeliveryTask"}, {30203, nullptr, "UnblockDeliveryTask"}, + {30300, nullptr, "RegisterSystemApplicationDeliveryTasks"}, {90100, nullptr, "EnumerateBackgroundDeliveryTask"}, {90200, nullptr, "GetDeliveryList"}, {90201, &IBcatService::ClearDeliveryCacheStorage, "ClearDeliveryCacheStorage"}, + {90202, nullptr, "ClearDeliveryTaskSubscriptionStatus"}, {90300, nullptr, "GetPushNotificationLog"}, }; // clang-format on diff --git a/src/core/hle/service/btdrv/btdrv.cpp b/src/core/hle/service/btdrv/btdrv.cpp index 4574d9572..40a06c9fd 100644 --- a/src/core/hle/service/btdrv/btdrv.cpp +++ b/src/core/hle/service/btdrv/btdrv.cpp @@ -155,6 +155,7 @@ public: {98, nullptr, "SetLeScanParameter"}, {256, nullptr, 
"GetIsManufacturingMode"}, {257, nullptr, "EmulateBluetoothCrash"}, + {258, nullptr, "GetBleChannelMap"}, }; // clang-format on diff --git a/src/core/hle/service/erpt/erpt.cpp b/src/core/hle/service/erpt/erpt.cpp index d9b32954e..4ec8c3093 100644 --- a/src/core/hle/service/erpt/erpt.cpp +++ b/src/core/hle/service/erpt/erpt.cpp @@ -24,6 +24,8 @@ public: {6, nullptr, "SubmitMultipleCategoryContext"}, {7, nullptr, "UpdateApplicationLaunchTime"}, {8, nullptr, "ClearApplicationLaunchTime"}, + {9, nullptr, "SubmitAttachment"}, + {10, nullptr, "CreateReportWithAttachments"}, }; // clang-format on @@ -38,6 +40,7 @@ public: static const FunctionInfo functions[] = { {0, nullptr, "OpenReport"}, {1, nullptr, "OpenManager"}, + {2, nullptr, "OpenAttachment"}, }; // clang-format on diff --git a/src/core/hle/service/es/es.cpp b/src/core/hle/service/es/es.cpp index f77ddd739..df00ae625 100644 --- a/src/core/hle/service/es/es.cpp +++ b/src/core/hle/service/es/es.cpp @@ -52,6 +52,8 @@ public: {34, nullptr, "GetEncryptedTicketSize"}, {35, nullptr, "GetEncryptedTicketData"}, {36, nullptr, "DeleteAllInactiveELicenseRequiredPersonalizedTicket"}, + {37, nullptr, "OwnTicket2"}, + {38, nullptr, "OwnTicket3"}, {503, nullptr, "GetTitleKey"}, }; // clang-format on diff --git a/src/core/hle/service/filesystem/filesystem.cpp b/src/core/hle/service/filesystem/filesystem.cpp index 11e5c56b7..102017d73 100644 --- a/src/core/hle/service/filesystem/filesystem.cpp +++ b/src/core/hle/service/filesystem/filesystem.cpp @@ -58,11 +58,11 @@ ResultCode VfsDirectoryServiceWrapper::CreateFile(const std::string& path_, u64 auto file = dir->CreateFile(FileUtil::GetFilename(path)); if (file == nullptr) { // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } if (!file->Resize(size)) { // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return RESULT_SUCCESS; } @@ -80,7 +80,7 @@ ResultCode 
VfsDirectoryServiceWrapper::DeleteFile(const std::string& path_) cons } if (!dir->DeleteFile(FileUtil::GetFilename(path))) { // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return RESULT_SUCCESS; @@ -94,7 +94,7 @@ ResultCode VfsDirectoryServiceWrapper::CreateDirectory(const std::string& path_) auto new_dir = dir->CreateSubdirectory(FileUtil::GetFilename(path)); if (new_dir == nullptr) { // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return RESULT_SUCCESS; } @@ -104,7 +104,7 @@ ResultCode VfsDirectoryServiceWrapper::DeleteDirectory(const std::string& path_) auto dir = GetDirectoryRelativeWrapped(backing, FileUtil::GetParentPath(path)); if (!dir->DeleteSubdirectory(FileUtil::GetFilename(path))) { // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return RESULT_SUCCESS; } @@ -114,7 +114,7 @@ ResultCode VfsDirectoryServiceWrapper::DeleteDirectoryRecursively(const std::str auto dir = GetDirectoryRelativeWrapped(backing, FileUtil::GetParentPath(path)); if (!dir->DeleteSubdirectoryRecursive(FileUtil::GetFilename(path))) { // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return RESULT_SUCCESS; } @@ -125,7 +125,7 @@ ResultCode VfsDirectoryServiceWrapper::CleanDirectoryRecursively(const std::stri if (!dir->CleanSubdirectoryRecursive(FileUtil::GetFilename(sanitized_path))) { // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return RESULT_SUCCESS; @@ -142,7 +142,7 @@ ResultCode VfsDirectoryServiceWrapper::RenameFile(const std::string& src_path_, return FileSys::ERROR_PATH_NOT_FOUND; if (!src->Rename(FileUtil::GetFilename(dest_path))) { // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return RESULT_SUCCESS; } @@ -160,7 
+160,7 @@ ResultCode VfsDirectoryServiceWrapper::RenameFile(const std::string& src_path_, if (!src->GetContainingDirectory()->DeleteFile(FileUtil::GetFilename(src_path))) { // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return RESULT_SUCCESS; @@ -177,7 +177,7 @@ ResultCode VfsDirectoryServiceWrapper::RenameDirectory(const std::string& src_pa return FileSys::ERROR_PATH_NOT_FOUND; if (!src->Rename(FileUtil::GetFilename(dest_path))) { // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return RESULT_SUCCESS; } @@ -189,7 +189,7 @@ ResultCode VfsDirectoryServiceWrapper::RenameDirectory(const std::string& src_pa src_path, dest_path); // TODO(DarkLordZach): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } ResultVal<FileSys::VirtualFile> VfsDirectoryServiceWrapper::OpenFile(const std::string& path_, @@ -287,7 +287,7 @@ ResultVal<FileSys::VirtualFile> FileSystemController::OpenRomFSCurrentProcess() if (romfs_factory == nullptr) { // TODO(bunnei): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return romfs_factory->OpenCurrentProcess(system.CurrentProcess()->GetTitleID()); @@ -300,7 +300,7 @@ ResultVal<FileSys::VirtualFile> FileSystemController::OpenRomFS( if (romfs_factory == nullptr) { // TODO(bunnei): Find a better error code for this - return ResultCode(-1); + return RESULT_UNKNOWN; } return romfs_factory->Open(title_id, storage_id, type); diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp index cbd5466c1..55d62fc5e 100644 --- a/src/core/hle/service/filesystem/fsp_srv.cpp +++ b/src/core/hle/service/filesystem/fsp_srv.cpp @@ -256,8 +256,8 @@ public: // TODO(DarkLordZach): Verify that this is the correct behavior. // Build entry index now to save time later. 
- BuildEntryIndex(entries, backend->GetFiles(), FileSys::File); - BuildEntryIndex(entries, backend->GetSubdirectories(), FileSys::Directory); + BuildEntryIndex(entries, backend->GetFiles(), FileSys::EntryType::File); + BuildEntryIndex(entries, backend->GetSubdirectories(), FileSys::EntryType::Directory); } private: @@ -391,13 +391,10 @@ public: } void RenameFile(Kernel::HLERequestContext& ctx) { - std::vector<u8> buffer; - buffer.resize(ctx.BufferDescriptorX()[0].Size()); - Memory::ReadBlock(ctx.BufferDescriptorX()[0].Address(), buffer.data(), buffer.size()); + std::vector<u8> buffer = ctx.ReadBuffer(0); const std::string src_name = Common::StringFromBuffer(buffer); - buffer.resize(ctx.BufferDescriptorX()[1].Size()); - Memory::ReadBlock(ctx.BufferDescriptorX()[1].Address(), buffer.data(), buffer.size()); + buffer = ctx.ReadBuffer(1); const std::string dst_name = Common::StringFromBuffer(buffer); LOG_DEBUG(Service_FS, "called. file '{}' to file '{}'", src_name, dst_name); @@ -680,6 +677,7 @@ FSP_SRV::FSP_SRV(FileSystemController& fsc, const Core::Reporter& reporter) {33, nullptr, "DeleteCacheStorage"}, {34, nullptr, "GetCacheStorageSize"}, {35, nullptr, "CreateSaveDataFileSystemByHashSalt"}, + {36, nullptr, "OpenHostFileSystemWithOption"}, {51, &FSP_SRV::OpenSaveDataFileSystem, "OpenSaveDataFileSystem"}, {52, nullptr, "OpenSaveDataFileSystemBySystemSaveDataId"}, {53, &FSP_SRV::OpenReadOnlySaveDataFileSystem, "OpenReadOnlySaveDataFileSystem"}, @@ -694,11 +692,14 @@ FSP_SRV::FSP_SRV(FileSystemController& fsc, const Core::Reporter& reporter) {66, nullptr, "WriteSaveDataFileSystemExtraData2"}, {67, nullptr, "FindSaveDataWithFilter"}, {68, nullptr, "OpenSaveDataInfoReaderBySaveDataFilter"}, + {69, nullptr, "ReadSaveDataFileSystemExtraDataBySaveDataAttribute"}, + {70, nullptr, "WriteSaveDataFileSystemExtraDataBySaveDataAttribute"}, {80, nullptr, "OpenSaveDataMetaFile"}, {81, nullptr, "OpenSaveDataTransferManager"}, {82, nullptr, "OpenSaveDataTransferManagerVersion2"}, 
{83, nullptr, "OpenSaveDataTransferProhibiterForCloudBackUp"}, {84, nullptr, "ListApplicationAccessibleSaveDataOwnerId"}, + {85, nullptr, "OpenSaveDataTransferManagerForSaveDataRepair"}, {100, nullptr, "OpenImageDirectoryFileSystem"}, {110, nullptr, "OpenContentStorageFileSystem"}, {120, nullptr, "OpenCloudBackupWorkStorageFileSystem"}, @@ -756,6 +757,8 @@ FSP_SRV::FSP_SRV(FileSystemController& fsc, const Core::Reporter& reporter) {1009, nullptr, "GetAndClearMemoryReportInfo"}, {1010, nullptr, "SetDataStorageRedirectTarget"}, {1011, &FSP_SRV::GetAccessLogVersionInfo, "GetAccessLogVersionInfo"}, + {1012, nullptr, "GetFsStackUsage"}, + {1013, nullptr, "UnsetSaveDataRootPath"}, {1100, nullptr, "OverrideSaveDataTransferTokenSignVerificationKey"}, {1110, nullptr, "CorruptSaveDataFileSystemBySaveDataSpaceId2"}, {1200, nullptr, "OpenMultiCommitManager"}, @@ -785,7 +788,7 @@ void FSP_SRV::OpenFileSystemWithPatch(Kernel::HLERequestContext& ctx) { static_cast<u8>(type), title_id); IPC::ResponseBuilder rb{ctx, 2, 0, 0}; - rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); } void FSP_SRV::OpenSdCardFileSystem(Kernel::HLERequestContext& ctx) { @@ -891,7 +894,7 @@ void FSP_SRV::OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx) { // TODO (bunnei): Find the right error code to use here LOG_CRITICAL(Service_FS, "no file system interface available!"); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); return; } @@ -928,7 +931,7 @@ void FSP_SRV::OpenDataStorageByDataId(Kernel::HLERequestContext& ctx) { "could not open data storage with title_id={:016X}, storage_id={:02X}", title_id, static_cast<u8>(storage_id)); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); return; } diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp index 1a0214f08..219176c31 100644 --- a/src/core/hle/service/friend/friend.cpp +++ b/src/core/hle/service/friend/friend.cpp @@ -60,6 
+60,9 @@ public: {20801, nullptr, "SyncUserSetting"}, {20900, nullptr, "RequestListSummaryOverlayNotification"}, {21000, nullptr, "GetExternalApplicationCatalog"}, + {22000, nullptr, "GetReceivedFriendInvitationList"}, + {22001, nullptr, "GetReceivedFriendInvitationDetailedInfo"}, + {22010, nullptr, "GetReceivedFriendInvitationCountCache"}, {30100, nullptr, "DropFriendNewlyFlags"}, {30101, nullptr, "DeleteFriend"}, {30110, nullptr, "DropFriendNewlyFlag"}, @@ -91,6 +94,8 @@ public: {30812, nullptr, "ChangePlayLogPermission"}, {30820, nullptr, "IssueFriendCode"}, {30830, nullptr, "ClearPlayLog"}, + {30900, nullptr, "SendFriendInvitation"}, + {30910, nullptr, "ReadFriendInvitation"}, {49900, nullptr, "DeleteNetworkServiceAccountCache"}, }; // clang-format on diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp index 79fff517e..4d952adc0 100644 --- a/src/core/hle/service/hid/controllers/npad.cpp +++ b/src/core/hle/service/hid/controllers/npad.cpp @@ -501,8 +501,7 @@ void Controller_NPad::VibrateController(const std::vector<u32>& controller_ids, last_processed_vibration = vibrations.back(); } -Kernel::SharedPtr<Kernel::ReadableEvent> Controller_NPad::GetStyleSetChangedEvent( - u32 npad_id) const { +std::shared_ptr<Kernel::ReadableEvent> Controller_NPad::GetStyleSetChangedEvent(u32 npad_id) const { // TODO(ogniK): Figure out the best time to signal this event. This event seems that it should // be signalled at least once, and signaled after a new controller is connected? 
const auto& styleset_event = styleset_changed_events[NPadIdToIndex(npad_id)]; diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h index 16c4caa1f..931f03430 100644 --- a/src/core/hle/service/hid/controllers/npad.h +++ b/src/core/hle/service/hid/controllers/npad.h @@ -109,7 +109,7 @@ public: void VibrateController(const std::vector<u32>& controller_ids, const std::vector<Vibration>& vibrations); - Kernel::SharedPtr<Kernel::ReadableEvent> GetStyleSetChangedEvent(u32 npad_id) const; + std::shared_ptr<Kernel::ReadableEvent> GetStyleSetChangedEvent(u32 npad_id) const; Vibration GetLastVibration() const; void AddNewController(NPadControllerType controller); diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index ecc130f6c..89bf8b815 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp @@ -77,15 +77,14 @@ IAppletResource::IAppletResource(Core::System& system) GetController<Controller_Stubbed>(HidController::Unknown3).SetCommonHeaderOffset(0x5000); // Register update callbacks - auto& core_timing = system.CoreTiming(); pad_update_event = - core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) { + Core::Timing::CreateEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) { UpdateControllers(userdata, cycles_late); }); // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?) 
- core_timing.ScheduleEvent(pad_update_ticks, pad_update_event); + system.CoreTiming().ScheduleEvent(pad_update_ticks, pad_update_event); ReloadInputDevices(); } @@ -215,6 +214,8 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) { {132, nullptr, "EnableUnintendedHomeButtonInputProtection"}, {133, nullptr, "SetNpadJoyAssignmentModeSingleWithDestination"}, {134, nullptr, "SetNpadAnalogStickUseCenterClamp"}, + {135, nullptr, "SetNpadCaptureButtonAssignment"}, + {136, nullptr, "ClearNpadCaptureButtonAssignment"}, {200, &Hid::GetVibrationDeviceInfo, "GetVibrationDeviceInfo"}, {201, &Hid::SendVibrationValue, "SendVibrationValue"}, {202, &Hid::GetActualVibrationValue, "GetActualVibrationValue"}, @@ -245,6 +246,8 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) { {404, nullptr, "HasLeftRightBattery"}, {405, nullptr, "GetNpadInterfaceType"}, {406, nullptr, "GetNpadLeftRightInterfaceType"}, + {407, nullptr, "GetNpadOfHighestBatteryLevelForJoyLeft"}, + {408, nullptr, "GetNpadOfHighestBatteryLevelForJoyRight"}, {500, nullptr, "GetPalmaConnectionHandle"}, {501, nullptr, "InitializePalma"}, {502, nullptr, "AcquirePalmaOperationCompleteEvent"}, @@ -272,8 +275,13 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) { {524, nullptr, "PairPalma"}, {525, &Hid::SetPalmaBoostMode, "SetPalmaBoostMode"}, {526, nullptr, "CancelWritePalmaWaveEntry"}, + {527, nullptr, "EnablePalmaBoostMode"}, + {528, nullptr, "GetPalmaBluetoothAddress"}, + {529, nullptr, "SetDisallowedPalmaConnection"}, {1000, nullptr, "SetNpadCommunicationMode"}, {1001, nullptr, "GetNpadCommunicationMode"}, + {1002, nullptr, "SetTouchScreenConfiguration"}, + {1003, nullptr, "IsFirmwareUpdateNeededForNotification"}, }; // clang-format on @@ -969,6 +977,9 @@ public: {310, nullptr, "GetMaskedSupportedNpadStyleSet"}, {311, nullptr, "SetNpadPlayerLedBlinkingDevice"}, {312, nullptr, "SetSupportedNpadStyleSetAll"}, + {313, nullptr, 
"GetNpadCaptureButtonAssignment"}, + {314, nullptr, "GetAppletFooterUiType"}, + {315, nullptr, "GetAppletDetailedUiType"}, {321, nullptr, "GetUniquePadsFromNpad"}, {322, nullptr, "GetIrSensorState"}, {323, nullptr, "GetXcdHandleForNpadWithIrSensor"}, @@ -984,6 +995,8 @@ public: {513, nullptr, "EndPermitVibrationSession"}, {520, nullptr, "EnableHandheldHids"}, {521, nullptr, "DisableHandheldHids"}, + {522, nullptr, "SetJoyConRailEnabled"}, + {523, nullptr, "IsJoyConRailEnabled"}, {540, nullptr, "AcquirePlayReportControllerUsageUpdateEvent"}, {541, nullptr, "GetPlayReportControllerUsages"}, {542, nullptr, "AcquirePlayReportRegisteredDeviceUpdateEvent"}, @@ -1010,6 +1023,7 @@ public: {809, nullptr, "GetUniquePadSerialNumber"}, {810, nullptr, "GetUniquePadControllerNumber"}, {811, nullptr, "GetSixAxisSensorUserCalibrationStage"}, + {812, nullptr, "GetConsoleUniqueSixAxisSensorHandle"}, {821, nullptr, "StartAnalogStickManualCalibration"}, {822, nullptr, "RetryCurrentAnalogStickManualCalibrationStage"}, {823, nullptr, "CancelAnalogStickManualCalibration"}, @@ -1020,6 +1034,8 @@ public: {828, nullptr, "IsAnalogStickInReleasePosition"}, {829, nullptr, "IsAnalogStickInCircumference"}, {830, nullptr, "SetNotificationLedPattern"}, + {831, nullptr, "SetNotificationLedPatternWithTimeout"}, + {832, nullptr, "PrepareHidsForNotificationWake"}, {850, nullptr, "IsUsbFullKeyControllerEnabled"}, {851, nullptr, "EnableUsbFullKeyController"}, {852, nullptr, "IsUsbConnected"}, @@ -1049,6 +1065,13 @@ public: {1132, nullptr, "CheckUsbFirmwareUpdateRequired"}, {1133, nullptr, "StartUsbFirmwareUpdate"}, {1134, nullptr, "GetUsbFirmwareUpdateState"}, + {1150, nullptr, "SetTouchScreenMagnification"}, + {1151, nullptr, "GetTouchScreenFirmwareVersion"}, + {1152, nullptr, "SetTouchScreenDefaultConfiguration"}, + {1153, nullptr, "GetTouchScreenDefaultConfiguration"}, + {1154, nullptr, "IsFirmwareAvailableForNotification"}, + {1155, nullptr, "SetForceHandheldStyleVibration"}, + {1156, nullptr, 
"SendConnectionTriggerWithoutTimeoutEvent"}, }; // clang-format on diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h index f08e036a3..ad20f147c 100644 --- a/src/core/hle/service/hid/hid.h +++ b/src/core/hle/service/hid/hid.h @@ -67,9 +67,9 @@ private: void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx); void UpdateControllers(u64 userdata, s64 cycles_late); - Kernel::SharedPtr<Kernel::SharedMemory> shared_mem; + std::shared_ptr<Kernel::SharedMemory> shared_mem; - Core::Timing::EventType* pad_update_event; + std::shared_ptr<Core::Timing::EventType> pad_update_event; Core::System& system; std::array<std::unique_ptr<ControllerBase>, static_cast<size_t>(HidController::MaxControllers)> diff --git a/src/core/hle/service/hid/irs.h b/src/core/hle/service/hid/irs.h index eb4e898dd..8918ad6ca 100644 --- a/src/core/hle/service/hid/irs.h +++ b/src/core/hle/service/hid/irs.h @@ -37,7 +37,7 @@ private: void RunIrLedProcessor(Kernel::HLERequestContext& ctx); void StopImageProcessorAsync(Kernel::HLERequestContext& ctx); void ActivateIrsensorWithFunctionLevel(Kernel::HLERequestContext& ctx); - Kernel::SharedPtr<Kernel::SharedMemory> shared_mem; + std::shared_ptr<Kernel::SharedMemory> shared_mem; const u32 device_handle{0xABCD}; Core::System& system; }; diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index 499376bfc..157aeec88 100644 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp @@ -140,9 +140,10 @@ public: rb.Push(ERROR_INVALID_SIZE); return; } + // Read NRR data from memory std::vector<u8> nrr_data(nrr_size); - Memory::ReadBlock(nrr_address, nrr_data.data(), nrr_size); + system.Memory().ReadBlock(nrr_address, nrr_data.data(), nrr_size); NRRHeader header; std::memcpy(&header, nrr_data.data(), sizeof(NRRHeader)); @@ -291,10 +292,10 @@ public: // Read NRO data from memory std::vector<u8> nro_data(nro_size); - Memory::ReadBlock(nro_address, nro_data.data(), nro_size); + 
system.Memory().ReadBlock(nro_address, nro_data.data(), nro_size); SHA256Hash hash{}; - mbedtls_sha256(nro_data.data(), nro_data.size(), hash.data(), 0); + mbedtls_sha256_ret(nro_data.data(), nro_data.size(), hash.data(), 0); // NRO Hash is already loaded if (std::any_of(nro.begin(), nro.end(), [&hash](const std::pair<VAddr, NROInfo>& info) { diff --git a/src/core/hle/service/lm/lm.cpp b/src/core/hle/service/lm/lm.cpp index 435f2d286..346c8f899 100644 --- a/src/core/hle/service/lm/lm.cpp +++ b/src/core/hle/service/lm/lm.cpp @@ -17,7 +17,8 @@ namespace Service::LM { class ILogger final : public ServiceFramework<ILogger> { public: - ILogger(Manager& manager) : ServiceFramework("ILogger"), manager(manager) { + explicit ILogger(Manager& manager_, Memory::Memory& memory_) + : ServiceFramework("ILogger"), manager{manager_}, memory{memory_} { static const FunctionInfo functions[] = { {0, &ILogger::Log, "Log"}, {1, &ILogger::SetDestination, "SetDestination"}, @@ -35,15 +36,15 @@ private: MessageHeader header{}; VAddr addr{ctx.BufferDescriptorX()[0].Address()}; const VAddr end_addr{addr + ctx.BufferDescriptorX()[0].size}; - Memory::ReadBlock(addr, &header, sizeof(MessageHeader)); + memory.ReadBlock(addr, &header, sizeof(MessageHeader)); addr += sizeof(MessageHeader); FieldMap fields; while (addr < end_addr) { - const auto field = static_cast<Field>(Memory::Read8(addr++)); - const auto length = Memory::Read8(addr++); + const auto field = static_cast<Field>(memory.Read8(addr++)); + const auto length = memory.Read8(addr++); - if (static_cast<Field>(Memory::Read8(addr)) == Field::Skip) { + if (static_cast<Field>(memory.Read8(addr)) == Field::Skip) { ++addr; } @@ -54,7 +55,7 @@ private: } std::vector<u8> data(length); - Memory::ReadBlock(addr, data.data(), length); + memory.ReadBlock(addr, data.data(), length); fields.emplace(field, std::move(data)); } @@ -74,11 +75,13 @@ private: } Manager& manager; + Memory::Memory& memory; }; class LM final : public ServiceFramework<LM> { 
public: - explicit LM(Manager& manager) : ServiceFramework{"lm"}, manager(manager) { + explicit LM(Manager& manager_, Memory::Memory& memory_) + : ServiceFramework{"lm"}, manager{manager_}, memory{memory_} { // clang-format off static const FunctionInfo functions[] = { {0, &LM::OpenLogger, "OpenLogger"}, @@ -94,14 +97,16 @@ private: IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<ILogger>(manager); + rb.PushIpcInterface<ILogger>(manager, memory); } Manager& manager; + Memory::Memory& memory; }; void InstallInterfaces(Core::System& system) { - std::make_shared<LM>(system.GetLogManager())->InstallAsService(system.ServiceManager()); + std::make_shared<LM>(system.GetLogManager(), system.Memory()) + ->InstallAsService(system.ServiceManager()); } } // namespace Service::LM diff --git a/src/core/hle/service/mii/mii.cpp b/src/core/hle/service/mii/mii.cpp index 0b3923ad9..a128edb43 100644 --- a/src/core/hle/service/mii/mii.cpp +++ b/src/core/hle/service/mii/mii.cpp @@ -50,6 +50,8 @@ public: {21, &IDatabaseService::GetIndex, "GetIndex"}, {22, &IDatabaseService::SetInterfaceVersion, "SetInterfaceVersion"}, {23, nullptr, "Convert"}, + {24, nullptr, "ConvertCoreDataToCharInfo"}, + {25, nullptr, "ConvertCharInfoToCoreData"}, }; // clang-format on @@ -242,7 +244,7 @@ private: const auto index = db.IndexOf(uuid); if (index > MAX_MIIS) { // TODO(DarkLordZach): Find a better error code - rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); rb.Push(index); } else { rb.Push(RESULT_SUCCESS); @@ -268,7 +270,7 @@ private: IPC::ResponseBuilder rb{ctx, 2}; // TODO(DarkLordZach): Find a better error code - rb.Push(success ? RESULT_SUCCESS : ResultCode(-1)); + rb.Push(success ? RESULT_SUCCESS : RESULT_UNKNOWN); } void AddOrReplace(Kernel::HLERequestContext& ctx) { @@ -282,7 +284,7 @@ private: IPC::ResponseBuilder rb{ctx, 2}; // TODO(DarkLordZach): Find a better error code - rb.Push(success ? RESULT_SUCCESS : ResultCode(-1)); + rb.Push(success ? 
RESULT_SUCCESS : RESULT_UNKNOWN); } void Delete(Kernel::HLERequestContext& ctx) { diff --git a/src/core/hle/service/ncm/ncm.cpp b/src/core/hle/service/ncm/ncm.cpp index b405a4b66..89e283ca5 100644 --- a/src/core/hle/service/ncm/ncm.cpp +++ b/src/core/hle/service/ncm/ncm.cpp @@ -61,7 +61,8 @@ public: {5, nullptr, "RegisterHtmlDocumentPath"}, {6, nullptr, "UnregisterHtmlDocumentPath"}, {7, nullptr, "RedirectHtmlDocumentPath"}, - {8, nullptr, ""}, + {8, nullptr, "Refresh"}, + {9, nullptr, "RefreshExcluding"}, }; // clang-format on @@ -77,6 +78,8 @@ public: {0, nullptr, "ResolveAddOnContentPath"}, {1, nullptr, "RegisterAddOnContentStorage"}, {2, nullptr, "UnregisterAllAddOnContentPath"}, + {3, nullptr, "RefreshApplicationAddOnContent"}, + {4, nullptr, "UnregisterApplicationAddOnContent"}, }; // clang-format on @@ -118,6 +121,7 @@ public: {10, nullptr, "InactivateContentStorage"}, {11, nullptr, "ActivateContentMetaDatabase"}, {12, nullptr, "InactivateContentMetaDatabase"}, + {13, nullptr, "InvalidateRightsIdCache"}, }; // clang-format on diff --git a/src/core/hle/service/nfc/nfc.cpp b/src/core/hle/service/nfc/nfc.cpp index ca88bf97f..b7b34ce7e 100644 --- a/src/core/hle/service/nfc/nfc.cpp +++ b/src/core/hle/service/nfc/nfc.cpp @@ -215,6 +215,7 @@ public: {411, nullptr, "AttachActivateEvent"}, {412, nullptr, "AttachDeactivateEvent"}, {500, nullptr, "SetNfcEnabled"}, + {510, nullptr, "OutputTestWave"}, {1000, nullptr, "ReadMifare"}, {1001, nullptr, "WriteMifare"}, {1300, nullptr, "SendCommandByPassThrough"}, diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp index 795d7b716..4b79eb81d 100644 --- a/src/core/hle/service/nfp/nfp.cpp +++ b/src/core/hle/service/nfp/nfp.cpp @@ -16,10 +16,7 @@ #include "core/hle/service/nfp/nfp_user.h" namespace Service::NFP { - namespace ErrCodes { -[[maybe_unused]] constexpr ResultCode ERR_TAG_FAILED(ErrorModule::NFP, - -1); // TODO(ogniK): Find the actual error code constexpr ResultCode 
ERR_NO_APPLICATION_AREA(ErrorModule::NFP, 152); } // namespace ErrCodes @@ -192,7 +189,7 @@ private: LOG_DEBUG(Service_NFP, "called"); auto nfc_event = nfp_interface.GetNFCEvent(); - if (!nfc_event->ShouldWait(Kernel::GetCurrentThread()) && !has_attached_handle) { + if (!nfc_event->ShouldWait(&ctx.GetThread()) && !has_attached_handle) { device_state = DeviceState::TagFound; nfc_event->Clear(); } @@ -345,7 +342,7 @@ bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) { return true; } -const Kernel::SharedPtr<Kernel::ReadableEvent>& Module::Interface::GetNFCEvent() const { +const std::shared_ptr<Kernel::ReadableEvent>& Module::Interface::GetNFCEvent() const { return nfc_tag_load.readable; } diff --git a/src/core/hle/service/nfp/nfp.h b/src/core/hle/service/nfp/nfp.h index 9718ef745..200013795 100644 --- a/src/core/hle/service/nfp/nfp.h +++ b/src/core/hle/service/nfp/nfp.h @@ -34,7 +34,7 @@ public: void CreateUserInterface(Kernel::HLERequestContext& ctx); bool LoadAmiibo(const std::vector<u8>& buffer); - const Kernel::SharedPtr<Kernel::ReadableEvent>& GetNFCEvent() const; + const std::shared_ptr<Kernel::ReadableEvent>& GetNFCEvent() const; const AmiiboFile& GetAmiiboBuffer() const; private: diff --git a/src/core/hle/service/nifm/nifm.cpp b/src/core/hle/service/nifm/nifm.cpp index feb176668..767158444 100644 --- a/src/core/hle/service/nifm/nifm.cpp +++ b/src/core/hle/service/nifm/nifm.cpp @@ -222,6 +222,7 @@ private: IGeneralService::IGeneralService(Core::System& system) : ServiceFramework("IGeneralService"), system(system) { + // clang-format off static const FunctionInfo functions[] = { {1, &IGeneralService::GetClientId, "GetClientId"}, {2, &IGeneralService::CreateScanRequest, "CreateScanRequest"}, @@ -260,7 +261,14 @@ IGeneralService::IGeneralService(Core::System& system) {36, nullptr, "GetCurrentAccessPoint"}, {37, nullptr, "Shutdown"}, {38, nullptr, "GetAllowedChannels"}, + {39, nullptr, "NotifyApplicationSuspended"}, + {40, nullptr, 
"SetAcceptableNetworkTypeFlag"}, + {41, nullptr, "GetAcceptableNetworkTypeFlag"}, + {42, nullptr, "NotifyConnectionStateChanged"}, + {43, nullptr, "SetWowlDelayedWakeTime"}, }; + // clang-format on + RegisterHandlers(functions); } diff --git a/src/core/hle/service/nim/nim.cpp b/src/core/hle/service/nim/nim.cpp index 7d6cf2070..e85f123e2 100644 --- a/src/core/hle/service/nim/nim.cpp +++ b/src/core/hle/service/nim/nim.cpp @@ -116,6 +116,8 @@ public: {500, nullptr, "RequestSyncTicket"}, {501, nullptr, "RequestDownloadTicket"}, {502, nullptr, "RequestDownloadTicketForPrepurchasedContents"}, + {503, nullptr, "RequestSyncTicket"}, + {504, nullptr, "RequestDownloadTicketForPrepurchasedContents2"}, }; // clang-format on diff --git a/src/core/hle/service/npns/npns.cpp b/src/core/hle/service/npns/npns.cpp index 8751522ca..aa171473b 100644 --- a/src/core/hle/service/npns/npns.cpp +++ b/src/core/hle/service/npns/npns.cpp @@ -44,6 +44,10 @@ public: {113, nullptr, "DestroyJid"}, {114, nullptr, "AttachJid"}, {115, nullptr, "DetachJid"}, + {120, nullptr, "CreateNotificationReceiver"}, + {151, nullptr, "GetStateWithHandover"}, + {152, nullptr, "GetStateChangeEventWithHandover"}, + {153, nullptr, "GetDropEventWithHandover"}, {201, nullptr, "RequestChangeStateForceTimed"}, {202, nullptr, "RequestChangeStateForceAsync"}, }; @@ -74,6 +78,9 @@ public: {104, nullptr, "GetStatistics"}, {111, nullptr, "GetJid"}, {120, nullptr, "CreateNotificationReceiver"}, + {151, nullptr, "GetStateWithHandover"}, + {152, nullptr, "GetStateChangeEventWithHandover"}, + {153, nullptr, "GetDropEventWithHandover"}, }; // clang-format on diff --git a/src/core/hle/service/ns/ns.cpp b/src/core/hle/service/ns/ns.cpp index 15c156ce1..fdab3cf78 100644 --- a/src/core/hle/service/ns/ns.cpp +++ b/src/core/hle/service/ns/ns.cpp @@ -106,6 +106,7 @@ IApplicationManagerInterface::IApplicationManagerInterface() {96, nullptr, "AcquireApplicationLaunchInfo"}, {97, nullptr, "GetMainApplicationProgramIndex2"}, {98, nullptr, 
"EnableApplicationAllThreadDumpOnCrash"}, + {99, nullptr, "LaunchDevMenu"}, {100, nullptr, "ResetToFactorySettings"}, {101, nullptr, "ResetToFactorySettingsWithoutUserSaveData"}, {102, nullptr, "ResetToFactorySettingsForRefurbishment"}, @@ -130,6 +131,8 @@ IApplicationManagerInterface::IApplicationManagerInterface() {404, nullptr, "InvalidateApplicationControlCache"}, {405, nullptr, "ListApplicationControlCacheEntryInfo"}, {406, nullptr, "GetApplicationControlProperty"}, + {407, nullptr, "ListApplicationTitle"}, + {408, nullptr, "ListApplicationIcon"}, {502, nullptr, "RequestCheckGameCardRegistration"}, {503, nullptr, "RequestGameCardRegistrationGoldPoint"}, {504, nullptr, "RequestRegisterGameCard"}, @@ -138,6 +141,7 @@ IApplicationManagerInterface::IApplicationManagerInterface() {507, nullptr, "EnsureGameCardAccess"}, {508, nullptr, "GetLastGameCardMountFailureResult"}, {509, nullptr, "ListApplicationIdOnGameCard"}, + {510, nullptr, "GetGameCardPlatformRegion"}, {600, nullptr, "CountApplicationContentMeta"}, {601, nullptr, "ListApplicationContentMetaStatus"}, {602, nullptr, "ListAvailableAddOnContent"}, @@ -168,6 +172,9 @@ IApplicationManagerInterface::IApplicationManagerInterface() {910, nullptr, "HasApplicationRecord"}, {911, nullptr, "SetPreInstalledApplication"}, {912, nullptr, "ClearPreInstalledApplicationFlag"}, + {913, nullptr, "ListAllApplicationRecord"}, + {914, nullptr, "HideApplicationRecord"}, + {915, nullptr, "ShowApplicationRecord"}, {1000, nullptr, "RequestVerifyApplicationDeprecated"}, {1001, nullptr, "CorruptApplicationForDebug"}, {1002, nullptr, "RequestVerifyAddOnContentsRights"}, @@ -190,12 +197,14 @@ IApplicationManagerInterface::IApplicationManagerInterface() {1502, nullptr, "GetLastSdCardFormatUnexpectedResult"}, {1504, nullptr, "InsertSdCard"}, {1505, nullptr, "RemoveSdCard"}, + {1506, nullptr, "GetSdCardStartupStatus"}, {1600, nullptr, "GetSystemSeedForPseudoDeviceId"}, {1601, nullptr, "ResetSystemSeedForPseudoDeviceId"}, {1700, nullptr, 
"ListApplicationDownloadingContentMeta"}, {1701, nullptr, "GetApplicationView"}, {1702, nullptr, "GetApplicationDownloadTaskStatus"}, {1703, nullptr, "GetApplicationViewDownloadErrorContext"}, + {1704, nullptr, "GetApplicationViewWithPromotionInfo"}, {1800, nullptr, "IsNotificationSetupCompleted"}, {1801, nullptr, "GetLastNotificationInfoCount"}, {1802, nullptr, "ListLastNotificationInfo"}, @@ -223,6 +232,7 @@ IApplicationManagerInterface::IApplicationManagerInterface() {2017, nullptr, "CreateDownloadTask"}, {2018, nullptr, "GetApplicationDeliveryInfoHash"}, {2050, nullptr, "GetApplicationRightsOnClient"}, + {2051, nullptr, "InvalidateRightsIdCache"}, {2100, nullptr, "GetApplicationTerminateResult"}, {2101, nullptr, "GetRawApplicationTerminateResult"}, {2150, nullptr, "CreateRightsEnvironment"}, @@ -230,6 +240,8 @@ IApplicationManagerInterface::IApplicationManagerInterface() {2152, nullptr, "ActivateRightsEnvironment"}, {2153, nullptr, "DeactivateRightsEnvironment"}, {2154, nullptr, "ForceActivateRightsContextForExit"}, + {2155, nullptr, "UpdateRightsEnvironmentStatus"}, + {2156, nullptr, "CreateRightsEnvironmentForPreomia"}, {2160, nullptr, "AddTargetApplicationToRightsEnvironment"}, {2161, nullptr, "SetUsersToRightsEnvironment"}, {2170, nullptr, "GetRightsEnvironmentStatus"}, @@ -243,6 +255,20 @@ IApplicationManagerInterface::IApplicationManagerInterface() {2201, nullptr, "GetInstalledApplicationCopyIdentifier"}, {2250, nullptr, "RequestReportActiveELicence"}, {2300, nullptr, "ListEventLog"}, + {2350, nullptr, "PerformAutoUpdateByApplicationId"}, + {2351, nullptr, "RequestNoDownloadRightsErrorResolution"}, + {2352, nullptr, "RequestResolveNoDownloadRightsError"}, + {2400, nullptr, "GetPromotionInfo"}, + {2401, nullptr, "CountPromotionInfo"}, + {2402, nullptr, "ListPromotionInfo"}, + {2403, nullptr, "ImportPromotionJsonForDebug"}, + {2404, nullptr, "ClearPromotionInfoForDebug"}, + {2500, nullptr, "ConfirmAvailableTime"}, + {2510, nullptr, 
"CreateApplicationResource"}, + {2511, nullptr, "GetApplicationResource"}, + {2513, nullptr, "LaunchPreomia"}, + {2514, nullptr, "ClearTaskOfAsyncTaskManager"}, + {2800, nullptr, "GetApplicationIdOfPreomia"}, }; // clang-format on @@ -271,7 +297,7 @@ void IApplicationManagerInterface::GetApplicationControlData(Kernel::HLERequestC "output buffer is too small! (actual={:016X}, expected_min=0x4000)", size); IPC::ResponseBuilder rb{ctx, 2}; // TODO(DarkLordZach): Find a better error code for this. - rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); return; } @@ -291,7 +317,7 @@ void IApplicationManagerInterface::GetApplicationControlData(Kernel::HLERequestC 0x4000 + control.second->GetSize()); IPC::ResponseBuilder rb{ctx, 2}; // TODO(DarkLordZach): Find a better error code for this. - rb.Push(ResultCode(-1)); + rb.Push(RESULT_UNKNOWN); return; } @@ -463,6 +489,7 @@ IECommerceInterface::IECommerceInterface() : ServiceFramework{"IECommerceInterfa {3, nullptr, "RequestSyncRights"}, {4, nullptr, "RequestUnlinkDevice"}, {5, nullptr, "RequestRevokeAllELicense"}, + {6, nullptr, "RequestSyncRightsBasedOnAssignedELicenses"}, }; // clang-format on diff --git a/src/core/hle/service/ns/pl_u.cpp b/src/core/hle/service/ns/pl_u.cpp index 23477315f..8da4e52c5 100644 --- a/src/core/hle/service/ns/pl_u.cpp +++ b/src/core/hle/service/ns/pl_u.cpp @@ -97,7 +97,7 @@ void EncryptSharedFont(const std::vector<u32>& input, std::vector<u8>& output, const auto key = Common::swap32(EXPECTED_RESULT ^ EXPECTED_MAGIC); std::vector<u32> transformed_font(input.size() + 2); transformed_font[0] = Common::swap32(EXPECTED_MAGIC); - transformed_font[1] = Common::swap32(input.size() * sizeof(u32)) ^ key; + transformed_font[1] = Common::swap32(static_cast<u32>(input.size() * sizeof(u32))) ^ key; std::transform(input.begin(), input.end(), transformed_font.begin() + 2, [key](u32 in) { return in ^ key; }); std::memcpy(output.data() + offset, transformed_font.data(), @@ -141,7 +141,7 @@ struct PL_U::Impl { } 
/// Handle to shared memory region designated for a shared font - Kernel::SharedPtr<Kernel::SharedMemory> shared_font_mem; + std::shared_ptr<Kernel::SharedMemory> shared_font_mem; /// Backing memory for the shared font data std::shared_ptr<Kernel::PhysicalMemory> shared_font; @@ -152,7 +152,7 @@ struct PL_U::Impl { PL_U::PL_U(Core::System& system) : ServiceFramework("pl:u"), impl{std::make_unique<Impl>()}, system(system) { - + // clang-format off static const FunctionInfo functions[] = { {0, &PL_U::RequestLoad, "RequestLoad"}, {1, &PL_U::GetLoadState, "GetLoadState"}, @@ -160,7 +160,13 @@ PL_U::PL_U(Core::System& system) {3, &PL_U::GetSharedMemoryAddressOffset, "GetSharedMemoryAddressOffset"}, {4, &PL_U::GetSharedMemoryNativeHandle, "GetSharedMemoryNativeHandle"}, {5, &PL_U::GetSharedFontInOrderOfPriority, "GetSharedFontInOrderOfPriority"}, + {6, nullptr, "GetSharedFontInOrderOfPriorityForSystem"}, + {100, nullptr, "RequestApplicationFunctionAuthorization"}, + {101, nullptr, "RequestApplicationFunctionAuthorizationForSystem"}, + {1000, nullptr, "LoadNgWordDataForPlatformRegionChina"}, + {1001, nullptr, "GetNgWordDataSizeForPlatformRegionChina"}, }; + // clang-format on RegisterHandlers(functions); auto& fsc = system.GetFileSystemController(); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 07c88465e..195421cc0 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -104,10 +104,12 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) ASSERT(object->status == nvmap::Object::Status::Allocated); - u64 size = static_cast<u64>(entry.pages) << 0x10; + const u64 size = static_cast<u64>(entry.pages) << 0x10; ASSERT(size <= object->size); + const u64 map_offset = static_cast<u64>(entry.map_offset) << 0x10; - GPUVAddr returned = gpu.MemoryManager().MapBufferEx(object->addr, offset, size); + const 
GPUVAddr returned = + gpu.MemoryManager().MapBufferEx(object->addr + map_offset, offset, size); ASSERT(returned == offset); } std::memcpy(output.data(), entries.data(), output.size()); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index 169fb8f0e..f79fcc065 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -62,7 +62,7 @@ private: u16_le flags; u16_le kind; u32_le nvmap_handle; - INSERT_PADDING_WORDS(1); + u32_le map_offset; u32_le offset; u32_le pages; }; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index 9de0ace22..6d8bca8bb 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -191,8 +191,8 @@ u32 nvhost_gpu::KickoffPB(const std::vector<u8>& input, std::vector<u8>& output, std::memcpy(entries.data(), input2.data(), params.num_entries * sizeof(Tegra::CommandListHeader)); } else { - Memory::ReadBlock(params.address, entries.data(), - params.num_entries * sizeof(Tegra::CommandListHeader)); + system.Memory().ReadBlock(params.address, entries.data(), + params.num_entries * sizeof(Tegra::CommandListHeader)); } UNIMPLEMENTED_IF(params.flags.add_wait.Value() != 0); UNIMPLEMENTED_IF(params.flags.add_increment.Value() != 0); diff --git a/src/core/hle/service/nvdrv/interface.cpp b/src/core/hle/service/nvdrv/interface.cpp index 68d139cfb..c8ea6c661 100644 --- a/src/core/hle/service/nvdrv/interface.cpp +++ b/src/core/hle/service/nvdrv/interface.cpp @@ -61,7 +61,7 @@ void NVDRV::IoctlBase(Kernel::HLERequestContext& ctx, IoctlVersion version) { if (ctrl.must_delay) { ctrl.fresh_call = false; ctx.SleepClientThread("NVServices::DelayedResponse", ctrl.timeout, - [=](Kernel::SharedPtr<Kernel::Thread> thread, + [=](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, 
Kernel::ThreadWakeupReason reason) { IoctlCtrl ctrl2{ctrl}; diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index cc9cd3fd1..197c77db0 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -100,11 +100,11 @@ void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) { } } -Kernel::SharedPtr<Kernel::ReadableEvent> Module::GetEvent(const u32 event_id) const { +std::shared_ptr<Kernel::ReadableEvent> Module::GetEvent(const u32 event_id) const { return events_interface.events[event_id].readable; } -Kernel::SharedPtr<Kernel::WritableEvent> Module::GetEventWriteable(const u32 event_id) const { +std::shared_ptr<Kernel::WritableEvent> Module::GetEventWriteable(const u32 event_id) const { return events_interface.events[event_id].writable; } diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index f8bb28969..d7a1bef91 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -114,9 +114,9 @@ public: void SignalSyncpt(const u32 syncpoint_id, const u32 value); - Kernel::SharedPtr<Kernel::ReadableEvent> GetEvent(u32 event_id) const; + std::shared_ptr<Kernel::ReadableEvent> GetEvent(u32 event_id) const; - Kernel::SharedPtr<Kernel::WritableEvent> GetEventWriteable(u32 event_id) const; + std::shared_ptr<Kernel::WritableEvent> GetEventWriteable(u32 event_id) const; private: /// Id to use for the next open file descriptor. 
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp index 1af11e80c..32b6f4b27 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue.cpp @@ -117,11 +117,11 @@ u32 BufferQueue::Query(QueryType type) { return 0; } -Kernel::SharedPtr<Kernel::WritableEvent> BufferQueue::GetWritableBufferWaitEvent() const { +std::shared_ptr<Kernel::WritableEvent> BufferQueue::GetWritableBufferWaitEvent() const { return buffer_wait_event.writable; } -Kernel::SharedPtr<Kernel::ReadableEvent> BufferQueue::GetBufferWaitEvent() const { +std::shared_ptr<Kernel::ReadableEvent> BufferQueue::GetBufferWaitEvent() const { return buffer_wait_event.readable; } diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h index 8f9b18547..f4bbfd945 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.h +++ b/src/core/hle/service/nvflinger/buffer_queue.h @@ -93,9 +93,9 @@ public: return id; } - Kernel::SharedPtr<Kernel::WritableEvent> GetWritableBufferWaitEvent() const; + std::shared_ptr<Kernel::WritableEvent> GetWritableBufferWaitEvent() const; - Kernel::SharedPtr<Kernel::ReadableEvent> GetBufferWaitEvent() const; + std::shared_ptr<Kernel::ReadableEvent> GetBufferWaitEvent() const; private: u32 id; diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index cc9522aad..52623cf89 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp @@ -37,8 +37,8 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) { displays.emplace_back(4, "Null", system); // Schedule the screen composition events - composition_event = system.CoreTiming().RegisterEvent( - "ScreenComposition", [this](u64 userdata, s64 cycles_late) { + composition_event = + Core::Timing::CreateEvent("ScreenComposition", [this](u64 userdata, s64 cycles_late) { Compose(); const 
auto ticks = Settings::values.force_30fps_mode ? frame_ticks_30fps : GetNextTicks(); @@ -98,7 +98,7 @@ std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) co return layer->GetBufferQueue().GetId(); } -Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id) const { +std::shared_ptr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_id) const { auto* const display = FindDisplay(display_id); if (display == nullptr) { diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index 5d7e3bfb8..e3cc14bdc 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h @@ -62,7 +62,7 @@ public: /// Gets the vsync event for the specified display. /// /// If an invalid display ID is provided, then nullptr is returned. - Kernel::SharedPtr<Kernel::ReadableEvent> FindVsyncEvent(u64 display_id) const; + std::shared_ptr<Kernel::ReadableEvent> FindVsyncEvent(u64 display_id) const; /// Obtains a buffer queue identified by the ID. BufferQueue& FindBufferQueue(u32 id); @@ -103,7 +103,7 @@ private: u32 swap_interval = 1; /// Event that handles screen composition. 
- Core::Timing::EventType* composition_event; + std::shared_ptr<Core::Timing::EventType> composition_event; Core::System& system; }; diff --git a/src/core/hle/service/pm/pm.cpp b/src/core/hle/service/pm/pm.cpp index fe6b5f798..809eca0ab 100644 --- a/src/core/hle/service/pm/pm.cpp +++ b/src/core/hle/service/pm/pm.cpp @@ -16,9 +16,9 @@ constexpr ResultCode ERROR_PROCESS_NOT_FOUND{ErrorModule::PM, 1}; constexpr u64 NO_PROCESS_FOUND_PID{0}; -std::optional<Kernel::SharedPtr<Kernel::Process>> SearchProcessList( - const std::vector<Kernel::SharedPtr<Kernel::Process>>& process_list, - std::function<bool(const Kernel::SharedPtr<Kernel::Process>&)> predicate) { +std::optional<std::shared_ptr<Kernel::Process>> SearchProcessList( + const std::vector<std::shared_ptr<Kernel::Process>>& process_list, + std::function<bool(const std::shared_ptr<Kernel::Process>&)> predicate) { const auto iter = std::find_if(process_list.begin(), process_list.end(), predicate); if (iter == process_list.end()) { @@ -29,7 +29,7 @@ std::optional<Kernel::SharedPtr<Kernel::Process>> SearchProcessList( } void GetApplicationPidGeneric(Kernel::HLERequestContext& ctx, - const std::vector<Kernel::SharedPtr<Kernel::Process>>& process_list) { + const std::vector<std::shared_ptr<Kernel::Process>>& process_list) { const auto process = SearchProcessList(process_list, [](const auto& process) { return process->GetProcessID() == Kernel::Process::ProcessIDMin; }); @@ -124,7 +124,7 @@ private: class Info final : public ServiceFramework<Info> { public: - explicit Info(const std::vector<Kernel::SharedPtr<Kernel::Process>>& process_list) + explicit Info(const std::vector<std::shared_ptr<Kernel::Process>>& process_list) : ServiceFramework{"pm:info"}, process_list(process_list) { static const FunctionInfo functions[] = { {0, &Info::GetTitleId, "GetTitleId"}, @@ -154,7 +154,7 @@ private: rb.Push((*process)->GetTitleID()); } - const std::vector<Kernel::SharedPtr<Kernel::Process>>& process_list; + const 
std::vector<std::shared_ptr<Kernel::Process>>& process_list; }; class Shell final : public ServiceFramework<Shell> { @@ -172,7 +172,7 @@ public: {6, &Shell::GetApplicationPid, "GetApplicationPid"}, {7, nullptr, "BoostSystemMemoryResourceLimit"}, {8, nullptr, "EnableAdditionalSystemThreads"}, - {9, nullptr, "GetUnimplementedEventHandle"}, + {9, nullptr, "GetBootFinishedEventHandle"}, }; // clang-format on diff --git a/src/core/hle/service/prepo/prepo.cpp b/src/core/hle/service/prepo/prepo.cpp index 18d895263..5eb26caf8 100644 --- a/src/core/hle/service/prepo/prepo.cpp +++ b/src/core/hle/service/prepo/prepo.cpp @@ -25,6 +25,7 @@ public: {10103, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::New>, "SaveReportWithUser"}, {10200, nullptr, "RequestImmediateTransmission"}, {10300, nullptr, "GetTransmissionStatus"}, + {10400, nullptr, "GetSystemSessionId"}, {20100, &PlayReport::SaveSystemReport, "SaveSystemReport"}, {20101, &PlayReport::SaveSystemReportWithUser, "SaveSystemReportWithUser"}, {20200, nullptr, "SetOperationMode"}, diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp index 7c5302017..fa5347af9 100644 --- a/src/core/hle/service/service.cpp +++ b/src/core/hle/service/service.cpp @@ -116,7 +116,7 @@ void ServiceFrameworkBase::InstallAsNamedPort() { port_installed = true; } -Kernel::SharedPtr<Kernel::ClientPort> ServiceFrameworkBase::CreatePort() { +std::shared_ptr<Kernel::ClientPort> ServiceFrameworkBase::CreatePort() { ASSERT(!port_installed); auto& kernel = Core::System::GetInstance().Kernel(); @@ -186,7 +186,7 @@ ResultCode ServiceFrameworkBase::HandleSyncRequest(Kernel::HLERequestContext& co UNIMPLEMENTED_MSG("command_type={}", static_cast<int>(context.GetCommandType())); } - context.WriteToOutgoingCommandBuffer(*Kernel::GetCurrentThread()); + context.WriteToOutgoingCommandBuffer(context.GetThread()); return RESULT_SUCCESS; } @@ -201,7 +201,7 @@ void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& 
system) { auto nv_flinger = std::make_shared<NVFlinger::NVFlinger>(system); system.GetFileSystemController().CreateFactories(*system.GetFilesystem(), false); - SM::ServiceManager::InstallInterfaces(sm); + SM::ServiceManager::InstallInterfaces(sm, system.Kernel()); Account::InstallInterfaces(system); AM::InstallInterfaces(*sm, nv_flinger, system); diff --git a/src/core/hle/service/service.h b/src/core/hle/service/service.h index aef964861..022d885b6 100644 --- a/src/core/hle/service/service.h +++ b/src/core/hle/service/service.h @@ -65,7 +65,7 @@ public: /// Creates a port pair and registers it on the kernel's global port registry. void InstallAsNamedPort(); /// Creates and returns an unregistered port for the service. - Kernel::SharedPtr<Kernel::ClientPort> CreatePort(); + std::shared_ptr<Kernel::ClientPort> CreatePort(); void InvokeRequest(Kernel::HLERequestContext& ctx); diff --git a/src/core/hle/service/set/set.cpp b/src/core/hle/service/set/set.cpp index b54214421..5bcc0b588 100644 --- a/src/core/hle/service/set/set.cpp +++ b/src/core/hle/service/set/set.cpp @@ -124,6 +124,7 @@ SET::SET() : ServiceFramework("set") { {7, nullptr, "GetKeyCodeMap"}, {8, &SET::GetQuestFlag, "GetQuestFlag"}, {9, nullptr, "GetKeyCodeMap2"}, + {10, nullptr, "GetFirmwareVersionForDebug"}, }; // clang-format on diff --git a/src/core/hle/service/set/set_cal.cpp b/src/core/hle/service/set/set_cal.cpp index 5981c575c..1398a4a48 100644 --- a/src/core/hle/service/set/set_cal.cpp +++ b/src/core/hle/service/set/set_cal.cpp @@ -7,6 +7,7 @@ namespace Service::Set { SET_CAL::SET_CAL() : ServiceFramework("set:cal") { + // clang-format off static const FunctionInfo functions[] = { {0, nullptr, "GetBluetoothBdAddress"}, {1, nullptr, "GetConfigurationId1"}, @@ -40,8 +41,18 @@ SET_CAL::SET_CAL() : ServiceFramework("set:cal") { {30, nullptr, "GetAmiiboEcqvBlsCertificate"}, {31, nullptr, "GetAmiiboEcqvBlsRootCertificate"}, {32, nullptr, "GetUsbTypeCPowerSourceCircuitVersion"}, + {33, nullptr, 
"GetAnalogStickModuleTypeL"}, + {34, nullptr, "GetAnalogStickModelParameterL"}, + {35, nullptr, "GetAnalogStickFactoryCalibrationL"}, + {36, nullptr, "GetAnalogStickModuleTypeR"}, + {37, nullptr, "GetAnalogStickModelParameterR"}, + {38, nullptr, "GetAnalogStickFactoryCalibrationR"}, + {39, nullptr, "GetConsoleSixAxisSensorModuleType"}, + {40, nullptr, "GetConsoleSixAxisSensorHorizontalOffset"}, {41, nullptr, "GetBatteryVersion"}, }; + // clang-format on + RegisterHandlers(functions); } diff --git a/src/core/hle/service/set/set_fd.cpp b/src/core/hle/service/set/set_fd.cpp index cac6af86d..565882a31 100644 --- a/src/core/hle/service/set/set_fd.cpp +++ b/src/core/hle/service/set/set_fd.cpp @@ -7,6 +7,7 @@ namespace Service::Set { SET_FD::SET_FD() : ServiceFramework("set:fd") { + // clang-format off static const FunctionInfo functions[] = { {2, nullptr, "SetSettingsItemValue"}, {3, nullptr, "ResetSettingsItemValue"}, @@ -16,7 +17,10 @@ SET_FD::SET_FD() : ServiceFramework("set:fd") { {20, nullptr, "SetWebInspectorFlag"}, {21, nullptr, "SetAllowedSslHosts"}, {22, nullptr, "SetHostFsMountPoint"}, + {23, nullptr, "SetMemoryUsageRateFlag"}, }; + // clang-format on + RegisterHandlers(functions); } diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp index 98d0cfdfd..b7c9ea74b 100644 --- a/src/core/hle/service/set/set_sys.cpp +++ b/src/core/hle/service/set/set_sys.cpp @@ -273,10 +273,21 @@ SET_SYS::SET_SYS() : ServiceFramework("set:sys") { {171, nullptr, "SetChineseTraditionalInputMethod"}, {172, nullptr, "GetPtmCycleCountReliability"}, {173, nullptr, "SetPtmCycleCountReliability"}, + {174, nullptr, "GetHomeMenuScheme"}, {175, nullptr, "GetThemeSettings"}, {176, nullptr, "SetThemeSettings"}, {177, nullptr, "GetThemeKey"}, {178, nullptr, "SetThemeKey"}, + {179, nullptr, "GetZoomFlag"}, + {180, nullptr, "SetZoomFlag"}, + {181, nullptr, "GetT"}, + {182, nullptr, "SetT"}, + {183, nullptr, "GetPlatformRegion"}, + {184, nullptr, 
"SetPlatformRegion"}, + {185, nullptr, "GetHomeMenuSchemeModel"}, + {186, nullptr, "GetMemoryUsageRateFlag"}, + {187, nullptr, "GetTouchScreenMode"}, + {188, nullptr, "SetTouchScreenMode"}, }; // clang-format on diff --git a/src/core/hle/service/sm/controller.cpp b/src/core/hle/service/sm/controller.cpp index e9ee73710..c45b285f8 100644 --- a/src/core/hle/service/sm/controller.cpp +++ b/src/core/hle/service/sm/controller.cpp @@ -30,10 +30,7 @@ void Controller::DuplicateSession(Kernel::HLERequestContext& ctx) { IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; rb.Push(RESULT_SUCCESS); - Kernel::SharedPtr<Kernel::ClientSession> session{ctx.Session()->GetParent()->client}; - rb.PushMoveObjects(session); - - LOG_DEBUG(Service, "session={}", session->GetObjectId()); + rb.PushMoveObjects(ctx.Session()->GetParent()->Client()); } void Controller::DuplicateSessionEx(Kernel::HLERequestContext& ctx) { diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp index 142929124..88909504d 100644 --- a/src/core/hle/service/sm/sm.cpp +++ b/src/core/hle/service/sm/sm.cpp @@ -36,16 +36,17 @@ static ResultCode ValidateServiceName(const std::string& name) { return RESULT_SUCCESS; } -void ServiceManager::InstallInterfaces(std::shared_ptr<ServiceManager> self) { +void ServiceManager::InstallInterfaces(std::shared_ptr<ServiceManager> self, + Kernel::KernelCore& kernel) { ASSERT(self->sm_interface.expired()); - auto sm = std::make_shared<SM>(self); + auto sm = std::make_shared<SM>(self, kernel); sm->InstallAsNamedPort(); self->sm_interface = sm; self->controller_interface = std::make_unique<Controller>(); } -ResultVal<Kernel::SharedPtr<Kernel::ServerPort>> ServiceManager::RegisterService( +ResultVal<std::shared_ptr<Kernel::ServerPort>> ServiceManager::RegisterService( std::string name, unsigned int max_sessions) { CASCADE_CODE(ValidateServiceName(name)); @@ -72,7 +73,7 @@ ResultCode ServiceManager::UnregisterService(const std::string& 
name) { return RESULT_SUCCESS; } -ResultVal<Kernel::SharedPtr<Kernel::ClientPort>> ServiceManager::GetServicePort( +ResultVal<std::shared_ptr<Kernel::ClientPort>> ServiceManager::GetServicePort( const std::string& name) { CASCADE_CODE(ValidateServiceName(name)); @@ -84,7 +85,7 @@ ResultVal<Kernel::SharedPtr<Kernel::ClientPort>> ServiceManager::GetServicePort( return MakeResult(it->second); } -ResultVal<Kernel::SharedPtr<Kernel::ClientSession>> ServiceManager::ConnectToService( +ResultVal<std::shared_ptr<Kernel::ClientSession>> ServiceManager::ConnectToService( const std::string& name) { CASCADE_RESULT(auto client_port, GetServicePort(name)); @@ -114,8 +115,6 @@ void SM::GetService(Kernel::HLERequestContext& ctx) { std::string name(name_buf.begin(), end); - // TODO(yuriks): Permission checks go here - auto client_port = service_manager->GetServicePort(name); if (client_port.Failed()) { IPC::ResponseBuilder rb{ctx, 2}; @@ -127,14 +126,22 @@ void SM::GetService(Kernel::HLERequestContext& ctx) { return; } - auto session = client_port.Unwrap()->Connect(); - ASSERT(session.Succeeded()); - if (session.Succeeded()) { - LOG_DEBUG(Service_SM, "called service={} -> session={}", name, (*session)->GetObjectId()); - IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; - rb.Push(session.Code()); - rb.PushMoveObjects(std::move(session).Unwrap()); + auto [client, server] = Kernel::Session::Create(kernel, name); + + const auto& server_port = client_port.Unwrap()->GetServerPort(); + if (server_port->GetHLEHandler()) { + server_port->GetHLEHandler()->ClientConnected(server); + } else { + server_port->AppendPendingSession(server); } + + // Wake the threads waiting on the ServerPort + server_port->WakeupAllWaitingThreads(); + + LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId()); + IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; + rb.Push(RESULT_SUCCESS); + 
rb.PushMoveObjects(std::move(client)); } void SM::RegisterService(Kernel::HLERequestContext& ctx) { @@ -178,8 +185,8 @@ void SM::UnregisterService(Kernel::HLERequestContext& ctx) { rb.Push(service_manager->UnregisterService(name)); } -SM::SM(std::shared_ptr<ServiceManager> service_manager) - : ServiceFramework("sm:", 4), service_manager(std::move(service_manager)) { +SM::SM(std::shared_ptr<ServiceManager> service_manager, Kernel::KernelCore& kernel) + : ServiceFramework{"sm:", 4}, service_manager{std::move(service_manager)}, kernel{kernel} { static const FunctionInfo functions[] = { {0x00000000, &SM::Initialize, "Initialize"}, {0x00000001, &SM::GetService, "GetService"}, diff --git a/src/core/hle/service/sm/sm.h b/src/core/hle/service/sm/sm.h index b9d6381b4..b06d2f103 100644 --- a/src/core/hle/service/sm/sm.h +++ b/src/core/hle/service/sm/sm.h @@ -18,6 +18,7 @@ namespace Kernel { class ClientPort; class ClientSession; +class KernelCore; class ServerPort; class SessionRequestHandler; } // namespace Kernel @@ -29,7 +30,7 @@ class Controller; /// Interface to "sm:" service class SM final : public ServiceFramework<SM> { public: - explicit SM(std::shared_ptr<ServiceManager> service_manager); + explicit SM(std::shared_ptr<ServiceManager> service_manager, Kernel::KernelCore& kernel); ~SM() override; private: @@ -39,20 +40,21 @@ private: void UnregisterService(Kernel::HLERequestContext& ctx); std::shared_ptr<ServiceManager> service_manager; + Kernel::KernelCore& kernel; }; class ServiceManager { public: - static void InstallInterfaces(std::shared_ptr<ServiceManager> self); + static void InstallInterfaces(std::shared_ptr<ServiceManager> self, Kernel::KernelCore& kernel); ServiceManager(); ~ServiceManager(); - ResultVal<Kernel::SharedPtr<Kernel::ServerPort>> RegisterService(std::string name, - unsigned int max_sessions); + ResultVal<std::shared_ptr<Kernel::ServerPort>> RegisterService(std::string name, + unsigned int max_sessions); ResultCode UnregisterService(const 
std::string& name); - ResultVal<Kernel::SharedPtr<Kernel::ClientPort>> GetServicePort(const std::string& name); - ResultVal<Kernel::SharedPtr<Kernel::ClientSession>> ConnectToService(const std::string& name); + ResultVal<std::shared_ptr<Kernel::ClientPort>> GetServicePort(const std::string& name); + ResultVal<std::shared_ptr<Kernel::ClientSession>> ConnectToService(const std::string& name); template <typename T> std::shared_ptr<T> GetService(const std::string& service_name) const { @@ -77,7 +79,7 @@ private: std::unique_ptr<Controller> controller_interface; /// Map of registered services, retrieved using GetServicePort or ConnectToService. - std::unordered_map<std::string, Kernel::SharedPtr<Kernel::ClientPort>> registered_services; + std::unordered_map<std::string, std::shared_ptr<Kernel::ClientPort>> registered_services; }; } // namespace Service::SM diff --git a/src/core/hle/service/sockets/nsd.cpp b/src/core/hle/service/sockets/nsd.cpp index e6d73065e..dc70fd6fe 100644 --- a/src/core/hle/service/sockets/nsd.cpp +++ b/src/core/hle/service/sockets/nsd.cpp @@ -7,6 +7,7 @@ namespace Service::Sockets { NSD::NSD(const char* name) : ServiceFramework(name) { + // clang-format off static const FunctionInfo functions[] = { {10, nullptr, "GetSettingName"}, {11, nullptr, "GetEnvironmentIdentifier"}, @@ -22,10 +23,14 @@ NSD::NSD(const char* name) : ServiceFramework(name) { {42, nullptr, "GetNasApiFqdn"}, {43, nullptr, "GetNasApiFqdnEx"}, {50, nullptr, "GetCurrentSetting"}, + {51, nullptr, "WriteTestParameter"}, + {52, nullptr, "ReadTestParameter"}, {60, nullptr, "ReadSaveDataFromFsForTest"}, {61, nullptr, "WriteSaveDataToFsForTest"}, {62, nullptr, "DeleteSaveDataOfFsForTest"}, }; + // clang-format on + RegisterHandlers(functions); } diff --git a/src/core/hle/service/ssl/ssl.cpp b/src/core/hle/service/ssl/ssl.cpp index 65040c077..1ba8c19a0 100644 --- a/src/core/hle/service/ssl/ssl.cpp +++ b/src/core/hle/service/ssl/ssl.cpp @@ -13,6 +13,7 @@ namespace Service::SSL { class 
ISslConnection final : public ServiceFramework<ISslConnection> { public: ISslConnection() : ServiceFramework("ISslConnection") { + // clang-format off static const FunctionInfo functions[] = { {0, nullptr, "SetSocketDescriptor"}, {1, nullptr, "SetHostName"}, @@ -40,7 +41,11 @@ public: {23, nullptr, "GetOption"}, {24, nullptr, "GetVerifyCertErrors"}, {25, nullptr, "GetCipherInfo"}, + {26, nullptr, "SetNextAlpnProto"}, + {27, nullptr, "GetNextAlpnProto"}, }; + // clang-format on + RegisterHandlers(functions); } }; diff --git a/src/core/hle/service/time/interface.cpp b/src/core/hle/service/time/interface.cpp index 9565e7de5..bc74f1e1d 100644 --- a/src/core/hle/service/time/interface.cpp +++ b/src/core/hle/service/time/interface.cpp @@ -21,6 +21,7 @@ Time::Time(std::shared_ptr<Module> time, std::shared_ptr<SharedMemory> shared_me {30, nullptr, "GetStandardNetworkClockOperationEventReadableHandle"}, {31, nullptr, "GetEphemeralNetworkClockOperationEventReadableHandle"}, {50, nullptr, "SetStandardSteadyClockInternalOffset"}, + {51, nullptr, "GetStandardSteadyClockRtcValue"}, {100, &Time::IsStandardUserSystemClockAutomaticCorrectionEnabled, "IsStandardUserSystemClockAutomaticCorrectionEnabled"}, {101, &Time::SetStandardUserSystemClockAutomaticCorrectionEnabled, "SetStandardUserSystemClockAutomaticCorrectionEnabled"}, {102, nullptr, "GetStandardUserSystemClockInitialYear"}, diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp index 1b9ab8401..6ee77c5f9 100644 --- a/src/core/hle/service/time/time.cpp +++ b/src/core/hle/service/time/time.cpp @@ -34,12 +34,12 @@ static void PosixToCalendar(u64 posix_time, CalendarTime& calendar_time, additional_info = {}; return; } - calendar_time.year = tm->tm_year + 1900; - calendar_time.month = tm->tm_mon + 1; - calendar_time.day = tm->tm_mday; - calendar_time.hour = tm->tm_hour; - calendar_time.minute = tm->tm_min; - calendar_time.second = tm->tm_sec; + calendar_time.year = static_cast<u16_le>(tm->tm_year + 
1900); + calendar_time.month = static_cast<u8>(tm->tm_mon + 1); + calendar_time.day = static_cast<u8>(tm->tm_mday); + calendar_time.hour = static_cast<u8>(tm->tm_hour); + calendar_time.minute = static_cast<u8>(tm->tm_min); + calendar_time.second = static_cast<u8>(tm->tm_sec); additional_info.day_of_week = tm->tm_wday; additional_info.day_of_year = tm->tm_yday; @@ -74,15 +74,17 @@ public: ISystemClock(std::shared_ptr<Service::Time::SharedMemory> shared_memory, ClockContextType clock_type) : ServiceFramework("ISystemClock"), shared_memory(shared_memory), clock_type(clock_type) { + // clang-format off static const FunctionInfo functions[] = { {0, &ISystemClock::GetCurrentTime, "GetCurrentTime"}, {1, nullptr, "SetCurrentTime"}, {2, &ISystemClock::GetSystemClockContext, "GetSystemClockContext"}, {3, nullptr, "SetSystemClockContext"}, - + {4, nullptr, "GetOperationEventReadableHandle"}, }; - RegisterHandlers(functions); + // clang-format on + RegisterHandlers(functions); UpdateSharedMemoryContext(system_clock_context); } @@ -162,6 +164,7 @@ private: class ITimeZoneService final : public ServiceFramework<ITimeZoneService> { public: ITimeZoneService() : ServiceFramework("ITimeZoneService") { + // clang-format off static const FunctionInfo functions[] = { {0, &ITimeZoneService::GetDeviceLocationName, "GetDeviceLocationName"}, {1, nullptr, "SetDeviceLocationName"}, @@ -169,11 +172,17 @@ public: {3, nullptr, "LoadLocationNameList"}, {4, &ITimeZoneService::LoadTimeZoneRule, "LoadTimeZoneRule"}, {5, nullptr, "GetTimeZoneRuleVersion"}, + {6, nullptr, "GetDeviceLocationNameAndUpdatedTime"}, + {7, nullptr, "SetDeviceLocationNameWithTimeZoneRule"}, + {8, nullptr, "ParseTimeZoneBinary"}, + {20, nullptr, "GetDeviceLocationNameOperationEventReadableHandle"}, {100, &ITimeZoneService::ToCalendarTime, "ToCalendarTime"}, {101, &ITimeZoneService::ToCalendarTimeWithMyRule, "ToCalendarTimeWithMyRule"}, {201, &ITimeZoneService::ToPosixTime, "ToPosixTime"}, {202, 
&ITimeZoneService::ToPosixTimeWithMyRule, "ToPosixTimeWithMyRule"}, }; + // clang-format on + RegisterHandlers(functions); } @@ -322,7 +331,7 @@ void Module::Interface::GetClockSnapshot(Kernel::HLERequestContext& ctx) { if (tm == nullptr) { LOG_ERROR(Service_Time, "tm is a nullptr"); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ResultCode(-1)); // TODO(ogniK): Find appropriate error code + rb.Push(RESULT_UNKNOWN); // TODO(ogniK): Find appropriate error code return; } @@ -331,12 +340,12 @@ void Module::Interface::GetClockSnapshot(Kernel::HLERequestContext& ctx) { const SteadyClockTimePoint steady_clock_time_point{static_cast<u64_le>(ms.count() / 1000), {}}; CalendarTime calendar_time{}; - calendar_time.year = tm->tm_year + 1900; - calendar_time.month = tm->tm_mon + 1; - calendar_time.day = tm->tm_mday; - calendar_time.hour = tm->tm_hour; - calendar_time.minute = tm->tm_min; - calendar_time.second = tm->tm_sec; + calendar_time.year = static_cast<u16_le>(tm->tm_year + 1900); + calendar_time.month = static_cast<u8>(tm->tm_mon + 1); + calendar_time.day = static_cast<u8>(tm->tm_mday); + calendar_time.hour = static_cast<u8>(tm->tm_hour); + calendar_time.minute = static_cast<u8>(tm->tm_min); + calendar_time.second = static_cast<u8>(tm->tm_sec); ClockSnapshot clock_snapshot{}; clock_snapshot.system_posix_time = time_since_epoch; diff --git a/src/core/hle/service/time/time_sharedmemory.cpp b/src/core/hle/service/time/time_sharedmemory.cpp index bfc81b83c..4035f5072 100644 --- a/src/core/hle/service/time/time_sharedmemory.cpp +++ b/src/core/hle/service/time/time_sharedmemory.cpp @@ -21,7 +21,7 @@ SharedMemory::SharedMemory(Core::System& system) : system(system) { SharedMemory::~SharedMemory() = default; -Kernel::SharedPtr<Kernel::SharedMemory> SharedMemory::GetSharedMemoryHolder() const { +std::shared_ptr<Kernel::SharedMemory> SharedMemory::GetSharedMemoryHolder() const { return shared_memory_holder; } diff --git a/src/core/hle/service/time/time_sharedmemory.h 
b/src/core/hle/service/time/time_sharedmemory.h index cb8253541..904a96430 100644 --- a/src/core/hle/service/time/time_sharedmemory.h +++ b/src/core/hle/service/time/time_sharedmemory.h @@ -15,7 +15,7 @@ public: ~SharedMemory(); // Return the shared memory handle - Kernel::SharedPtr<Kernel::SharedMemory> GetSharedMemoryHolder() const; + std::shared_ptr<Kernel::SharedMemory> GetSharedMemoryHolder() const; // Set memory barriers in shared memory and update them void SetStandardSteadyClockTimepoint(const SteadyClockTimePoint& timepoint); @@ -66,7 +66,7 @@ public: static_assert(sizeof(Format) == 0xd8, "Format is an invalid size"); private: - Kernel::SharedPtr<Kernel::SharedMemory> shared_memory_holder{}; + std::shared_ptr<Kernel::SharedMemory> shared_memory_holder{}; Core::System& system; Format shared_memory_format{}; }; diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp index 07033fb98..cd18c1610 100644 --- a/src/core/hle/service/vi/display/vi_display.cpp +++ b/src/core/hle/service/vi/display/vi_display.cpp @@ -31,7 +31,7 @@ const Layer& Display::GetLayer(std::size_t index) const { return layers.at(index); } -Kernel::SharedPtr<Kernel::ReadableEvent> Display::GetVSyncEvent() const { +std::shared_ptr<Kernel::ReadableEvent> Display::GetVSyncEvent() const { return vsync_event.readable; } diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h index f56b5badc..8bb966a85 100644 --- a/src/core/hle/service/vi/display/vi_display.h +++ b/src/core/hle/service/vi/display/vi_display.h @@ -57,7 +57,7 @@ public: const Layer& GetLayer(std::size_t index) const; /// Gets the readable vsync event. - Kernel::SharedPtr<Kernel::ReadableEvent> GetVSyncEvent() const; + std::shared_ptr<Kernel::ReadableEvent> GetVSyncEvent() const; /// Signals the internal vsync event. 
void SignalVSyncEvent(); diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index 611cecc20..651c89dc0 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp @@ -541,8 +541,8 @@ private: } else { // Wait the current thread until a buffer becomes available ctx.SleepClientThread( - "IHOSBinderDriver::DequeueBuffer", -1, - [=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, + "IHOSBinderDriver::DequeueBuffer", UINT64_MAX, + [=](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, Kernel::ThreadWakeupReason reason) { // Repeat TransactParcel DequeueBuffer when a buffer is available auto& buffer_queue = nv_flinger->FindBufferQueue(id); @@ -731,6 +731,7 @@ class IManagerDisplayService final : public ServiceFramework<IManagerDisplayServ public: explicit IManagerDisplayService(std::shared_ptr<NVFlinger::NVFlinger> nv_flinger) : ServiceFramework("IManagerDisplayService"), nv_flinger(std::move(nv_flinger)) { + // clang-format off static const FunctionInfo functions[] = { {200, nullptr, "AllocateProcessHeapBlock"}, {201, nullptr, "FreeProcessHeapBlock"}, @@ -766,8 +767,11 @@ public: {6008, nullptr, "StartLayerPresentationFenceWait"}, {6009, nullptr, "StopLayerPresentationFenceWait"}, {6010, nullptr, "GetLayerPresentationAllFencesExpiredEvent"}, + {6011, nullptr, "EnableLayerAutoClearTransitionBuffer"}, + {6012, nullptr, "DisableLayerAutoClearTransitionBuffer"}, {7000, nullptr, "SetContentVisibility"}, {8000, nullptr, "SetConductorLayer"}, + {8001, nullptr, "SetTimestampTracking"}, {8100, nullptr, "SetIndirectProducerFlipOffset"}, {8200, nullptr, "CreateSharedBufferStaticStorage"}, {8201, nullptr, "CreateSharedBufferTransferMemory"}, @@ -800,6 +804,8 @@ public: {8297, nullptr, "GetSharedFrameBufferContentParameter"}, {8298, nullptr, "ExpandStartupLogoOnSharedFrameBuffer"}, }; + // clang-format on + RegisterHandlers(functions); } diff --git a/src/core/memory.cpp 
b/src/core/memory.cpp index fa49f3dd0..91bf07a92 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -17,529 +17,699 @@ #include "core/hle/kernel/process.h" #include "core/hle/kernel/vm_manager.h" #include "core/memory.h" -#include "core/memory_setup.h" #include "video_core/gpu.h" namespace Memory { -static Common::PageTable* current_page_table = nullptr; +// Implementation class used to keep the specifics of the memory subsystem hidden +// from outside classes. This also allows modification to the internals of the memory +// subsystem without needing to rebuild all files that make use of the memory interface. +struct Memory::Impl { + explicit Impl(Core::System& system_) : system{system_} {} -void SetCurrentPageTable(Kernel::Process& process) { - current_page_table = &process.VMManager().page_table; + void SetCurrentPageTable(Kernel::Process& process) { + current_page_table = &process.VMManager().page_table; - const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth(); + const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth(); - auto& system = Core::System::GetInstance(); - system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); - system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width); - system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width); - system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width); -} + system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); + system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width); + system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width); + system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width); + } -static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory, - Common::PageType type) { - LOG_DEBUG(HW_Memory, "Mapping {} onto 
{:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, - (base + size) * PAGE_SIZE); - - // During boot, current_page_table might not be set yet, in which case we need not flush - if (Core::System::GetInstance().IsPoweredOn()) { - auto& gpu = Core::System::GetInstance().GPU(); - for (u64 i = 0; i < size; i++) { - const auto page = base + i; - if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) { - gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE); - } - } + void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { + ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); + ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); + MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); } - VAddr end = base + size; - ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", - base + page_table.pointers.size()); + void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer mmio_handler) { + ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); + ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); + MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, + Common::PageType::Special); + + const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); + const Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, + std::move(mmio_handler)}; + page_table.special_regions.add( + std::make_pair(interval, std::set<Common::SpecialRegion>{region})); + } - std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); + void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { + ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); + ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); + 
MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, + Common::PageType::Unmapped); - if (memory == nullptr) { - std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory); - } else { - while (base != end) { - page_table.pointers[base] = memory; + const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); + page_table.special_regions.erase(interval); + } - base += 1; - memory += PAGE_SIZE; - } + void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer hook) { + const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); + const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; + page_table.special_regions.add( + std::make_pair(interval, std::set<Common::SpecialRegion>{region})); } -} -void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { - ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); - ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); -} + void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer hook) { + const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); + const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; + page_table.special_regions.subtract( + std::make_pair(interval, std::set<Common::SpecialRegion>{region})); + } -void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, - Common::MemoryHookPointer mmio_handler) { - ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); - ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, 
Common::PageType::Special); + bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const { + const auto& page_table = process.VMManager().page_table; - auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, std::move(mmio_handler)}; - page_table.special_regions.add( - std::make_pair(interval, std::set<Common::SpecialRegion>{region})); -} + const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS]; + if (page_pointer != nullptr) { + return true; + } -void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { - ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); - ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Unmapped); + if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) { + return true; + } - auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - page_table.special_regions.erase(interval); -} + if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special) { + return false; + } -void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, - Common::MemoryHookPointer hook) { - auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; - page_table.special_regions.add( - std::make_pair(interval, std::set<Common::SpecialRegion>{region})); -} + return false; + } -void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, - Common::MemoryHookPointer hook) { - auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; - 
page_table.special_regions.subtract( - std::make_pair(interval, std::set<Common::SpecialRegion>{region})); -} + bool IsValidVirtualAddress(VAddr vaddr) const { + return IsValidVirtualAddress(*system.CurrentProcess(), vaddr); + } -/** - * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) - * using a VMA from the current process - */ -static u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) { - const auto& vm_manager = process.VMManager(); - - const auto it = vm_manager.FindVMA(vaddr); - DEBUG_ASSERT(vm_manager.IsValidHandle(it)); - - u8* direct_pointer = nullptr; - const auto& vma = it->second; - switch (vma.type) { - case Kernel::VMAType::AllocatedMemoryBlock: - direct_pointer = vma.backing_block->data() + vma.offset; - break; - case Kernel::VMAType::BackingMemory: - direct_pointer = vma.backing_memory; - break; - case Kernel::VMAType::Free: - return nullptr; - default: - UNREACHABLE(); + /** + * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) + * using a VMA from the current process + */ + u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) { + const auto& vm_manager = process.VMManager(); + + const auto it = vm_manager.FindVMA(vaddr); + DEBUG_ASSERT(vm_manager.IsValidHandle(it)); + + u8* direct_pointer = nullptr; + const auto& vma = it->second; + switch (vma.type) { + case Kernel::VMAType::AllocatedMemoryBlock: + direct_pointer = vma.backing_block->data() + vma.offset; + break; + case Kernel::VMAType::BackingMemory: + direct_pointer = vma.backing_memory; + break; + case Kernel::VMAType::Free: + return nullptr; + default: + UNREACHABLE(); + } + + return direct_pointer + (vaddr - vma.base); } - return direct_pointer + (vaddr - vma.base); -} + /** + * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) + * using a VMA from the current process. 
+ */ + u8* GetPointerFromVMA(VAddr vaddr) { + return GetPointerFromVMA(*system.CurrentProcess(), vaddr); + } -/** - * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) - * using a VMA from the current process. - */ -static u8* GetPointerFromVMA(VAddr vaddr) { - return GetPointerFromVMA(*Core::System::GetInstance().CurrentProcess(), vaddr); -} + u8* GetPointer(const VAddr vaddr) { + u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; + if (page_pointer != nullptr) { + return page_pointer + (vaddr & PAGE_MASK); + } -template <typename T> -T Read(const VAddr vaddr) { - const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; - if (page_pointer) { - // NOTE: Avoid adding any extra logic to this fast-path block - T value; - std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T)); - return value; - } - - Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; - switch (type) { - case Common::PageType::Unmapped: - LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); - return 0; - case Common::PageType::Memory: - ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); - break; - case Common::PageType::RasterizerCachedMemory: { - auto host_ptr{GetPointerFromVMA(vaddr)}; - Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T)); - T value; - std::memcpy(&value, host_ptr, sizeof(T)); - return value; - } - default: - UNREACHABLE(); - } - return {}; -} + if (current_page_table->attributes[vaddr >> PAGE_BITS] == + Common::PageType::RasterizerCachedMemory) { + return GetPointerFromVMA(vaddr); + } -template <typename T> -void Write(const VAddr vaddr, const T data) { - u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; - if (page_pointer) { - // NOTE: Avoid adding any extra logic to this fast-path block - std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T)); - return; - } - - 
Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; - switch (type) { - case Common::PageType::Unmapped: - LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, - static_cast<u32>(data), vaddr); - return; - case Common::PageType::Memory: - ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); - break; - case Common::PageType::RasterizerCachedMemory: { - auto host_ptr{GetPointerFromVMA(vaddr)}; - Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T)); - std::memcpy(host_ptr, &data, sizeof(T)); - break; - } - default: - UNREACHABLE(); + LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr); + return nullptr; } -} -bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) { - const auto& page_table = process.VMManager().page_table; + u8 Read8(const VAddr addr) { + return Read<u8>(addr); + } - const u8* page_pointer = page_table.pointers[vaddr >> PAGE_BITS]; - if (page_pointer) - return true; + u16 Read16(const VAddr addr) { + return Read<u16_le>(addr); + } - if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) - return true; + u32 Read32(const VAddr addr) { + return Read<u32_le>(addr); + } - if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special) - return false; + u64 Read64(const VAddr addr) { + return Read<u64_le>(addr); + } - return false; -} + void Write8(const VAddr addr, const u8 data) { + Write<u8>(addr, data); + } -bool IsValidVirtualAddress(const VAddr vaddr) { - return IsValidVirtualAddress(*Core::System::GetInstance().CurrentProcess(), vaddr); -} + void Write16(const VAddr addr, const u16 data) { + Write<u16_le>(addr, data); + } -bool IsKernelVirtualAddress(const VAddr vaddr) { - return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; -} + void Write32(const VAddr addr, const u32 data) { + Write<u32_le>(addr, data); + } -u8* GetPointer(const VAddr vaddr) { - u8* 
page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; - if (page_pointer) { - return page_pointer + (vaddr & PAGE_MASK); + void Write64(const VAddr addr, const u64 data) { + Write<u64_le>(addr, data); } - if (current_page_table->attributes[vaddr >> PAGE_BITS] == - Common::PageType::RasterizerCachedMemory) { - return GetPointerFromVMA(vaddr); + std::string ReadCString(VAddr vaddr, std::size_t max_length) { + std::string string; + string.reserve(max_length); + for (std::size_t i = 0; i < max_length; ++i) { + const char c = Read8(vaddr); + if (c == '\0') { + break; + } + string.push_back(c); + ++vaddr; + } + string.shrink_to_fit(); + return string; } - LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr); - return nullptr; -} + void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, + const std::size_t size) { + const auto& page_table = process.VMManager().page_table; + + std::size_t remaining_size = size; + std::size_t page_index = src_addr >> PAGE_BITS; + std::size_t page_offset = src_addr & PAGE_MASK; + + while (remaining_size > 0) { + const std::size_t copy_amount = + std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); + const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); + + switch (page_table.attributes[page_index]) { + case Common::PageType::Unmapped: { + LOG_ERROR(HW_Memory, + "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, src_addr, size); + std::memset(dest_buffer, 0, copy_amount); + break; + } + case Common::PageType::Memory: { + DEBUG_ASSERT(page_table.pointers[page_index]); -std::string ReadCString(VAddr vaddr, std::size_t max_length) { - std::string string; - string.reserve(max_length); - for (std::size_t i = 0; i < max_length; ++i) { - char c = Read8(vaddr); - if (c == '\0') - break; - string.push_back(c); - ++vaddr; + const u8* const src_ptr = page_table.pointers[page_index] + page_offset; + 
std::memcpy(dest_buffer, src_ptr, copy_amount); + break; + } + case Common::PageType::RasterizerCachedMemory: { + const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + system.GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); + std::memcpy(dest_buffer, host_ptr, copy_amount); + break; + } + default: + UNREACHABLE(); + } + + page_index++; + page_offset = 0; + dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; + remaining_size -= copy_amount; + } } - string.shrink_to_fit(); - return string; -} -void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { - if (vaddr == 0) { - return; + void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { + ReadBlock(*system.CurrentProcess(), src_addr, dest_buffer, size); } - // Iterate over a contiguous CPU address space, which corresponds to the specified GPU address - // space, marking the region as un/cached. The region is marked un/cached at a granularity of - // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This - // assumes the specified GPU address region is contiguous as well. 
+ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, + const std::size_t size) { + const auto& page_table = process.VMManager().page_table; + std::size_t remaining_size = size; + std::size_t page_index = dest_addr >> PAGE_BITS; + std::size_t page_offset = dest_addr & PAGE_MASK; + + while (remaining_size > 0) { + const std::size_t copy_amount = + std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); + const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); + + switch (page_table.attributes[page_index]) { + case Common::PageType::Unmapped: { + LOG_ERROR(HW_Memory, + "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, dest_addr, size); + break; + } + case Common::PageType::Memory: { + DEBUG_ASSERT(page_table.pointers[page_index]); + + u8* const dest_ptr = page_table.pointers[page_index] + page_offset; + std::memcpy(dest_ptr, src_buffer, copy_amount); + break; + } + case Common::PageType::RasterizerCachedMemory: { + u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); + std::memcpy(host_ptr, src_buffer, copy_amount); + break; + } + default: + UNREACHABLE(); + } + + page_index++; + page_offset = 0; + src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; + remaining_size -= copy_amount; + } + } - u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; - for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { - Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; + void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { + WriteBlock(*system.CurrentProcess(), dest_addr, src_buffer, size); + } - if (cached) { - // Switch page type to cached if now cached - switch (page_type) { - case Common::PageType::Unmapped: - // It is not necessary for a process to have this 
region mapped into its address - // space, for example, a system module need not have a VRAM mapping. + void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { + const auto& page_table = process.VMManager().page_table; + std::size_t remaining_size = size; + std::size_t page_index = dest_addr >> PAGE_BITS; + std::size_t page_offset = dest_addr & PAGE_MASK; + + while (remaining_size > 0) { + const std::size_t copy_amount = + std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); + const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); + + switch (page_table.attributes[page_index]) { + case Common::PageType::Unmapped: { + LOG_ERROR(HW_Memory, + "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, dest_addr, size); break; - case Common::PageType::Memory: - page_type = Common::PageType::RasterizerCachedMemory; - current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr; + } + case Common::PageType::Memory: { + DEBUG_ASSERT(page_table.pointers[page_index]); + + u8* dest_ptr = page_table.pointers[page_index] + page_offset; + std::memset(dest_ptr, 0, copy_amount); break; - case Common::PageType::RasterizerCachedMemory: - // There can be more than one GPU region mapped per CPU region, so it's common that - // this area is already marked as cached. + } + case Common::PageType::RasterizerCachedMemory: { + u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); + std::memset(host_ptr, 0, copy_amount); break; + } default: UNREACHABLE(); } - } else { - // Switch page type to uncached if now uncached - switch (page_type) { - case Common::PageType::Unmapped: - // It is not necessary for a process to have this region mapped into its address - // space, for example, a system module need not have a VRAM mapping. 
+ + page_index++; + page_offset = 0; + remaining_size -= copy_amount; + } + } + + void ZeroBlock(const VAddr dest_addr, const std::size_t size) { + ZeroBlock(*system.CurrentProcess(), dest_addr, size); + } + + void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, + const std::size_t size) { + const auto& page_table = process.VMManager().page_table; + std::size_t remaining_size = size; + std::size_t page_index = src_addr >> PAGE_BITS; + std::size_t page_offset = src_addr & PAGE_MASK; + + while (remaining_size > 0) { + const std::size_t copy_amount = + std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); + const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); + + switch (page_table.attributes[page_index]) { + case Common::PageType::Unmapped: { + LOG_ERROR(HW_Memory, + "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", + current_vaddr, src_addr, size); + ZeroBlock(process, dest_addr, copy_amount); break; - case Common::PageType::Memory: - // There can be more than one GPU region mapped per CPU region, so it's common that - // this area is already unmarked as cached. + } + case Common::PageType::Memory: { + DEBUG_ASSERT(page_table.pointers[page_index]); + const u8* src_ptr = page_table.pointers[page_index] + page_offset; + WriteBlock(process, dest_addr, src_ptr, copy_amount); break; + } case Common::PageType::RasterizerCachedMemory: { - u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); - if (pointer == nullptr) { - // It's possible that this function has been called while updating the pagetable - // after unmapping a VMA. In that case the underlying VMA will no longer exist, - // and we should just leave the pagetable entry blank. 
- page_type = Common::PageType::Unmapped; - } else { - page_type = Common::PageType::Memory; - current_page_table->pointers[vaddr >> PAGE_BITS] = pointer; - } + const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + system.GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); + WriteBlock(process, dest_addr, host_ptr, copy_amount); break; } default: UNREACHABLE(); } + + page_index++; + page_offset = 0; + dest_addr += static_cast<VAddr>(copy_amount); + src_addr += static_cast<VAddr>(copy_amount); + remaining_size -= copy_amount; } } -} -u8 Read8(const VAddr addr) { - return Read<u8>(addr); -} + void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) { + return CopyBlock(*system.CurrentProcess(), dest_addr, src_addr, size); + } -u16 Read16(const VAddr addr) { - return Read<u16_le>(addr); -} + void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) { + if (vaddr == 0) { + return; + } -u32 Read32(const VAddr addr) { - return Read<u32_le>(addr); -} + // Iterate over a contiguous CPU address space, which corresponds to the specified GPU + // address space, marking the region as un/cached. The region is marked un/cached at a + // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size + // is different). This assumes the specified GPU address region is contiguous as well. + + u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; + for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { + Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; + + if (cached) { + // Switch page type to cached if now cached + switch (page_type) { + case Common::PageType::Unmapped: + // It is not necessary for a process to have this region mapped into its address + // space, for example, a system module need not have a VRAM mapping. 
+ break; + case Common::PageType::Memory: + page_type = Common::PageType::RasterizerCachedMemory; + current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr; + break; + case Common::PageType::RasterizerCachedMemory: + // There can be more than one GPU region mapped per CPU region, so it's common + // that this area is already marked as cached. + break; + default: + UNREACHABLE(); + } + } else { + // Switch page type to uncached if now uncached + switch (page_type) { + case Common::PageType::Unmapped: + // It is not necessary for a process to have this region mapped into its address + // space, for example, a system module need not have a VRAM mapping. + break; + case Common::PageType::Memory: + // There can be more than one GPU region mapped per CPU region, so it's common + // that this area is already unmarked as cached. + break; + case Common::PageType::RasterizerCachedMemory: { + u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); + if (pointer == nullptr) { + // It's possible that this function has been called while updating the + // pagetable after unmapping a VMA. In that case the underlying VMA will no + // longer exist, and we should just leave the pagetable entry blank. + page_type = Common::PageType::Unmapped; + } else { + page_type = Common::PageType::Memory; + current_page_table->pointers[vaddr >> PAGE_BITS] = pointer; + } + break; + } + default: + UNREACHABLE(); + } + } + } + } -u64 Read64(const VAddr addr) { - return Read<u64_le>(addr); -} + /** + * Maps a region of pages as a specific type. + * + * @param page_table The page table to use to perform the mapping. + * @param base The base address to begin mapping at. + * @param size The total size of the range in bytes. + * @param memory The memory to map. + * @param type The page type to map the memory as. 
+ */ + void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory, + Common::PageType type) { + LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, + (base + size) * PAGE_SIZE); + + // During boot, current_page_table might not be set yet, in which case we need not flush + if (system.IsPoweredOn()) { + auto& gpu = system.GPU(); + for (u64 i = 0; i < size; i++) { + const auto page = base + i; + if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) { + gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE); + } + } + } -void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, - const std::size_t size) { - const auto& page_table = process.VMManager().page_table; - - std::size_t remaining_size = size; - std::size_t page_index = src_addr >> PAGE_BITS; - std::size_t page_offset = src_addr & PAGE_MASK; - - while (remaining_size > 0) { - const std::size_t copy_amount = - std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); - const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); - - switch (page_table.attributes[page_index]) { - case Common::PageType::Unmapped: { - LOG_ERROR(HW_Memory, - "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", - current_vaddr, src_addr, size); - std::memset(dest_buffer, 0, copy_amount); - break; + const VAddr end = base + size; + ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", + base + page_table.pointers.size()); + + std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); + + if (memory == nullptr) { + std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, + memory); + } else { + while (base != end) { + page_table.pointers[base] = memory; + + base += 1; + memory += PAGE_SIZE; + } + } + } + + /** + * Reads a particular data type out of memory at the given 
virtual address. + * + * @param vaddr The virtual address to read the data type from. + * + * @tparam T The data type to read out of memory. This type *must* be + * trivially copyable, otherwise the behavior of this function + * is undefined. + * + * @returns The instance of T read from the specified virtual address. + */ + template <typename T> + T Read(const VAddr vaddr) { + const u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; + if (page_pointer != nullptr) { + // NOTE: Avoid adding any extra logic to this fast-path block + T value; + std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T)); + return value; } - case Common::PageType::Memory: { - DEBUG_ASSERT(page_table.pointers[page_index]); - const u8* src_ptr = page_table.pointers[page_index] + page_offset; - std::memcpy(dest_buffer, src_ptr, copy_amount); + const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; + switch (type) { + case Common::PageType::Unmapped: + LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr); + return 0; + case Common::PageType::Memory: + ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); break; + case Common::PageType::RasterizerCachedMemory: { + const u8* const host_ptr = GetPointerFromVMA(vaddr); + system.GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T)); + T value; + std::memcpy(&value, host_ptr, sizeof(T)); + return value; + } + default: + UNREACHABLE(); } + return {}; + } + + /** + * Writes a particular data type to memory at the given virtual address. + * + * @param vaddr The virtual address to write the data type to. + * + * @tparam T The data type to write to memory. This type *must* be + * trivially copyable, otherwise the behavior of this function + * is undefined. + * + * @returns The instance of T write to the specified virtual address. 
+ */ + template <typename T> + void Write(const VAddr vaddr, const T data) { + u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; + if (page_pointer != nullptr) { + // NOTE: Avoid adding any extra logic to this fast-path block + std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T)); + return; + } + + const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; + switch (type) { + case Common::PageType::Unmapped: + LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, + static_cast<u32>(data), vaddr); + return; + case Common::PageType::Memory: + ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); + break; case Common::PageType::RasterizerCachedMemory: { - const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; - Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); - std::memcpy(dest_buffer, host_ptr, copy_amount); + u8* const host_ptr{GetPointerFromVMA(vaddr)}; + system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T)); + std::memcpy(host_ptr, &data, sizeof(T)); break; } default: UNREACHABLE(); } - - page_index++; - page_offset = 0; - dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount; - remaining_size -= copy_amount; } + + Common::PageTable* current_page_table = nullptr; + Core::System& system; +}; + +Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {} +Memory::~Memory() = default; + +void Memory::SetCurrentPageTable(Kernel::Process& process) { + impl->SetCurrentPageTable(process); } -void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { - ReadBlock(*Core::System::GetInstance().CurrentProcess(), src_addr, dest_buffer, size); +void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { + impl->MapMemoryRegion(page_table, base, size, target); } -void Write8(const VAddr addr, const u8 data) { - Write<u8>(addr, data); +void 
Memory::MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer mmio_handler) { + impl->MapIoRegion(page_table, base, size, std::move(mmio_handler)); } -void Write16(const VAddr addr, const u16 data) { - Write<u16_le>(addr, data); +void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { + impl->UnmapRegion(page_table, base, size); } -void Write32(const VAddr addr, const u32 data) { - Write<u32_le>(addr, data); +void Memory::AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer hook) { + impl->AddDebugHook(page_table, base, size, std::move(hook)); } -void Write64(const VAddr addr, const u64 data) { - Write<u64_le>(addr, data); +void Memory::RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer hook) { + impl->RemoveDebugHook(page_table, base, size, std::move(hook)); } -void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, - const std::size_t size) { - const auto& page_table = process.VMManager().page_table; - std::size_t remaining_size = size; - std::size_t page_index = dest_addr >> PAGE_BITS; - std::size_t page_offset = dest_addr & PAGE_MASK; - - while (remaining_size > 0) { - const std::size_t copy_amount = - std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); - const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); - - switch (page_table.attributes[page_index]) { - case Common::PageType::Unmapped: { - LOG_ERROR(HW_Memory, - "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", - current_vaddr, dest_addr, size); - break; - } - case Common::PageType::Memory: { - DEBUG_ASSERT(page_table.pointers[page_index]); +bool Memory::IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const { + return impl->IsValidVirtualAddress(process, vaddr); +} - u8* dest_ptr = page_table.pointers[page_index] 
+ page_offset; - std::memcpy(dest_ptr, src_buffer, copy_amount); - break; - } - case Common::PageType::RasterizerCachedMemory: { - const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; - Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); - std::memcpy(host_ptr, src_buffer, copy_amount); - break; - } - default: - UNREACHABLE(); - } +bool Memory::IsValidVirtualAddress(const VAddr vaddr) const { + return impl->IsValidVirtualAddress(vaddr); +} - page_index++; - page_offset = 0; - src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; - remaining_size -= copy_amount; - } +u8* Memory::GetPointer(VAddr vaddr) { + return impl->GetPointer(vaddr); } -void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { - WriteBlock(*Core::System::GetInstance().CurrentProcess(), dest_addr, src_buffer, size); +const u8* Memory::GetPointer(VAddr vaddr) const { + return impl->GetPointer(vaddr); } -void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; - std::size_t remaining_size = size; - std::size_t page_index = dest_addr >> PAGE_BITS; - std::size_t page_offset = dest_addr & PAGE_MASK; - - while (remaining_size > 0) { - const std::size_t copy_amount = - std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); - const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); - - switch (page_table.attributes[page_index]) { - case Common::PageType::Unmapped: { - LOG_ERROR(HW_Memory, - "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", - current_vaddr, dest_addr, size); - break; - } - case Common::PageType::Memory: { - DEBUG_ASSERT(page_table.pointers[page_index]); +u8 Memory::Read8(const VAddr addr) { + return impl->Read8(addr); +} - u8* dest_ptr = page_table.pointers[page_index] + page_offset; - std::memset(dest_ptr, 0, copy_amount); - 
break; - } - case Common::PageType::RasterizerCachedMemory: { - const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; - Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount); - std::memset(host_ptr, 0, copy_amount); - break; - } - default: - UNREACHABLE(); - } +u16 Memory::Read16(const VAddr addr) { + return impl->Read16(addr); +} - page_index++; - page_offset = 0; - remaining_size -= copy_amount; - } +u32 Memory::Read32(const VAddr addr) { + return impl->Read32(addr); } -void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, - const std::size_t size) { - const auto& page_table = process.VMManager().page_table; - std::size_t remaining_size = size; - std::size_t page_index = src_addr >> PAGE_BITS; - std::size_t page_offset = src_addr & PAGE_MASK; - - while (remaining_size > 0) { - const std::size_t copy_amount = - std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size); - const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); - - switch (page_table.attributes[page_index]) { - case Common::PageType::Unmapped: { - LOG_ERROR(HW_Memory, - "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})", - current_vaddr, src_addr, size); - ZeroBlock(process, dest_addr, copy_amount); - break; - } - case Common::PageType::Memory: { - DEBUG_ASSERT(page_table.pointers[page_index]); - const u8* src_ptr = page_table.pointers[page_index] + page_offset; - WriteBlock(process, dest_addr, src_ptr, copy_amount); - break; - } - case Common::PageType::RasterizerCachedMemory: { - const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)}; - Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount); - WriteBlock(process, dest_addr, host_ptr, copy_amount); - break; - } - default: - UNREACHABLE(); - } +u64 Memory::Read64(const VAddr addr) { + return impl->Read64(addr); +} - page_index++; - page_offset = 0; - dest_addr += 
static_cast<VAddr>(copy_amount); - src_addr += static_cast<VAddr>(copy_amount); - remaining_size -= copy_amount; - } +void Memory::Write8(VAddr addr, u8 data) { + impl->Write8(addr, data); +} + +void Memory::Write16(VAddr addr, u16 data) { + impl->Write16(addr, data); +} + +void Memory::Write32(VAddr addr, u32 data) { + impl->Write32(addr, data); +} + +void Memory::Write64(VAddr addr, u64 data) { + impl->Write64(addr, data); +} + +std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) { + return impl->ReadCString(vaddr, max_length); +} + +void Memory::ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, + const std::size_t size) { + impl->ReadBlock(process, src_addr, dest_buffer, size); +} + +void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) { + impl->ReadBlock(src_addr, dest_buffer, size); +} + +void Memory::WriteBlock(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer, + std::size_t size) { + impl->WriteBlock(process, dest_addr, src_buffer, size); +} + +void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) { + impl->WriteBlock(dest_addr, src_buffer, size); } -void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) { - CopyBlock(*Core::System::GetInstance().CurrentProcess(), dest_addr, src_addr, size); +void Memory::ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size) { + impl->ZeroBlock(process, dest_addr, size); +} + +void Memory::ZeroBlock(VAddr dest_addr, std::size_t size) { + impl->ZeroBlock(dest_addr, size); +} + +void Memory::CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, + const std::size_t size) { + impl->CopyBlock(process, dest_addr, src_addr, size); +} + +void Memory::CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) { + impl->CopyBlock(dest_addr, src_addr, size); +} + +void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool 
cached) { + impl->RasterizerMarkRegionCached(vaddr, size, cached); +} + +bool IsKernelVirtualAddress(const VAddr vaddr) { + return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; } } // namespace Memory diff --git a/src/core/memory.h b/src/core/memory.h index 09008e1dd..1428a6d60 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -5,8 +5,18 @@ #pragma once #include <cstddef> +#include <memory> #include <string> #include "common/common_types.h" +#include "common/memory_hook.h" + +namespace Common { +struct PageTable; +} + +namespace Core { +class System; +} namespace Kernel { class Process; @@ -36,41 +46,369 @@ enum : VAddr { KERNEL_REGION_END = KERNEL_REGION_VADDR + KERNEL_REGION_SIZE, }; -/// Changes the currently active page table to that of -/// the given process instance. -void SetCurrentPageTable(Kernel::Process& process); +/// Central class that handles all memory operations and state. +class Memory { +public: + explicit Memory(Core::System& system); + ~Memory(); -/// Determines if the given VAddr is valid for the specified process. -bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr); -bool IsValidVirtualAddress(VAddr vaddr); -/// Determines if the given VAddr is a kernel address -bool IsKernelVirtualAddress(VAddr vaddr); + Memory(const Memory&) = delete; + Memory& operator=(const Memory&) = delete; -u8 Read8(VAddr addr); -u16 Read16(VAddr addr); -u32 Read32(VAddr addr); -u64 Read64(VAddr addr); + Memory(Memory&&) = default; + Memory& operator=(Memory&&) = default; -void Write8(VAddr addr, u8 data); -void Write16(VAddr addr, u16 data); -void Write32(VAddr addr, u32 data); -void Write64(VAddr addr, u64 data); + /** + * Changes the currently active page table to that of the given process instance. + * + * @param process The process to use the page table of. 
+ */ + void SetCurrentPageTable(Kernel::Process& process); -void ReadBlock(const Kernel::Process& process, VAddr src_addr, void* dest_buffer, std::size_t size); -void ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size); -void WriteBlock(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer, - std::size_t size); -void WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size); -void ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size); -void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size); + /** + * Maps an allocated buffer onto a region of the emulated process address space. + * + * @param page_table The page table of the emulated process. + * @param base The address to start mapping at. Must be page-aligned. + * @param size The amount of bytes to map. Must be page-aligned. + * @param target Buffer with the memory backing the mapping. Must be of length at least + * `size`. + */ + void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target); -u8* GetPointer(VAddr vaddr); + /** + * Maps a region of the emulated process address space as a IO region. + * + * @param page_table The page table of the emulated process. + * @param base The address to start mapping at. Must be page-aligned. + * @param size The amount of bytes to map. Must be page-aligned. + * @param mmio_handler The handler that backs the mapping. + */ + void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer mmio_handler); -std::string ReadCString(VAddr vaddr, std::size_t max_length); + /** + * Unmaps a region of the emulated process address space. + * + * @param page_table The page table of the emulated process. + * @param base The address to begin unmapping at. + * @param size The amount of bytes to unmap. + */ + void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size); -/** - * Mark each page touching the region as cached. 
- */ -void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached); + /** + * Adds a memory hook to intercept reads and writes to given region of memory. + * + * @param page_table The page table of the emulated process + * @param base The starting address to apply the hook to. + * @param size The size of the memory region to apply the hook to, in bytes. + * @param hook The hook to apply to the region of memory. + */ + void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer hook); + + /** + * Removes a memory hook from a given range of memory. + * + * @param page_table The page table of the emulated process. + * @param base The starting address to remove the hook from. + * @param size The size of the memory region to remove the hook from, in bytes. + * @param hook The hook to remove from the specified region of memory. + */ + void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, + Common::MemoryHookPointer hook); + + /** + * Checks whether or not the supplied address is a valid virtual + * address for the given process. + * + * @param process The emulated process to check the address against. + * @param vaddr The virtual address to check the validity of. + * + * @returns True if the given virtual address is valid, false otherwise. + */ + bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr) const; + + /** + * Checks whether or not the supplied address is a valid virtual + * address for the current process. + * + * @param vaddr The virtual address to check the validity of. + * + * @returns True if the given virtual address is valid, false otherwise. + */ + bool IsValidVirtualAddress(VAddr vaddr) const; + + /** + * Gets a pointer to the given address. + * + * @param vaddr Virtual address to retrieve a pointer to. + * + * @returns The pointer to the given address, if the address is valid. + * If the address is not valid, nullptr will be returned. 
+ */ + u8* GetPointer(VAddr vaddr); + + /** + * Gets a pointer to the given address. + * + * @param vaddr Virtual address to retrieve a pointer to. + * + * @returns The pointer to the given address, if the address is valid. + * If the address is not valid, nullptr will be returned. + */ + const u8* GetPointer(VAddr vaddr) const; + + /** + * Reads an 8-bit unsigned value from the current process' address space + * at the given virtual address. + * + * @param addr The virtual address to read the 8-bit value from. + * + * @returns the read 8-bit unsigned value. + */ + u8 Read8(VAddr addr); + + /** + * Reads a 16-bit unsigned value from the current process' address space + * at the given virtual address. + * + * @param addr The virtual address to read the 16-bit value from. + * + * @returns the read 16-bit unsigned value. + */ + u16 Read16(VAddr addr); + + /** + * Reads a 32-bit unsigned value from the current process' address space + * at the given virtual address. + * + * @param addr The virtual address to read the 32-bit value from. + * + * @returns the read 32-bit unsigned value. + */ + u32 Read32(VAddr addr); + + /** + * Reads a 64-bit unsigned value from the current process' address space + * at the given virtual address. + * + * @param addr The virtual address to read the 64-bit value from. + * + * @returns the read 64-bit value. + */ + u64 Read64(VAddr addr); + + /** + * Writes an 8-bit unsigned integer to the given virtual address in + * the current process' address space. + * + * @param addr The virtual address to write the 8-bit unsigned integer to. + * @param data The 8-bit unsigned integer to write to the given virtual address. + * + * @post The memory at the given virtual address contains the specified data value. + */ + void Write8(VAddr addr, u8 data); + + /** + * Writes a 16-bit unsigned integer to the given virtual address in + * the current process' address space. + * + * @param addr The virtual address to write the 16-bit unsigned integer to. 
+ * @param data The 16-bit unsigned integer to write to the given virtual address. + * + * @post The memory range [addr, sizeof(data)) contains the given data value. + */ + void Write16(VAddr addr, u16 data); + + /** + * Writes a 32-bit unsigned integer to the given virtual address in + * the current process' address space. + * + * @param addr The virtual address to write the 32-bit unsigned integer to. + * @param data The 32-bit unsigned integer to write to the given virtual address. + * + * @post The memory range [addr, sizeof(data)) contains the given data value. + */ + void Write32(VAddr addr, u32 data); + + /** + * Writes a 64-bit unsigned integer to the given virtual address in + * the current process' address space. + * + * @param addr The virtual address to write the 64-bit unsigned integer to. + * @param data The 64-bit unsigned integer to write to the given virtual address. + * + * @post The memory range [addr, sizeof(data)) contains the given data value. + */ + void Write64(VAddr addr, u64 data); + + /** + * Reads a null-terminated string from the given virtual address. + * This function will continually read characters until either: + * + * - A null character ('\0') is reached. + * - max_length characters have been read. + * + * @note The final null-terminating character (if found) is not included + * in the returned string. + * + * @param vaddr The address to begin reading the string from. + * @param max_length The maximum length of the string to read in characters. + * + * @returns The read string. + */ + std::string ReadCString(VAddr vaddr, std::size_t max_length); + + /** + * Reads a contiguous block of bytes from a specified process' address space. + * + * @param process The process to read the data from. + * @param src_addr The virtual address to begin reading from. + * @param dest_buffer The buffer to place the read bytes into. + * @param size The amount of data to read, in bytes. 
+ * + * @note If a size of 0 is specified, then this function reads nothing and + * no attempts to access memory are made at all. + * + * @pre dest_buffer must be at least size bytes in length, otherwise a + * buffer overrun will occur. + * + * @post The range [dest_buffer, size) contains the read bytes from the + * process' address space. + */ + void ReadBlock(const Kernel::Process& process, VAddr src_addr, void* dest_buffer, + std::size_t size); + + /** + * Reads a contiguous block of bytes from the current process' address space. + * + * @param src_addr The virtual address to begin reading from. + * @param dest_buffer The buffer to place the read bytes into. + * @param size The amount of data to read, in bytes. + * + * @note If a size of 0 is specified, then this function reads nothing and + * no attempts to access memory are made at all. + * + * @pre dest_buffer must be at least size bytes in length, otherwise a + * buffer overrun will occur. + * + * @post The range [dest_buffer, size) contains the read bytes from the + * current process' address space. + */ + void ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size); + + /** + * Writes a range of bytes into a given process' address space at the specified + * virtual address. + * + * @param process The process to write data into the address space of. + * @param dest_addr The destination virtual address to begin writing the data at. + * @param src_buffer The data to write into the process' address space. + * @param size The size of the data to write, in bytes. + * + * @post The address range [dest_addr, size) in the process' address space + * contains the data that was within src_buffer. + * + * @post If an attempt is made to write into an unmapped region of memory, the writes + * will be ignored and an error will be logged. 
+ * + * @post If a write is performed into a region of memory that is considered cached + * rasterizer memory, will cause the currently active rasterizer to be notified + * and will mark that region as invalidated to caches that the active + * graphics backend may be maintaining over the course of execution. + */ + void WriteBlock(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer, + std::size_t size); + + /** + * Writes a range of bytes into the current process' address space at the specified + * virtual address. + * + * @param dest_addr The destination virtual address to begin writing the data at. + * @param src_buffer The data to write into the current process' address space. + * @param size The size of the data to write, in bytes. + * + * @post The address range [dest_addr, size) in the current process' address space + * contains the data that was within src_buffer. + * + * @post If an attempt is made to write into an unmapped region of memory, the writes + * will be ignored and an error will be logged. + * + * @post If a write is performed into a region of memory that is considered cached + * rasterizer memory, will cause the currently active rasterizer to be notified + * and will mark that region as invalidated to caches that the active + * graphics backend may be maintaining over the course of execution. + */ + void WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size); + + /** + * Fills the specified address range within a process' address space with zeroes. + * + * @param process The process that will have a portion of its memory zeroed out. + * @param dest_addr The starting virtual address of the range to zero out. + * @param size The size of the address range to zero out, in bytes. + * + * @post The range [dest_addr, size) within the process' address space is + * filled with zeroes. 
+ */ + void ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size); + + /** + * Fills the specified address range within the current process' address space with zeroes. + * + * @param dest_addr The starting virtual address of the range to zero out. + * @param size The size of the address range to zero out, in bytes. + * + * @post The range [dest_addr, size) within the current process' address space is + * filled with zeroes. + */ + void ZeroBlock(VAddr dest_addr, std::size_t size); + + /** + * Copies data within a process' address space to another location within the + * same address space. + * + * @param process The process that will have data copied within its address space. + * @param dest_addr The destination virtual address to begin copying the data into. + * @param src_addr The source virtual address to begin copying the data from. + * @param size The size of the data to copy, in bytes. + * + * @post The range [dest_addr, size) within the process' address space contains the + * same data within the range [src_addr, size). + */ + void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, + std::size_t size); + + /** + * Copies data within the current process' address space to another location within the + * same address space. + * + * @param dest_addr The destination virtual address to begin copying the data into. + * @param src_addr The source virtual address to begin copying the data from. + * @param size The size of the data to copy, in bytes. + * + * @post The range [dest_addr, size) within the current process' address space + * contains the same data within the range [src_addr, size). + */ + void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size); + + /** + * Marks each page within the specified address range as cached or uncached. + * + * @param vaddr The virtual address indicating the start of the address range. + * @param size The size of the address range in bytes. 
+ * @param cached Whether or not any pages within the address range should be + * marked as cached or uncached. + */ + void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached); + +private: + struct Impl; + std::unique_ptr<Impl> impl; +}; + +/// Determines if the given VAddr is a kernel address +bool IsKernelVirtualAddress(VAddr vaddr); } // namespace Memory diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp index 10821d452..d1e6bed93 100644 --- a/src/core/memory/cheat_engine.cpp +++ b/src/core/memory/cheat_engine.cpp @@ -20,18 +20,17 @@ namespace Memory { constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 12); constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF; -StandardVmCallbacks::StandardVmCallbacks(const Core::System& system, - const CheatProcessMetadata& metadata) +StandardVmCallbacks::StandardVmCallbacks(Core::System& system, const CheatProcessMetadata& metadata) : metadata(metadata), system(system) {} StandardVmCallbacks::~StandardVmCallbacks() = default; void StandardVmCallbacks::MemoryRead(VAddr address, void* data, u64 size) { - ReadBlock(SanitizeAddress(address), data, size); + system.Memory().ReadBlock(SanitizeAddress(address), data, size); } void StandardVmCallbacks::MemoryWrite(VAddr address, const void* data, u64 size) { - WriteBlock(SanitizeAddress(address), data, size); + system.Memory().WriteBlock(SanitizeAddress(address), data, size); } u64 StandardVmCallbacks::HidKeysDown() { @@ -186,7 +185,7 @@ CheatEngine::~CheatEngine() { } void CheatEngine::Initialize() { - event = core_timing.RegisterEvent( + event = Core::Timing::CreateEvent( "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id), [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); }); core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event); diff --git a/src/core/memory/cheat_engine.h b/src/core/memory/cheat_engine.h index 0f012e9b5..3d6b2298a 100644 --- 
a/src/core/memory/cheat_engine.h +++ b/src/core/memory/cheat_engine.h @@ -5,6 +5,7 @@ #pragma once #include <atomic> +#include <memory> #include <vector> #include "common/common_types.h" #include "core/memory/dmnt_cheat_types.h" @@ -23,7 +24,7 @@ namespace Memory { class StandardVmCallbacks : public DmntCheatVm::Callbacks { public: - StandardVmCallbacks(const Core::System& system, const CheatProcessMetadata& metadata); + StandardVmCallbacks(Core::System& system, const CheatProcessMetadata& metadata); ~StandardVmCallbacks() override; void MemoryRead(VAddr address, void* data, u64 size) override; @@ -36,7 +37,7 @@ private: VAddr SanitizeAddress(VAddr address) const; const CheatProcessMetadata& metadata; - const Core::System& system; + Core::System& system; }; // Intermediary class that parses a text file or other disk format for storing cheats into a @@ -78,7 +79,7 @@ private: std::vector<CheatEntry> cheats; std::atomic_bool is_pending_reload{false}; - Core::Timing::EventType* event{}; + std::shared_ptr<Core::Timing::EventType> event; Core::Timing::CoreTiming& core_timing; Core::System& system; }; diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h deleted file mode 100644 index 5225ee8e2..000000000 --- a/src/core/memory_setup.h +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include "common/common_types.h" -#include "common/memory_hook.h" - -namespace Common { -struct PageTable; -} - -namespace Memory { - -/** - * Maps an allocated buffer onto a region of the emulated process address space. - * - * @param page_table The page table of the emulated process. - * @param base The address to start mapping at. Must be page-aligned. - * @param size The amount of bytes to map. Must be page-aligned. - * @param target Buffer with the memory backing the mapping. Must be of length at least `size`. 
- */ -void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target); - -/** - * Maps a region of the emulated process address space as a IO region. - * @param page_table The page table of the emulated process. - * @param base The address to start mapping at. Must be page-aligned. - * @param size The amount of bytes to map. Must be page-aligned. - * @param mmio_handler The handler that backs the mapping. - */ -void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, - Common::MemoryHookPointer mmio_handler); - -void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size); - -void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, - Common::MemoryHookPointer hook); -void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, - Common::MemoryHookPointer hook); - -} // namespace Memory diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp index d2c69d1a0..f1ae9d4df 100644 --- a/src/core/perf_stats.cpp +++ b/src/core/perf_stats.cpp @@ -81,7 +81,7 @@ double PerfStats::GetMeanFrametime() { return 0; } const double sum = std::accumulate(perf_history.begin() + IgnoreFrames, - perf_history.begin() + current_index, 0); + perf_history.begin() + current_index, 0.0); return sum / (current_index - IgnoreFrames); } diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp index 6f4af77fd..f95eee3b1 100644 --- a/src/core/reporter.cpp +++ b/src/core/reporter.cpp @@ -147,7 +147,7 @@ json GetFullDataAuto(const std::string& timestamp, u64 title_id, Core::System& s } template <bool read_value, typename DescriptorType> -json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer) { +json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer, Memory::Memory& memory) { auto buffer_out = json::array(); for (const auto& desc : buffer) { auto entry = json{ @@ -157,7 +157,7 @@ json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer) { if constexpr (read_value) { 
std::vector<u8> data(desc.Size()); - Memory::ReadBlock(desc.Address(), data.data(), desc.Size()); + memory.ReadBlock(desc.Address(), data.data(), desc.Size()); entry["data"] = Common::HexToString(data); } @@ -167,7 +167,7 @@ json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer) { return buffer_out; } -json GetHLERequestContextData(Kernel::HLERequestContext& ctx) { +json GetHLERequestContextData(Kernel::HLERequestContext& ctx, Memory::Memory& memory) { json out; auto cmd_buf = json::array(); @@ -177,10 +177,10 @@ json GetHLERequestContextData(Kernel::HLERequestContext& ctx) { out["command_buffer"] = std::move(cmd_buf); - out["buffer_descriptor_a"] = GetHLEBufferDescriptorData<true>(ctx.BufferDescriptorA()); - out["buffer_descriptor_b"] = GetHLEBufferDescriptorData<false>(ctx.BufferDescriptorB()); - out["buffer_descriptor_c"] = GetHLEBufferDescriptorData<false>(ctx.BufferDescriptorC()); - out["buffer_descriptor_x"] = GetHLEBufferDescriptorData<true>(ctx.BufferDescriptorX()); + out["buffer_descriptor_a"] = GetHLEBufferDescriptorData<true>(ctx.BufferDescriptorA(), memory); + out["buffer_descriptor_b"] = GetHLEBufferDescriptorData<false>(ctx.BufferDescriptorB(), memory); + out["buffer_descriptor_c"] = GetHLEBufferDescriptorData<false>(ctx.BufferDescriptorC(), memory); + out["buffer_descriptor_x"] = GetHLEBufferDescriptorData<true>(ctx.BufferDescriptorX(), memory); return out; } @@ -259,7 +259,7 @@ void Reporter::SaveUnimplementedFunctionReport(Kernel::HLERequestContext& ctx, u const auto title_id = system.CurrentProcess()->GetTitleID(); auto out = GetFullDataAuto(timestamp, title_id, system); - auto function_out = GetHLERequestContextData(ctx); + auto function_out = GetHLERequestContextData(ctx, system.Memory()); function_out["command_id"] = command_id; function_out["function_name"] = name; function_out["service_name"] = service_name; diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp index 793d102d3..320e8ad73 100644 --- 
a/src/core/telemetry_session.cpp +++ b/src/core/telemetry_session.cpp @@ -165,24 +165,20 @@ void TelemetrySession::AddInitialInfo(Loader::AppLoader& app_loader) { Telemetry::AppendOSInfo(field_collection); // Log user configuration information - AddField(Telemetry::FieldType::UserConfig, "Audio_SinkId", Settings::values.sink_id); - AddField(Telemetry::FieldType::UserConfig, "Audio_EnableAudioStretching", - Settings::values.enable_audio_stretching); - AddField(Telemetry::FieldType::UserConfig, "Core_UseMultiCore", - Settings::values.use_multi_core); - AddField(Telemetry::FieldType::UserConfig, "Renderer_ResolutionFactor", - Settings::values.resolution_factor); - AddField(Telemetry::FieldType::UserConfig, "Renderer_UseFrameLimit", - Settings::values.use_frame_limit); - AddField(Telemetry::FieldType::UserConfig, "Renderer_FrameLimit", Settings::values.frame_limit); - AddField(Telemetry::FieldType::UserConfig, "Renderer_UseDiskShaderCache", - Settings::values.use_disk_shader_cache); - AddField(Telemetry::FieldType::UserConfig, "Renderer_UseAccurateGpuEmulation", + constexpr auto field_type = Telemetry::FieldType::UserConfig; + AddField(field_type, "Audio_SinkId", Settings::values.sink_id); + AddField(field_type, "Audio_EnableAudioStretching", Settings::values.enable_audio_stretching); + AddField(field_type, "Core_UseMultiCore", Settings::values.use_multi_core); + AddField(field_type, "Renderer_Backend", "OpenGL"); + AddField(field_type, "Renderer_ResolutionFactor", Settings::values.resolution_factor); + AddField(field_type, "Renderer_UseFrameLimit", Settings::values.use_frame_limit); + AddField(field_type, "Renderer_FrameLimit", Settings::values.frame_limit); + AddField(field_type, "Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache); + AddField(field_type, "Renderer_UseAccurateGpuEmulation", Settings::values.use_accurate_gpu_emulation); - AddField(Telemetry::FieldType::UserConfig, "Renderer_UseAsynchronousGpuEmulation", + AddField(field_type, 
"Renderer_UseAsynchronousGpuEmulation", Settings::values.use_asynchronous_gpu_emulation); - AddField(Telemetry::FieldType::UserConfig, "System_UseDockedMode", - Settings::values.use_docked_mode); + AddField(field_type, "System_UseDockedMode", Settings::values.use_docked_mode); } bool TelemetrySession::SubmitTestcase() { diff --git a/src/core/tools/freezer.cpp b/src/core/tools/freezer.cpp index 17f050068..55e0dbc49 100644 --- a/src/core/tools/freezer.cpp +++ b/src/core/tools/freezer.cpp @@ -11,40 +11,39 @@ #include "core/tools/freezer.h" namespace Tools { - namespace { constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 60); -u64 MemoryReadWidth(u32 width, VAddr addr) { +u64 MemoryReadWidth(Memory::Memory& memory, u32 width, VAddr addr) { switch (width) { case 1: - return Memory::Read8(addr); + return memory.Read8(addr); case 2: - return Memory::Read16(addr); + return memory.Read16(addr); case 4: - return Memory::Read32(addr); + return memory.Read32(addr); case 8: - return Memory::Read64(addr); + return memory.Read64(addr); default: UNREACHABLE(); return 0; } } -void MemoryWriteWidth(u32 width, VAddr addr, u64 value) { +void MemoryWriteWidth(Memory::Memory& memory, u32 width, VAddr addr, u64 value) { switch (width) { case 1: - Memory::Write8(addr, static_cast<u8>(value)); + memory.Write8(addr, static_cast<u8>(value)); break; case 2: - Memory::Write16(addr, static_cast<u16>(value)); + memory.Write16(addr, static_cast<u16>(value)); break; case 4: - Memory::Write32(addr, static_cast<u32>(value)); + memory.Write32(addr, static_cast<u32>(value)); break; case 8: - Memory::Write64(addr, value); + memory.Write64(addr, value); break; default: UNREACHABLE(); @@ -53,8 +52,9 @@ void MemoryWriteWidth(u32 width, VAddr addr, u64 value) { } // Anonymous namespace -Freezer::Freezer(Core::Timing::CoreTiming& core_timing) : core_timing(core_timing) { - event = core_timing.RegisterEvent( +Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, 
Memory::Memory& memory_) + : core_timing{core_timing_}, memory{memory_} { + event = Core::Timing::CreateEvent( "MemoryFreezer::FrameCallback", [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); }); core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event); @@ -89,7 +89,7 @@ void Freezer::Clear() { u64 Freezer::Freeze(VAddr address, u32 width) { std::lock_guard lock{entries_mutex}; - const auto current_value = MemoryReadWidth(width, address); + const auto current_value = MemoryReadWidth(memory, width, address); entries.push_back({address, width, current_value}); LOG_DEBUG(Common_Memory, @@ -169,7 +169,7 @@ void Freezer::FrameCallback(u64 userdata, s64 cycles_late) { LOG_DEBUG(Common_Memory, "Enforcing memory freeze at address={:016X}, value={:016X}, width={:02X}", entry.address, entry.value, entry.width); - MemoryWriteWidth(entry.width, entry.address, entry.value); + MemoryWriteWidth(memory, entry.width, entry.address, entry.value); } core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS - cycles_late, event); @@ -181,7 +181,7 @@ void Freezer::FillEntryReads() { LOG_DEBUG(Common_Memory, "Updating memory freeze entries to current values."); for (auto& entry : entries) { - entry.value = MemoryReadWidth(entry.width, entry.address); + entry.value = MemoryReadWidth(memory, entry.width, entry.address); } } diff --git a/src/core/tools/freezer.h b/src/core/tools/freezer.h index b58de5472..916339c6c 100644 --- a/src/core/tools/freezer.h +++ b/src/core/tools/freezer.h @@ -5,6 +5,7 @@ #pragma once #include <atomic> +#include <memory> #include <mutex> #include <optional> #include <vector> @@ -15,6 +16,10 @@ class CoreTiming; struct EventType; } // namespace Core::Timing +namespace Memory { +class Memory; +} + namespace Tools { /** @@ -33,7 +38,7 @@ public: u64 value; }; - explicit Freezer(Core::Timing::CoreTiming& core_timing); + explicit Freezer(Core::Timing::CoreTiming& core_timing_, Memory::Memory& memory_); ~Freezer(); // Enables or disables the entire 
memory freezer. @@ -75,8 +80,9 @@ private: mutable std::mutex entries_mutex; std::vector<Entry> entries; - Core::Timing::EventType* event; + std::shared_ptr<Core::Timing::EventType> event; Core::Timing::CoreTiming& core_timing; + Memory::Memory& memory; }; } // namespace Tools diff --git a/src/tests/core/arm/arm_test_common.cpp b/src/tests/core/arm/arm_test_common.cpp index ac7ae3e52..17043346b 100644 --- a/src/tests/core/arm/arm_test_common.cpp +++ b/src/tests/core/arm/arm_test_common.cpp @@ -8,7 +8,6 @@ #include "core/core.h" #include "core/hle/kernel/process.h" #include "core/memory.h" -#include "core/memory_setup.h" #include "tests/core/arm/arm_test_common.h" namespace ArmTests { @@ -16,8 +15,9 @@ namespace ArmTests { TestEnvironment::TestEnvironment(bool mutable_memory_) : mutable_memory(mutable_memory_), test_memory(std::make_shared<TestMemory>(this)), kernel{Core::System::GetInstance()} { - auto process = Kernel::Process::Create(Core::System::GetInstance(), "", - Kernel::Process::ProcessType::Userland); + auto& system = Core::System::GetInstance(); + + auto process = Kernel::Process::Create(system, "", Kernel::Process::ProcessType::Userland); page_table = &process->VMManager().page_table; std::fill(page_table->pointers.begin(), page_table->pointers.end(), nullptr); @@ -25,15 +25,16 @@ TestEnvironment::TestEnvironment(bool mutable_memory_) std::fill(page_table->attributes.begin(), page_table->attributes.end(), Common::PageType::Unmapped); - Memory::MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory); - Memory::MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory); + system.Memory().MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory); + system.Memory().MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory); kernel.MakeCurrentProcess(process.get()); } TestEnvironment::~TestEnvironment() { - Memory::UnmapRegion(*page_table, 0x80000000, 0x80000000); - Memory::UnmapRegion(*page_table, 0x00000000, 0x80000000); + auto& system = 
Core::System::GetInstance(); + system.Memory().UnmapRegion(*page_table, 0x80000000, 0x80000000); + system.Memory().UnmapRegion(*page_table, 0x00000000, 0x80000000); } void TestEnvironment::SetMemory64(VAddr vaddr, u64 value) { diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp index 3443bf05e..1e3940801 100644 --- a/src/tests/core/core_timing.cpp +++ b/src/tests/core/core_timing.cpp @@ -7,7 +7,9 @@ #include <array> #include <bitset> #include <cstdlib> +#include <memory> #include <string> + #include "common/file_util.h" #include "core/core.h" #include "core/core_timing.h" @@ -65,11 +67,16 @@ TEST_CASE("CoreTiming[BasicOrder]", "[core]") { ScopeInit guard; auto& core_timing = guard.core_timing; - Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", CallbackTemplate<0>); - Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>); - Core::Timing::EventType* cb_c = core_timing.RegisterEvent("callbackC", CallbackTemplate<2>); - Core::Timing::EventType* cb_d = core_timing.RegisterEvent("callbackD", CallbackTemplate<3>); - Core::Timing::EventType* cb_e = core_timing.RegisterEvent("callbackE", CallbackTemplate<4>); + std::shared_ptr<Core::Timing::EventType> cb_a = + Core::Timing::CreateEvent("callbackA", CallbackTemplate<0>); + std::shared_ptr<Core::Timing::EventType> cb_b = + Core::Timing::CreateEvent("callbackB", CallbackTemplate<1>); + std::shared_ptr<Core::Timing::EventType> cb_c = + Core::Timing::CreateEvent("callbackC", CallbackTemplate<2>); + std::shared_ptr<Core::Timing::EventType> cb_d = + Core::Timing::CreateEvent("callbackD", CallbackTemplate<3>); + std::shared_ptr<Core::Timing::EventType> cb_e = + Core::Timing::CreateEvent("callbackE", CallbackTemplate<4>); // Enter slice 0 core_timing.ResetRun(); @@ -99,8 +106,8 @@ TEST_CASE("CoreTiming[FairSharing]", "[core]") { ScopeInit guard; auto& core_timing = guard.core_timing; - Core::Timing::EventType* empty_callback = - 
core_timing.RegisterEvent("empty_callback", EmptyCallback); + std::shared_ptr<Core::Timing::EventType> empty_callback = + Core::Timing::CreateEvent("empty_callback", EmptyCallback); callbacks_done = 0; u64 MAX_CALLBACKS = 10; @@ -133,8 +140,10 @@ TEST_CASE("Core::Timing[PredictableLateness]", "[core]") { ScopeInit guard; auto& core_timing = guard.core_timing; - Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", CallbackTemplate<0>); - Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>); + std::shared_ptr<Core::Timing::EventType> cb_a = + Core::Timing::CreateEvent("callbackA", CallbackTemplate<0>); + std::shared_ptr<Core::Timing::EventType> cb_b = + Core::Timing::CreateEvent("callbackB", CallbackTemplate<1>); // Enter slice 0 core_timing.ResetRun(); @@ -145,60 +154,3 @@ TEST_CASE("Core::Timing[PredictableLateness]", "[core]") { AdvanceAndCheck(core_timing, 0, 0, 10, -10); // (100 - 10) AdvanceAndCheck(core_timing, 1, 1, 50, -50); } - -namespace ChainSchedulingTest { -static int reschedules = 0; - -static void RescheduleCallback(Core::Timing::CoreTiming& core_timing, u64 userdata, - s64 cycles_late) { - --reschedules; - REQUIRE(reschedules >= 0); - REQUIRE(lateness == cycles_late); - - if (reschedules > 0) { - core_timing.ScheduleEvent(1000, reinterpret_cast<Core::Timing::EventType*>(userdata), - userdata); - } -} -} // namespace ChainSchedulingTest - -TEST_CASE("CoreTiming[ChainScheduling]", "[core]") { - using namespace ChainSchedulingTest; - - ScopeInit guard; - auto& core_timing = guard.core_timing; - - Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", CallbackTemplate<0>); - Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>); - Core::Timing::EventType* cb_c = core_timing.RegisterEvent("callbackC", CallbackTemplate<2>); - Core::Timing::EventType* cb_rs = core_timing.RegisterEvent( - "callbackReschedule", [&core_timing](u64 userdata, s64 
cycles_late) { - RescheduleCallback(core_timing, userdata, cycles_late); - }); - - // Enter slice 0 - core_timing.ResetRun(); - - core_timing.ScheduleEvent(800, cb_a, CB_IDS[0]); - core_timing.ScheduleEvent(1000, cb_b, CB_IDS[1]); - core_timing.ScheduleEvent(2200, cb_c, CB_IDS[2]); - core_timing.ScheduleEvent(1000, cb_rs, reinterpret_cast<u64>(cb_rs)); - REQUIRE(800 == core_timing.GetDowncount()); - - reschedules = 3; - AdvanceAndCheck(core_timing, 0, 0); // cb_a - AdvanceAndCheck(core_timing, 1, 1); // cb_b, cb_rs - REQUIRE(2 == reschedules); - - core_timing.AddTicks(core_timing.GetDowncount()); - core_timing.Advance(); // cb_rs - core_timing.SwitchContext(3); - REQUIRE(1 == reschedules); - REQUIRE(200 == core_timing.GetDowncount()); - - AdvanceAndCheck(core_timing, 2, 3); // cb_c - - core_timing.AddTicks(core_timing.GetDowncount()); - core_timing.Advance(); // cb_rs - REQUIRE(0 == reschedules); -} diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index c911c6ec4..65d7b9f93 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -4,8 +4,6 @@ add_library(video_core STATIC buffer_cache/map_interval.h dma_pusher.cpp dma_pusher.h - debug_utils/debug_utils.cpp - debug_utils/debug_utils.h engines/const_buffer_engine_interface.h engines/const_buffer_info.h engines/engine_upload.cpp @@ -22,6 +20,7 @@ add_library(video_core STATIC engines/maxwell_dma.h engines/shader_bytecode.h engines/shader_header.h + engines/shader_type.h gpu.cpp gpu.h gpu_asynch.cpp @@ -127,6 +126,8 @@ add_library(video_core STATIC shader/track.cpp surface.cpp surface.h + texture_cache/format_lookup_table.cpp + texture_cache/format_lookup_table.h texture_cache/surface_base.cpp texture_cache/surface_base.h texture_cache/surface_params.cpp @@ -148,12 +149,16 @@ add_library(video_core STATIC if (ENABLE_VULKAN) target_sources(video_core PRIVATE renderer_vulkan/declarations.h + renderer_vulkan/fixed_pipeline_state.cpp + 
renderer_vulkan/fixed_pipeline_state.h renderer_vulkan/maxwell_to_vk.cpp renderer_vulkan/maxwell_to_vk.h renderer_vulkan/vk_buffer_cache.cpp renderer_vulkan/vk_buffer_cache.h renderer_vulkan/vk_device.cpp renderer_vulkan/vk_device.h + renderer_vulkan/vk_image.cpp + renderer_vulkan/vk_image.h renderer_vulkan/vk_memory_manager.cpp renderer_vulkan/vk_memory_manager.h renderer_vulkan/vk_resource_manager.cpp @@ -164,6 +169,8 @@ if (ENABLE_VULKAN) renderer_vulkan/vk_scheduler.h renderer_vulkan/vk_shader_decompiler.cpp renderer_vulkan/vk_shader_decompiler.h + renderer_vulkan/vk_staging_buffer_pool.cpp + renderer_vulkan/vk_staging_buffer_pool.h renderer_vulkan/vk_stream_buffer.cpp renderer_vulkan/vk_stream_buffer.h renderer_vulkan/vk_swapchain.cpp @@ -180,3 +187,9 @@ target_link_libraries(video_core PRIVATE glad) if (ENABLE_VULKAN) target_link_libraries(video_core PRIVATE sirit) endif() + +if (MSVC) + target_compile_options(video_core PRIVATE /we4267) +else() + target_compile_options(video_core PRIVATE -Werror=conversion -Wno-error=sign-conversion) +endif() diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 63b3a8205..0510ed777 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -12,6 +12,10 @@ #include <utility> #include <vector> +#include <boost/icl/interval_map.hpp> +#include <boost/icl/interval_set.hpp> +#include <boost/range/iterator_range.hpp> + #include "common/alignment.h" #include "common/common_types.h" #include "core/core.h" @@ -423,8 +427,8 @@ private: VideoCore::RasterizerInterface& rasterizer; Core::System& system; - std::unique_ptr<StreamBuffer> stream_buffer; + std::unique_ptr<StreamBuffer> stream_buffer; TBufferType stream_buffer_handle{}; bool invalidated = false; @@ -436,18 +440,18 @@ private: using IntervalSet = boost::icl::interval_set<CacheAddr>; using IntervalCache = boost::icl::interval_map<CacheAddr, MapInterval>; using IntervalType = 
typename IntervalCache::interval_type; - IntervalCache mapped_addresses{}; + IntervalCache mapped_addresses; - static constexpr u64 write_page_bit{11}; - std::unordered_map<u64, u32> written_pages{}; + static constexpr u64 write_page_bit = 11; + std::unordered_map<u64, u32> written_pages; - static constexpr u64 block_page_bits{21}; - static constexpr u64 block_page_size{1 << block_page_bits}; - std::unordered_map<u64, TBuffer> blocks{}; + static constexpr u64 block_page_bits = 21; + static constexpr u64 block_page_size = 1ULL << block_page_bits; + std::unordered_map<u64, TBuffer> blocks; - std::list<TBuffer> pending_destruction{}; - u64 epoch{}; - u64 modified_ticks{}; + std::list<TBuffer> pending_destruction; + u64 epoch = 0; + u64 modified_ticks = 0; std::recursive_mutex mutex; }; diff --git a/src/video_core/debug_utils/debug_utils.cpp b/src/video_core/debug_utils/debug_utils.cpp deleted file mode 100644 index f0ef67535..000000000 --- a/src/video_core/debug_utils/debug_utils.cpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 -// Refer to the license.txt file included. - -#include <mutex> - -#include "video_core/debug_utils/debug_utils.h" - -namespace Tegra { - -void DebugContext::DoOnEvent(Event event, void* data) { - { - std::unique_lock lock{breakpoint_mutex}; - - // TODO(Subv): Commit the rasterizer's caches so framebuffers, render targets, etc. will - // show on debug widgets - - // TODO: Should stop the CPU thread here once we multithread emulation. 
- - active_breakpoint = event; - at_breakpoint = true; - - // Tell all observers that we hit a breakpoint - for (auto& breakpoint_observer : breakpoint_observers) { - breakpoint_observer->OnMaxwellBreakPointHit(event, data); - } - - // Wait until another thread tells us to Resume() - resume_from_breakpoint.wait(lock, [&] { return !at_breakpoint; }); - } -} - -void DebugContext::Resume() { - { - std::lock_guard lock{breakpoint_mutex}; - - // Tell all observers that we are about to resume - for (auto& breakpoint_observer : breakpoint_observers) { - breakpoint_observer->OnMaxwellResume(); - } - - // Resume the waiting thread (i.e. OnEvent()) - at_breakpoint = false; - } - - resume_from_breakpoint.notify_one(); -} - -} // namespace Tegra diff --git a/src/video_core/debug_utils/debug_utils.h b/src/video_core/debug_utils/debug_utils.h deleted file mode 100644 index ac3a2eb01..000000000 --- a/src/video_core/debug_utils/debug_utils.h +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 -// Refer to the license.txt file included. - -#pragma once - -#include <array> -#include <condition_variable> -#include <list> -#include <memory> -#include <mutex> - -namespace Tegra { - -class DebugContext { -public: - enum class Event { - FirstEvent = 0, - - MaxwellCommandLoaded = FirstEvent, - MaxwellCommandProcessed, - IncomingPrimitiveBatch, - FinishedPrimitiveBatch, - - NumEvents - }; - - /** - * Inherit from this class to be notified of events registered to some debug context. - * Most importantly this is used for our debugger GUI. - * - * To implement event handling, override the OnMaxwellBreakPointHit and OnMaxwellResume methods. - * @warning All BreakPointObservers need to be on the same thread to guarantee thread-safe state - * access - * @todo Evaluate an alternative interface, in which there is only one managing observer and - * multiple child observers running (by design) on the same thread. 
- */ - class BreakPointObserver { - public: - /// Constructs the object such that it observes events of the given DebugContext. - explicit BreakPointObserver(std::shared_ptr<DebugContext> debug_context) - : context_weak(debug_context) { - std::unique_lock lock{debug_context->breakpoint_mutex}; - debug_context->breakpoint_observers.push_back(this); - } - - virtual ~BreakPointObserver() { - auto context = context_weak.lock(); - if (context) { - { - std::unique_lock lock{context->breakpoint_mutex}; - context->breakpoint_observers.remove(this); - } - - // If we are the last observer to be destroyed, tell the debugger context that - // it is free to continue. In particular, this is required for a proper yuzu - // shutdown, when the emulation thread is waiting at a breakpoint. - if (context->breakpoint_observers.empty()) - context->Resume(); - } - } - - /** - * Action to perform when a breakpoint was reached. - * @param event Type of event which triggered the breakpoint - * @param data Optional data pointer (if unused, this is a nullptr) - * @note This function will perform nothing unless it is overridden in the child class. - */ - virtual void OnMaxwellBreakPointHit(Event event, void* data) {} - - /** - * Action to perform when emulation is resumed from a breakpoint. - * @note This function will perform nothing unless it is overridden in the child class. - */ - virtual void OnMaxwellResume() {} - - protected: - /** - * Weak context pointer. This need not be valid, so when requesting a shared_ptr via - * context_weak.lock(), always compare the result against nullptr. - */ - std::weak_ptr<DebugContext> context_weak; - }; - - /** - * Simple structure defining a breakpoint state - */ - struct BreakPoint { - bool enabled = false; - }; - - /** - * Static constructor used to create a shared_ptr of a DebugContext. 
- */ - static std::shared_ptr<DebugContext> Construct() { - return std::shared_ptr<DebugContext>(new DebugContext); - } - - /** - * Used by the emulation core when a given event has happened. If a breakpoint has been set - * for this event, OnEvent calls the event handlers of the registered breakpoint observers. - * The current thread then is halted until Resume() is called from another thread (or until - * emulation is stopped). - * @param event Event which has happened - * @param data Optional data pointer (pass nullptr if unused). Needs to remain valid until - * Resume() is called. - */ - void OnEvent(Event event, void* data) { - // This check is left in the header to allow the compiler to inline it. - if (!breakpoints[(int)event].enabled) - return; - // For the rest of event handling, call a separate function. - DoOnEvent(event, data); - } - - void DoOnEvent(Event event, void* data); - - /** - * Resume from the current breakpoint. - * @warning Calling this from the same thread that OnEvent was called in will cause a deadlock. - * Calling from any other thread is safe. - */ - void Resume(); - - /** - * Delete all set breakpoints and resume emulation. - */ - void ClearBreakpoints() { - for (auto& bp : breakpoints) { - bp.enabled = false; - } - Resume(); - } - - // TODO: Evaluate if access to these members should be hidden behind a public interface. - std::array<BreakPoint, static_cast<int>(Event::NumEvents)> breakpoints; - Event active_breakpoint{}; - bool at_breakpoint = false; - -private: - /** - * Private default constructor to make sure people always construct this through Construct() - * instead. - */ - DebugContext() = default; - - /// Mutex protecting current breakpoint state and the observer list. - std::mutex breakpoint_mutex; - - /// Used by OnEvent to wait for resumption. 
- std::condition_variable resume_from_breakpoint; - - /// List of registered observers - std::list<BreakPointObserver*> breakpoint_observers; -}; - -} // namespace Tegra diff --git a/src/video_core/engines/const_buffer_engine_interface.h b/src/video_core/engines/const_buffer_engine_interface.h index ac27b6cbe..44b8b8d22 100644 --- a/src/video_core/engines/const_buffer_engine_interface.h +++ b/src/video_core/engines/const_buffer_engine_interface.h @@ -8,19 +8,11 @@ #include "common/bit_field.h" #include "common/common_types.h" #include "video_core/engines/shader_bytecode.h" +#include "video_core/engines/shader_type.h" #include "video_core/textures/texture.h" namespace Tegra::Engines { -enum class ShaderType : u32 { - Vertex = 0, - TesselationControl = 1, - TesselationEval = 2, - Geometry = 3, - Fragment = 4, - Compute = 5, -}; - struct SamplerDescriptor { union { BitField<0, 20, Tegra::Shader::TextureType> texture_type; diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp index 3a39aeabe..110406f2f 100644 --- a/src/video_core/engines/kepler_compute.cpp +++ b/src/video_core/engines/kepler_compute.cpp @@ -8,6 +8,7 @@ #include "core/core.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_type.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" #include "video_core/renderer_base.h" diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h index 5259d92bd..4ef3e0613 100644 --- a/src/video_core/engines/kepler_compute.h +++ b/src/video_core/engines/kepler_compute.h @@ -12,6 +12,7 @@ #include "common/common_types.h" #include "video_core/engines/const_buffer_engine_interface.h" #include "video_core/engines/engine_upload.h" +#include "video_core/engines/shader_type.h" #include "video_core/gpu.h" #include "video_core/textures/texture.h" @@ -140,7 +141,7 @@ public: 
INSERT_PADDING_WORDS(0x3); - BitField<0, 16, u32> shared_alloc; + BitField<0, 18, u32> shared_alloc; BitField<16, 16, u32> block_dim_x; union { @@ -178,7 +179,12 @@ public: BitField<24, 5, u32> gpr_alloc; }; - INSERT_PADDING_WORDS(0x11); + union { + BitField<0, 20, u32> local_crs_alloc; + BitField<24, 5, u32> sass_version; + }; + + INSERT_PADDING_WORDS(0x10); } launch_description{}; struct { diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 2bed6cb38..1d1f780e7 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -7,8 +7,8 @@ #include "common/assert.h" #include "core/core.h" #include "core/core_timing.h" -#include "video_core/debug_utils/debug_utils.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_type.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" #include "video_core/textures/texture.h" @@ -87,11 +87,11 @@ void Maxwell3D::InitializeRegisterDefaults() { color_mask.A.Assign(1); } - // Commercial games seem to assume this value is enabled and nouveau sets this value manually. + // NVN games expect these values to be enabled at boot + regs.rasterize_enable = 1; regs.rt_separate_frag_data = 1; - - // Some games (like Super Mario Odyssey) assume that SRGB is enabled. regs.framebuffer_srgb = 1; + mme_inline[MAXWELL3D_REG_INDEX(draw.vertex_end_gl)] = true; mme_inline[MAXWELL3D_REG_INDEX(draw.vertex_begin_gl)] = true; mme_inline[MAXWELL3D_REG_INDEX(vertex_buffer.count)] = true; @@ -261,7 +261,8 @@ void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u3 executing_macro = 0; // Lookup the macro offset - const u32 entry = ((method - MacroRegistersStart) >> 1) % macro_positions.size(); + const u32 entry = + ((method - MacroRegistersStart) >> 1) % static_cast<u32>(macro_positions.size()); // Execute the current macro. 
macro_interpreter.Execute(macro_positions[entry], num_parameters, parameters); @@ -271,8 +272,6 @@ void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u3 } void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { - auto debug_context = system.GetGPUDebugContext(); - const u32 method = method_call.method; if (method == cb_data_state.current) { @@ -313,10 +312,6 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { ASSERT_MSG(method < Regs::NUM_REGS, "Invalid Maxwell3D register, increase the size of the Regs structure"); - if (debug_context) { - debug_context->OnEvent(Tegra::DebugContext::Event::MaxwellCommandLoaded, nullptr); - } - if (regs.reg_array[method] != method_call.argument) { regs.reg_array[method] = method_call.argument; const std::size_t dirty_reg = dirty_pointers[method]; @@ -367,24 +362,24 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { StartCBData(method); break; } - case MAXWELL3D_REG_INDEX(cb_bind[0].raw_config): { - ProcessCBBind(Regs::ShaderStage::Vertex); + case MAXWELL3D_REG_INDEX(cb_bind[0]): { + ProcessCBBind(0); break; } - case MAXWELL3D_REG_INDEX(cb_bind[1].raw_config): { - ProcessCBBind(Regs::ShaderStage::TesselationControl); + case MAXWELL3D_REG_INDEX(cb_bind[1]): { + ProcessCBBind(1); break; } - case MAXWELL3D_REG_INDEX(cb_bind[2].raw_config): { - ProcessCBBind(Regs::ShaderStage::TesselationEval); + case MAXWELL3D_REG_INDEX(cb_bind[2]): { + ProcessCBBind(2); break; } - case MAXWELL3D_REG_INDEX(cb_bind[3].raw_config): { - ProcessCBBind(Regs::ShaderStage::Geometry); + case MAXWELL3D_REG_INDEX(cb_bind[3]): { + ProcessCBBind(3); break; } - case MAXWELL3D_REG_INDEX(cb_bind[4].raw_config): { - ProcessCBBind(Regs::ShaderStage::Fragment); + case MAXWELL3D_REG_INDEX(cb_bind[4]): { + ProcessCBBind(4); break; } case MAXWELL3D_REG_INDEX(draw.vertex_end_gl): { @@ -422,10 +417,6 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { default: break; } - - if (debug_context) 
{ - debug_context->OnEvent(Tegra::DebugContext::Event::MaxwellCommandProcessed, nullptr); - } } void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) { @@ -483,12 +474,6 @@ void Maxwell3D::FlushMMEInlineDraw() { ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?"); ASSERT(mme_draw.instance_count == mme_draw.gl_end_count); - auto debug_context = system.GetGPUDebugContext(); - - if (debug_context) { - debug_context->OnEvent(Tegra::DebugContext::Event::IncomingPrimitiveBatch, nullptr); - } - // Both instance configuration registers can not be set at the same time. ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont, "Illegal combination of instancing parameters"); @@ -498,10 +483,6 @@ void Maxwell3D::FlushMMEInlineDraw() { rasterizer.DrawMultiBatch(is_indexed); } - if (debug_context) { - debug_context->OnEvent(Tegra::DebugContext::Event::FinishedPrimitiveBatch, nullptr); - } - // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if // the game is trying to draw indexed or direct mode. This needs to be verified on HW still - // it's possible that it is incorrect and that there is some other register used to specify the @@ -648,12 +629,6 @@ void Maxwell3D::DrawArrays() { regs.vertex_buffer.count); ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?"); - auto debug_context = system.GetGPUDebugContext(); - - if (debug_context) { - debug_context->OnEvent(Tegra::DebugContext::Event::IncomingPrimitiveBatch, nullptr); - } - // Both instance configuration registers can not be set at the same time. 
ASSERT_MSG(!regs.draw.instance_next || !regs.draw.instance_cont, "Illegal combination of instancing parameters"); @@ -671,10 +646,6 @@ void Maxwell3D::DrawArrays() { rasterizer.DrawBatch(is_indexed); } - if (debug_context) { - debug_context->OnEvent(Tegra::DebugContext::Event::FinishedPrimitiveBatch, nullptr); - } - // TODO(bunnei): Below, we reset vertex count so that we can use these registers to determine if // the game is trying to draw indexed or direct mode. This needs to be verified on HW still - // it's possible that it is incorrect and that there is some other register used to specify the @@ -686,10 +657,10 @@ void Maxwell3D::DrawArrays() { } } -void Maxwell3D::ProcessCBBind(Regs::ShaderStage stage) { +void Maxwell3D::ProcessCBBind(std::size_t stage_index) { // Bind the buffer currently in CB_ADDRESS to the specified index in the desired shader stage. - auto& shader = state.shader_stages[static_cast<std::size_t>(stage)]; - auto& bind_data = regs.cb_bind[static_cast<std::size_t>(stage)]; + auto& shader = state.shader_stages[stage_index]; + auto& bind_data = regs.cb_bind[stage_index]; ASSERT(bind_data.index < Regs::MaxConstBuffers); auto& buffer = shader.const_buffers[bind_data.index]; @@ -741,14 +712,6 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const { Texture::TICEntry tic_entry; memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry)); - [[maybe_unused]] const auto r_type{tic_entry.r_type.Value()}; - [[maybe_unused]] const auto g_type{tic_entry.g_type.Value()}; - [[maybe_unused]] const auto b_type{tic_entry.b_type.Value()}; - [[maybe_unused]] const auto a_type{tic_entry.a_type.Value()}; - - // TODO(Subv): Different data types for separate components are not supported - DEBUG_ASSERT(r_type == g_type && r_type == b_type && r_type == a_type); - return tic_entry; } @@ -764,9 +727,9 @@ Texture::FullTextureInfo Maxwell3D::GetTextureInfo(Texture::TextureHandle tex_ha return 
Texture::FullTextureInfo{GetTICEntry(tex_handle.tic_id), GetTSCEntry(tex_handle.tsc_id)}; } -Texture::FullTextureInfo Maxwell3D::GetStageTexture(Regs::ShaderStage stage, - std::size_t offset) const { - const auto& shader = state.shader_stages[static_cast<std::size_t>(stage)]; +Texture::FullTextureInfo Maxwell3D::GetStageTexture(ShaderType stage, std::size_t offset) const { + const auto stage_index = static_cast<std::size_t>(stage); + const auto& shader = state.shader_stages[stage_index]; const auto& tex_info_buffer = shader.const_buffers[regs.tex_cb_index]; ASSERT(tex_info_buffer.enabled && tex_info_buffer.address != 0); diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 1aa7c274f..a35e7a195 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -18,6 +18,7 @@ #include "video_core/engines/const_buffer_engine_interface.h" #include "video_core/engines/const_buffer_info.h" #include "video_core/engines/engine_upload.h" +#include "video_core/engines/shader_type.h" #include "video_core/gpu.h" #include "video_core/macro_interpreter.h" #include "video_core/textures/texture.h" @@ -62,7 +63,6 @@ public: static constexpr std::size_t NumVertexArrays = 32; static constexpr std::size_t NumVertexAttributes = 32; static constexpr std::size_t NumVaryings = 31; - static constexpr std::size_t NumTextureSamplers = 32; static constexpr std::size_t NumImages = 8; // TODO(Rodrigo): Investigate this number static constexpr std::size_t NumClipDistances = 8; static constexpr std::size_t MaxShaderProgram = 6; @@ -130,14 +130,6 @@ public: Fragment = 5, }; - enum class ShaderStage : u32 { - Vertex = 0, - TesselationControl = 1, - TesselationEval = 2, - Geometry = 3, - Fragment = 4, - }; - struct VertexAttribute { enum class Size : u32 { Invalid = 0x0, @@ -318,6 +310,11 @@ public: } }; + enum class DepthMode : u32 { + MinusOneToOne = 0, + ZeroToOne = 1, + }; + enum class PrimitiveTopology : u32 { Points = 0x0, 
Lines = 0x1, @@ -499,6 +496,18 @@ public: INSERT_UNION_PADDING_WORDS(1); }; + enum class TessellationPrimitive : u32 { + Isolines = 0, + Triangles = 1, + Quads = 2, + }; + + enum class TessellationSpacing : u32 { + Equal = 0, + FractionalOdd = 1, + FractionalEven = 2, + }; + struct RenderTargetConfig { u32 address_high; u32 address_low; @@ -636,7 +645,23 @@ public: }; } sync_info; - INSERT_UNION_PADDING_WORDS(0x11E); + INSERT_UNION_PADDING_WORDS(0x15); + + union { + BitField<0, 2, TessellationPrimitive> prim; + BitField<4, 2, TessellationSpacing> spacing; + BitField<8, 1, u32> cw; + BitField<9, 1, u32> connected; + } tess_mode; + + std::array<f32, 4> tess_level_outer; + std::array<f32, 2> tess_level_inner; + + INSERT_UNION_PADDING_WORDS(0x10); + + u32 rasterize_enable; + + INSERT_UNION_PADDING_WORDS(0xF1); u32 tfb_enabled; @@ -655,7 +680,7 @@ public: u32 count; } vertex_buffer; - INSERT_UNION_PADDING_WORDS(1); + DepthMode depth_mode; float clear_color[4]; float clear_depth; @@ -670,27 +695,31 @@ public: u32 polygon_offset_line_enable; u32 polygon_offset_fill_enable; - INSERT_UNION_PADDING_WORDS(0xD); + u32 patch_vertices; + + INSERT_UNION_PADDING_WORDS(0xC); std::array<ScissorTest, NumViewports> scissor_test; INSERT_UNION_PADDING_WORDS(0x15); s32 stencil_back_func_ref; - u32 stencil_back_mask; u32 stencil_back_func_mask; + u32 stencil_back_mask; INSERT_UNION_PADDING_WORDS(0xC); u32 color_mask_common; - INSERT_UNION_PADDING_WORDS(0x6); - - u32 rt_separate_frag_data; + INSERT_UNION_PADDING_WORDS(0x2); f32 depth_bounds[2]; - INSERT_UNION_PADDING_WORDS(0xA); + INSERT_UNION_PADDING_WORDS(0x2); + + u32 rt_separate_frag_data; + + INSERT_UNION_PADDING_WORDS(0xC); struct { u32 address_high; @@ -1007,7 +1036,12 @@ public: BitField<4, 1, u32> depth_clamp_far; } view_volume_clip_control; - INSERT_UNION_PADDING_WORDS(0x21); + INSERT_UNION_PADDING_WORDS(0x1F); + + u32 depth_bounds_enable; + + INSERT_UNION_PADDING_WORDS(1); + struct { u32 enable; LogicOperation operation; @@ 
-1254,7 +1288,7 @@ public: Texture::FullTextureInfo GetTextureInfo(Texture::TextureHandle tex_handle) const; /// Returns the texture information for a specific texture in a specific shader stage. - Texture::FullTextureInfo GetStageTexture(Regs::ShaderStage stage, std::size_t offset) const; + Texture::FullTextureInfo GetStageTexture(ShaderType stage, std::size_t offset) const; u32 AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const override; @@ -1376,7 +1410,7 @@ private: void FinishCBData(); /// Handles a write to the CB_BIND register. - void ProcessCBBind(Regs::ShaderStage stage); + void ProcessCBBind(std::size_t stage_index); /// Handles a write to the VERTEX_END_GL register, triggering a draw. void DrawArrays(); @@ -1394,24 +1428,30 @@ ASSERT_REG_POSITION(upload, 0x60); ASSERT_REG_POSITION(exec_upload, 0x6C); ASSERT_REG_POSITION(data_upload, 0x6D); ASSERT_REG_POSITION(sync_info, 0xB2); +ASSERT_REG_POSITION(tess_mode, 0xC8); +ASSERT_REG_POSITION(tess_level_outer, 0xC9); +ASSERT_REG_POSITION(tess_level_inner, 0xCD); +ASSERT_REG_POSITION(rasterize_enable, 0xDF); ASSERT_REG_POSITION(tfb_enabled, 0x1D1); ASSERT_REG_POSITION(rt, 0x200); ASSERT_REG_POSITION(viewport_transform, 0x280); ASSERT_REG_POSITION(viewports, 0x300); ASSERT_REG_POSITION(vertex_buffer, 0x35D); +ASSERT_REG_POSITION(depth_mode, 0x35F); ASSERT_REG_POSITION(clear_color[0], 0x360); ASSERT_REG_POSITION(clear_depth, 0x364); ASSERT_REG_POSITION(clear_stencil, 0x368); ASSERT_REG_POSITION(polygon_offset_point_enable, 0x370); ASSERT_REG_POSITION(polygon_offset_line_enable, 0x371); ASSERT_REG_POSITION(polygon_offset_fill_enable, 0x372); +ASSERT_REG_POSITION(patch_vertices, 0x373); ASSERT_REG_POSITION(scissor_test, 0x380); ASSERT_REG_POSITION(stencil_back_func_ref, 0x3D5); -ASSERT_REG_POSITION(stencil_back_mask, 0x3D6); -ASSERT_REG_POSITION(stencil_back_func_mask, 0x3D7); +ASSERT_REG_POSITION(stencil_back_func_mask, 0x3D6); +ASSERT_REG_POSITION(stencil_back_mask, 0x3D7); 
ASSERT_REG_POSITION(color_mask_common, 0x3E4); ASSERT_REG_POSITION(rt_separate_frag_data, 0x3EB); -ASSERT_REG_POSITION(depth_bounds, 0x3EC); +ASSERT_REG_POSITION(depth_bounds, 0x3E7); ASSERT_REG_POSITION(zeta, 0x3F8); ASSERT_REG_POSITION(clear_flags, 0x43E); ASSERT_REG_POSITION(vertex_attrib_format, 0x458); @@ -1467,6 +1507,7 @@ ASSERT_REG_POSITION(cull, 0x646); ASSERT_REG_POSITION(pixel_center_integer, 0x649); ASSERT_REG_POSITION(viewport_transform_enabled, 0x64B); ASSERT_REG_POSITION(view_volume_clip_control, 0x64F); +ASSERT_REG_POSITION(depth_bounds_enable, 0x66F); ASSERT_REG_POSITION(logic_op, 0x671); ASSERT_REG_POSITION(clear_buffers, 0x674); ASSERT_REG_POSITION(color_mask, 0x680); diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h index 8f6bc76eb..57b57c647 100644 --- a/src/video_core/engines/shader_bytecode.h +++ b/src/video_core/engines/shader_bytecode.h @@ -98,10 +98,11 @@ union Attribute { BitField<20, 10, u64> immediate; BitField<22, 2, u64> element; BitField<24, 6, Index> index; + BitField<31, 1, u64> patch; BitField<47, 3, AttributeSize> size; bool IsPhysical() const { - return element == 0 && static_cast<u64>(index.Value()) == 0; + return patch == 0 && element == 0 && static_cast<u64>(index.Value()) == 0; } } fmt20; @@ -383,6 +384,15 @@ enum class IsberdMode : u64 { enum class IsberdShift : u64 { None = 0, U16 = 1, B32 = 2 }; +enum class MembarType : u64 { + CTA = 0, + GL = 1, + SYS = 2, + VC = 3, +}; + +enum class MembarUnknown : u64 { Default = 0, IVALLD = 1, IVALLT = 2, IVALLTD = 3 }; + enum class HalfType : u64 { H0_H1 = 0, F32 = 1, @@ -616,6 +626,14 @@ union Instruction { } shfl; union { + BitField<44, 1, u64> ftz; + BitField<39, 2, u64> tab5cb8_2; + BitField<38, 1, u64> ndv; + BitField<47, 1, u64> cc; + BitField<28, 8, u64> swizzle; + } fswzadd; + + union { BitField<8, 8, Register> gpr; BitField<20, 24, s64> offset; } gmem; @@ -792,6 +810,12 @@ union Instruction { } popc; union { + BitField<41, 1, 
u64> sh; + BitField<40, 1, u64> invert; + BitField<48, 1, u64> is_signed; + } flo; + + union { BitField<39, 3, u64> pred; BitField<42, 1, u64> neg_pred; } sel; @@ -1027,7 +1051,7 @@ union Instruction { BitField<40, 1, R2pMode> mode; BitField<41, 2, u64> byte; BitField<20, 7, u64> immediate_mask; - } r2p; + } p2r_r2p; union { BitField<39, 3, u64> pred39; @@ -1215,7 +1239,7 @@ union Instruction { BitField<35, 1, u64> ndv_flag; BitField<49, 1, u64> nodep_flag; BitField<50, 1, u64> dc_flag; - BitField<54, 2, u64> info; + BitField<54, 2, u64> offset_mode; BitField<56, 2, u64> component; bool UsesMiscMode(TextureMiscMode mode) const { @@ -1227,9 +1251,9 @@ union Instruction { case TextureMiscMode::DC: return dc_flag != 0; case TextureMiscMode::AOFFI: - return info == 1; + return offset_mode == 1; case TextureMiscMode::PTP: - return info == 2; + return offset_mode == 2; default: break; } @@ -1241,7 +1265,7 @@ union Instruction { BitField<35, 1, u64> ndv_flag; BitField<49, 1, u64> nodep_flag; BitField<50, 1, u64> dc_flag; - BitField<33, 2, u64> info; + BitField<33, 2, u64> offset_mode; BitField<37, 2, u64> component; bool UsesMiscMode(TextureMiscMode mode) const { @@ -1253,9 +1277,9 @@ union Instruction { case TextureMiscMode::DC: return dc_flag != 0; case TextureMiscMode::AOFFI: - return info == 1; + return offset_mode == 1; case TextureMiscMode::PTP: - return info == 2; + return offset_mode == 2; default: break; } @@ -1268,6 +1292,7 @@ union Instruction { BitField<50, 1, u64> dc_flag; BitField<51, 1, u64> aoffi_flag; BitField<52, 2, u64> component; + BitField<55, 1, u64> fp16_flag; bool UsesMiscMode(TextureMiscMode mode) const { switch (mode) { @@ -1432,6 +1457,26 @@ union Instruction { } tlds; union { + BitField<28, 1, u64> is_array; + BitField<29, 2, TextureType> texture_type; + BitField<35, 1, u64> aoffi_flag; + BitField<49, 1, u64> nodep_flag; + + bool UsesMiscMode(TextureMiscMode mode) const { + switch (mode) { + case TextureMiscMode::AOFFI: + return aoffi_flag != 
0; + case TextureMiscMode::NODEP: + return nodep_flag != 0; + default: + break; + } + return false; + } + + } txd; + + union { BitField<24, 2, StoreCacheManagement> cache_management; BitField<33, 3, ImageType> image_type; BitField<49, 2, OutOfBoundsStore> out_of_bounds_store; @@ -1478,7 +1523,8 @@ union Instruction { u32 value = static_cast<u32>(target); // The branch offset is relative to the next instruction and is stored in bytes, so // divide it by the size of an instruction and add 1 to it. - return static_cast<s32>((value ^ mask) - mask) / sizeof(Instruction) + 1; + return static_cast<s32>((value ^ mask) - mask) / static_cast<s32>(sizeof(Instruction)) + + 1; } } bra; @@ -1492,7 +1538,8 @@ union Instruction { u32 value = static_cast<u32>(target); // The branch offset is relative to the next instruction and is stored in bytes, so // divide it by the size of an instruction and add 1 to it. - return static_cast<s32>((value ^ mask) - mask) / sizeof(Instruction) + 1; + return static_cast<s32>((value ^ mask) - mask) / static_cast<s32>(sizeof(Instruction)) + + 1; } } brx; @@ -1509,6 +1556,11 @@ union Instruction { } isberd; union { + BitField<8, 2, MembarType> type; + BitField<0, 2, MembarUnknown> unknown; + } membar; + + union { BitField<48, 1, u64> signed_a; BitField<38, 1, u64> is_byte_chunk_a; BitField<36, 2, VideoType> type_a; @@ -1590,6 +1642,7 @@ public: DEPBAR, VOTE, SHFL, + FSWZADD, BFE_C, BFE_R, BFE_IMM, @@ -1621,6 +1674,8 @@ public: TLD4S, // Texture Load 4 with scalar / non - vec4 source / destinations TMML_B, // Texture Mip Map Level TMML, // Texture Mip Map Level + TXD, // Texture Gradient/Load with Derivates + TXD_B, // Texture Gradient/Load with Derivates Bindless SUST, // Surface Store SULD, // Surface Load SUATOM, // Surface Atomic Operation @@ -1629,6 +1684,7 @@ public: IPA, OUT_R, // Emit vertex/primitive ISBERD, + MEMBAR, VMAD, VSETP, FFMA_IMM, // Fused Multiply and Add @@ -1653,6 +1709,9 @@ public: ISCADD_C, // Scale and Add ISCADD_R, 
ISCADD_IMM, + FLO_R, + FLO_C, + FLO_IMM, LEA_R1, LEA_R2, LEA_RZ, @@ -1716,6 +1775,10 @@ public: SHR_C, SHR_R, SHR_IMM, + SHF_RIGHT_R, + SHF_RIGHT_IMM, + SHF_LEFT_R, + SHF_LEFT_IMM, FMNMX_C, FMNMX_R, FMNMX_IMM, @@ -1738,6 +1801,7 @@ public: PSET, CSETP, R2P_IMM, + P2R_IMM, XMAD_IMM, XMAD_CR, XMAD_RC, @@ -1851,11 +1915,11 @@ private: const std::size_t bit_position = opcode_bitsize - i - 1; switch (bitstring[i]) { case '0': - mask |= 1 << bit_position; + mask |= static_cast<u16>(1U << bit_position); break; case '1': - expect |= 1 << bit_position; - mask |= 1 << bit_position; + expect |= static_cast<u16>(1U << bit_position); + mask |= static_cast<u16>(1U << bit_position); break; default: // Ignore @@ -1883,11 +1947,12 @@ private: INST("111000100100----", Id::BRA, Type::Flow, "BRA"), INST("111000100101----", Id::BRX, Type::Flow, "BRX"), INST("1111000011111---", Id::SYNC, Type::Flow, "SYNC"), - INST("111000110100---", Id::BRK, Type::Flow, "BRK"), + INST("111000110100----", Id::BRK, Type::Flow, "BRK"), INST("111000110000----", Id::EXIT, Type::Flow, "EXIT"), INST("1111000011110---", Id::DEPBAR, Type::Synch, "DEPBAR"), INST("0101000011011---", Id::VOTE, Type::Warp, "VOTE"), INST("1110111100010---", Id::SHFL, Type::Warp, "SHFL"), + INST("0101000011111---", Id::FSWZADD, Type::Warp, "FSWZADD"), INST("1110111111011---", Id::LD_A, Type::Memory, "LD_A"), INST("1110111101001---", Id::LD_S, Type::Memory, "LD_S"), INST("1110111101000---", Id::LD_L, Type::Memory, "LD_L"), @@ -1909,9 +1974,11 @@ private: INST("1101-01---------", Id::TLDS, Type::Texture, "TLDS"), INST("110010----111---", Id::TLD4, Type::Texture, "TLD4"), INST("1101111011111---", Id::TLD4_B, Type::Texture, "TLD4_B"), - INST("1101111100------", Id::TLD4S, Type::Texture, "TLD4S"), + INST("11011111-0------", Id::TLD4S, Type::Texture, "TLD4S"), INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"), INST("1101111101011---", Id::TMML, Type::Texture, "TMML"), + INST("11011110011110--", Id::TXD_B, Type::Texture, 
"TXD_B"), + INST("11011110001110--", Id::TXD, Type::Texture, "TXD"), INST("11101011001-----", Id::SUST, Type::Image, "SUST"), INST("11101011000-----", Id::SULD, Type::Image, "SULD"), INST("1110101000------", Id::SUATOM, Type::Image, "SUATOM_D"), @@ -1919,6 +1986,7 @@ private: INST("11100000--------", Id::IPA, Type::Trivial, "IPA"), INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"), INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"), + INST("1110111110011---", Id::MEMBAR, Type::Trivial, "MEMBAR"), INST("01011111--------", Id::VMAD, Type::Video, "VMAD"), INST("0101000011110---", Id::VSETP, Type::Video, "VSETP"), INST("0011001-1-------", Id::FFMA_IMM, Type::Ffma, "FFMA_IMM"), @@ -1953,6 +2021,9 @@ private: INST("010110110100----", Id::ICMP_R, Type::ArithmeticInteger, "ICMP_R"), INST("010010110100----", Id::ICMP_CR, Type::ArithmeticInteger, "ICMP_CR"), INST("0011011-0100----", Id::ICMP_IMM, Type::ArithmeticInteger, "ICMP_IMM"), + INST("0101110000110---", Id::FLO_R, Type::ArithmeticInteger, "FLO_R"), + INST("0100110000110---", Id::FLO_C, Type::ArithmeticInteger, "FLO_C"), + INST("0011100-00110---", Id::FLO_IMM, Type::ArithmeticInteger, "FLO_IMM"), INST("0101101111011---", Id::LEA_R2, Type::ArithmeticInteger, "LEA_R2"), INST("0101101111010---", Id::LEA_R1, Type::ArithmeticInteger, "LEA_R1"), INST("001101101101----", Id::LEA_IMM, Type::ArithmeticInteger, "LEA_IMM"), @@ -2010,6 +2081,10 @@ private: INST("0100110000101---", Id::SHR_C, Type::Shift, "SHR_C"), INST("0101110000101---", Id::SHR_R, Type::Shift, "SHR_R"), INST("0011100-00101---", Id::SHR_IMM, Type::Shift, "SHR_IMM"), + INST("0101110011111---", Id::SHF_RIGHT_R, Type::Shift, "SHF_RIGHT_R"), + INST("0011100-11111---", Id::SHF_RIGHT_IMM, Type::Shift, "SHF_RIGHT_IMM"), + INST("0101101111111---", Id::SHF_LEFT_R, Type::Shift, "SHF_LEFT_R"), + INST("0011011-11111---", Id::SHF_LEFT_IMM, Type::Shift, "SHF_LEFT_IMM"), INST("0100110011100---", Id::I2I_C, Type::Conversion, "I2I_C"), 
INST("0101110011100---", Id::I2I_R, Type::Conversion, "I2I_R"), INST("0011101-11100---", Id::I2I_IMM, Type::Conversion, "I2I_IMM"), @@ -2032,6 +2107,7 @@ private: INST("0101000010010---", Id::PSETP, Type::PredicateSetPredicate, "PSETP"), INST("010100001010----", Id::CSETP, Type::PredicateSetPredicate, "CSETP"), INST("0011100-11110---", Id::R2P_IMM, Type::RegisterSetPredicate, "R2P_IMM"), + INST("0011100-11101---", Id::P2R_IMM, Type::RegisterSetPredicate, "P2R_IMM"), INST("0011011-00------", Id::XMAD_IMM, Type::Xmad, "XMAD_IMM"), INST("0100111---------", Id::XMAD_CR, Type::Xmad, "XMAD_CR"), INST("010100010-------", Id::XMAD_RC, Type::Xmad, "XMAD_RC"), diff --git a/src/video_core/engines/shader_type.h b/src/video_core/engines/shader_type.h new file mode 100644 index 000000000..49ce5cde5 --- /dev/null +++ b/src/video_core/engines/shader_type.h @@ -0,0 +1,21 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" + +namespace Tegra::Engines { + +enum class ShaderType : u32 { + Vertex = 0, + TesselationControl = 1, + TesselationEval = 2, + Geometry = 3, + Fragment = 4, + Compute = 5, +}; +static constexpr std::size_t MaxShaderTypes = 6; + +} // namespace Tegra::Engines diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index 758a37f14..2cdf1aa7f 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp @@ -31,24 +31,22 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p CommandDataContainer next; while (state.is_running) { - while (!state.queue.Empty()) { - state.queue.Pop(next); - if (const auto submit_list = std::get_if<SubmitListCommand>(&next.data)) { - dma_pusher.Push(std::move(submit_list->entries)); - dma_pusher.DispatchCalls(); - } else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) { - renderer.SwapBuffers(data->framebuffer ? 
&*data->framebuffer : nullptr); - } else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) { - renderer.Rasterizer().FlushRegion(data->addr, data->size); - } else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) { - renderer.Rasterizer().InvalidateRegion(data->addr, data->size); - } else if (std::holds_alternative<EndProcessingCommand>(next.data)) { - return; - } else { - UNREACHABLE(); - } - state.signaled_fence.store(next.fence); + next = state.queue.PopWait(); + if (const auto submit_list = std::get_if<SubmitListCommand>(&next.data)) { + dma_pusher.Push(std::move(submit_list->entries)); + dma_pusher.DispatchCalls(); + } else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) { + renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr); + } else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) { + renderer.Rasterizer().FlushRegion(data->addr, data->size); + } else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) { + renderer.Rasterizer().InvalidateRegion(data->addr, data->size); + } else if (std::holds_alternative<EndProcessingCommand>(next.data)) { + return; + } else { + UNREACHABLE(); } + state.signaled_fence.store(next.fence); } } @@ -73,8 +71,7 @@ void ThreadManager::SubmitList(Tegra::CommandList&& entries) { } void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { - PushCommand(SwapBuffersCommand(framebuffer ? *framebuffer - : std::optional<const Tegra::FramebufferConfig>{})); + PushCommand(SwapBuffersCommand(framebuffer ? 
std::make_optional(*framebuffer) : std::nullopt)); } void ThreadManager::FlushRegion(CacheAddr addr, u64 size) { diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index bffae940c..11848fbce 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -52,7 +52,7 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) { const u64 aligned_size{Common::AlignUp(size, page_size)}; const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)}; - MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), aligned_size, cpu_addr); + MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr); ASSERT(system.CurrentProcess() ->VMManager() .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped, @@ -67,7 +67,7 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) const u64 aligned_size{Common::AlignUp(size, page_size)}; - MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), aligned_size, cpu_addr); + MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr); ASSERT(system.CurrentProcess() ->VMManager() .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped, diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp index b230dcc18..d01db97da 100644 --- a/src/video_core/rasterizer_accelerated.cpp +++ b/src/video_core/rasterizer_accelerated.cpp @@ -5,6 +5,7 @@ #include <mutex> #include <boost/icl/interval_map.hpp> +#include <boost/range/iterator_range.hpp> #include "common/assert.h" #include "common/common_types.h" @@ -22,7 +23,8 @@ constexpr auto RangeFromInterval(Map& map, const Interval& interval) { } // Anonymous namespace -RasterizerAccelerated::RasterizerAccelerated() = default; +RasterizerAccelerated::RasterizerAccelerated(Memory::Memory& cpu_memory_) + : cpu_memory{cpu_memory_} {} 
RasterizerAccelerated::~RasterizerAccelerated() = default; @@ -47,9 +49,9 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del const u64 interval_size = interval_end_addr - interval_start_addr; if (delta > 0 && count == delta) { - Memory::RasterizerMarkRegionCached(interval_start_addr, interval_size, true); + cpu_memory.RasterizerMarkRegionCached(interval_start_addr, interval_size, true); } else if (delta < 0 && count == -delta) { - Memory::RasterizerMarkRegionCached(interval_start_addr, interval_size, false); + cpu_memory.RasterizerMarkRegionCached(interval_start_addr, interval_size, false); } else { ASSERT(count >= 0); } diff --git a/src/video_core/rasterizer_accelerated.h b/src/video_core/rasterizer_accelerated.h index 8f7e3547e..315798e7c 100644 --- a/src/video_core/rasterizer_accelerated.h +++ b/src/video_core/rasterizer_accelerated.h @@ -11,12 +11,16 @@ #include "common/common_types.h" #include "video_core/rasterizer_interface.h" +namespace Memory { +class Memory; +} + namespace VideoCore { /// Implements the shared part in GPU accelerated rasterizers in RasterizerInterface. 
class RasterizerAccelerated : public RasterizerInterface { public: - explicit RasterizerAccelerated(); + explicit RasterizerAccelerated(Memory::Memory& cpu_memory_); ~RasterizerAccelerated() override; void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override; @@ -24,8 +28,9 @@ public: private: using CachedPageMap = boost::icl::interval_map<u64, int>; CachedPageMap cached_pages; - std::mutex pages_mutex; + + Memory::Memory& cpu_memory; }; } // namespace VideoCore diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp index c65b24c69..1a2e2a9f7 100644 --- a/src/video_core/renderer_opengl/gl_device.cpp +++ b/src/video_core/renderer_opengl/gl_device.cpp @@ -5,7 +5,10 @@ #include <algorithm> #include <array> #include <cstddef> +#include <cstring> +#include <optional> #include <vector> + #include <glad/glad.h> #include "common/logging/log.h" @@ -17,6 +20,30 @@ namespace OpenGL { namespace { +// One uniform block is reserved for emulation purposes +constexpr u32 ReservedUniformBlocks = 1; + +constexpr u32 NumStages = 5; + +constexpr std::array LimitUBOs = {GL_MAX_VERTEX_UNIFORM_BLOCKS, GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS, + GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS, + GL_MAX_GEOMETRY_UNIFORM_BLOCKS, GL_MAX_FRAGMENT_UNIFORM_BLOCKS}; + +constexpr std::array LimitSSBOs = { + GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS, + GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS, GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS, + GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS}; + +constexpr std::array LimitSamplers = { + GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS, + GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS, GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS, + GL_MAX_TEXTURE_IMAGE_UNITS}; + +constexpr std::array LimitImages = {GL_MAX_VERTEX_IMAGE_UNIFORMS, + GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS, + GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS, + GL_MAX_GEOMETRY_IMAGE_UNIFORMS, 
GL_MAX_FRAGMENT_IMAGE_UNIFORMS}; + template <typename T> T GetInteger(GLenum pname) { GLint temporary; @@ -48,13 +75,73 @@ bool HasExtension(const std::vector<std::string_view>& images, std::string_view return std::find(images.begin(), images.end(), extension) != images.end(); } +u32 Extract(u32& base, u32& num, u32 amount, std::optional<GLenum> limit = {}) { + ASSERT(num >= amount); + if (limit) { + amount = std::min(amount, GetInteger<u32>(*limit)); + } + num -= amount; + return std::exchange(base, base + amount); +} + +std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindings() noexcept { + std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> bindings; + + static std::array<std::size_t, 5> stage_swizzle = {0, 1, 2, 3, 4}; + const u32 total_ubos = GetInteger<u32>(GL_MAX_UNIFORM_BUFFER_BINDINGS); + const u32 total_ssbos = GetInteger<u32>(GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS); + const u32 total_samplers = GetInteger<u32>(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS); + + u32 num_ubos = total_ubos - ReservedUniformBlocks; + u32 num_ssbos = total_ssbos; + u32 num_samplers = total_samplers; + + u32 base_ubo = ReservedUniformBlocks; + u32 base_ssbo = 0; + u32 base_samplers = 0; + + for (std::size_t i = 0; i < NumStages; ++i) { + const std::size_t stage = stage_swizzle[i]; + bindings[stage] = { + Extract(base_ubo, num_ubos, total_ubos / NumStages, LimitUBOs[stage]), + Extract(base_ssbo, num_ssbos, total_ssbos / NumStages, LimitSSBOs[stage]), + Extract(base_samplers, num_samplers, total_samplers / NumStages, LimitSamplers[stage])}; + } + + u32 num_images = GetInteger<u32>(GL_MAX_IMAGE_UNITS); + u32 base_images = 0; + + // Reserve more image bindings on fragment and vertex stages. + bindings[4].image = + Extract(base_images, num_images, num_images / NumStages + 2, LimitImages[4]); + bindings[0].image = + Extract(base_images, num_images, num_images / NumStages + 1, LimitImages[0]); + + // Reserve the other image bindings. 
+ const u32 total_extracted_images = num_images / (NumStages - 2); + for (std::size_t i = 2; i < NumStages; ++i) { + const std::size_t stage = stage_swizzle[i]; + bindings[stage].image = + Extract(base_images, num_images, total_extracted_images, LimitImages[stage]); + } + + // Compute doesn't care about any of this. + bindings[5] = {0, 0, 0, 0}; + + return bindings; +} + } // Anonymous namespace -Device::Device() { +Device::Device() : base_bindings{BuildBaseBindings()} { const std::string_view vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR)); + const auto renderer = reinterpret_cast<const char*>(glGetString(GL_RENDERER)); const std::vector extensions = GetExtensions(); const bool is_nvidia = vendor == "NVIDIA Corporation"; + const bool is_amd = vendor == "ATI Technologies Inc."; + const bool is_intel = vendor == "Intel"; + const bool is_intel_proprietary = is_intel && std::strstr(renderer, "Mesa") == nullptr; uniform_buffer_alignment = GetInteger<std::size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT); shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT); @@ -62,11 +149,13 @@ Device::Device() { max_varyings = GetInteger<u32>(GL_MAX_VARYING_VECTORS); has_warp_intrinsics = GLAD_GL_NV_gpu_shader5 && GLAD_GL_NV_shader_thread_group && GLAD_GL_NV_shader_thread_shuffle; + has_shader_ballot = GLAD_GL_ARB_shader_ballot; has_vertex_viewport_layer = GLAD_GL_ARB_shader_viewport_layer_array; has_image_load_formatted = HasExtension(extensions, "GL_EXT_shader_image_load_formatted"); has_variable_aoffi = TestVariableAoffi(); - has_component_indexing_bug = TestComponentIndexingBug(); + has_component_indexing_bug = is_amd; has_precise_bug = TestPreciseBug(); + has_broken_compute = is_intel_proprietary; has_fast_buffer_sub_data = is_nvidia; LOG_INFO(Render_OpenGL, "Renderer_VariableAOFFI: {}", has_variable_aoffi); @@ -79,10 +168,12 @@ Device::Device(std::nullptr_t) { max_vertex_attributes = 16; max_varyings = 15; has_warp_intrinsics = 
true; + has_shader_ballot = true; has_vertex_viewport_layer = true; has_image_load_formatted = true; has_variable_aoffi = true; has_component_indexing_bug = false; + has_broken_compute = false; has_precise_bug = false; } @@ -97,52 +188,6 @@ void main() { })"); } -bool Device::TestComponentIndexingBug() { - const GLchar* COMPONENT_TEST = R"(#version 430 core -layout (std430, binding = 0) buffer OutputBuffer { - uint output_value; -}; -layout (std140, binding = 0) uniform InputBuffer { - uvec4 input_value[4096]; -}; -layout (location = 0) uniform uint idx; -void main() { - output_value = input_value[idx >> 2][idx & 3]; -})"; - const GLuint shader{glCreateShaderProgramv(GL_VERTEX_SHADER, 1, &COMPONENT_TEST)}; - SCOPE_EXIT({ glDeleteProgram(shader); }); - glUseProgram(shader); - - OGLVertexArray vao; - vao.Create(); - glBindVertexArray(vao.handle); - - constexpr std::array<GLuint, 8> values{0, 0, 0, 0, 0x1236327, 0x985482, 0x872753, 0x2378432}; - OGLBuffer ubo; - ubo.Create(); - glNamedBufferData(ubo.handle, sizeof(values), values.data(), GL_STATIC_DRAW); - glBindBufferBase(GL_UNIFORM_BUFFER, 0, ubo.handle); - - OGLBuffer ssbo; - ssbo.Create(); - glNamedBufferStorage(ssbo.handle, sizeof(GLuint), nullptr, GL_CLIENT_STORAGE_BIT); - - for (GLuint index = 4; index < 8; ++index) { - glInvalidateBufferData(ssbo.handle); - glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo.handle); - - glProgramUniform1ui(shader, 0, index); - glDrawArrays(GL_POINTS, 0, 1); - - GLuint result; - glGetNamedBufferSubData(ssbo.handle, 0, sizeof(result), &result); - if (result != values.at(index)) { - return true; - } - } - return false; -} - bool Device::TestPreciseBug() { return !TestProgram(R"(#version 430 core in vec3 coords; diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h index bf35bd0b6..d73b099d0 100644 --- a/src/video_core/renderer_opengl/gl_device.h +++ b/src/video_core/renderer_opengl/gl_device.h @@ -6,14 +6,32 @@ #include <cstddef> 
#include "common/common_types.h" +#include "video_core/engines/shader_type.h" namespace OpenGL { -class Device { +static constexpr u32 EmulationUniformBlockBinding = 0; + +class Device final { public: + struct BaseBindings final { + u32 uniform_buffer{}; + u32 shader_storage_buffer{}; + u32 sampler{}; + u32 image{}; + }; + explicit Device(); explicit Device(std::nullptr_t); + const BaseBindings& GetBaseBindings(std::size_t stage_index) const noexcept { + return base_bindings[stage_index]; + } + + const BaseBindings& GetBaseBindings(Tegra::Engines::ShaderType shader_type) const noexcept { + return GetBaseBindings(static_cast<std::size_t>(shader_type)); + } + std::size_t GetUniformBufferAlignment() const { return uniform_buffer_alignment; } @@ -34,6 +52,10 @@ public: return has_warp_intrinsics; } + bool HasShaderBallot() const { + return has_shader_ballot; + } + bool HasVertexViewportLayer() const { return has_vertex_viewport_layer; } @@ -54,25 +76,31 @@ public: return has_precise_bug; } + bool HasBrokenCompute() const { + return has_broken_compute; + } + bool HasFastBufferSubData() const { return has_fast_buffer_sub_data; } private: static bool TestVariableAoffi(); - static bool TestComponentIndexingBug(); static bool TestPreciseBug(); + std::array<BaseBindings, Tegra::Engines::MaxShaderTypes> base_bindings; std::size_t uniform_buffer_alignment{}; std::size_t shader_storage_alignment{}; u32 max_vertex_attributes{}; u32 max_varyings{}; bool has_warp_intrinsics{}; + bool has_shader_ballot{}; bool has_vertex_viewport_layer{}; bool has_image_load_formatted{}; bool has_variable_aoffi{}; bool has_component_indexing_bug{}; bool has_precise_bug{}; + bool has_broken_compute{}; bool has_fast_buffer_sub_data{}; }; diff --git a/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp b/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp index a5d69d78d..874ed3c6e 100644 --- a/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp +++ 
b/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp @@ -3,9 +3,12 @@ // Refer to the license.txt file included. #include <tuple> +#include <unordered_map> +#include <utility> -#include "common/cityhash.h" -#include "common/scope_exit.h" +#include <glad/glad.h> + +#include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_opengl/gl_framebuffer_cache.h" #include "video_core/renderer_opengl/gl_state.h" @@ -13,6 +16,7 @@ namespace OpenGL { using Maxwell = Tegra::Engines::Maxwell3D::Regs; +using VideoCore::Surface::SurfaceType; FramebufferCacheOpenGL::FramebufferCacheOpenGL() = default; @@ -35,36 +39,49 @@ OGLFramebuffer FramebufferCacheOpenGL::CreateFramebuffer(const FramebufferCacheK local_state.draw.draw_framebuffer = framebuffer.handle; local_state.ApplyFramebufferState(); + if (key.zeta) { + const bool stencil = key.zeta->GetSurfaceParams().type == SurfaceType::DepthStencil; + const GLenum attach_target = stencil ? GL_DEPTH_STENCIL_ATTACHMENT : GL_DEPTH_ATTACHMENT; + key.zeta->Attach(attach_target, GL_DRAW_FRAMEBUFFER); + } + + std::size_t num_buffers = 0; + std::array<GLenum, Maxwell::NumRenderTargets> targets; + for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) { - if (key.colors[index]) { - key.colors[index]->Attach(GL_COLOR_ATTACHMENT0 + static_cast<GLenum>(index), - GL_DRAW_FRAMEBUFFER); + if (!key.colors[index]) { + targets[index] = GL_NONE; + continue; } + const GLenum attach_target = GL_COLOR_ATTACHMENT0 + static_cast<GLenum>(index); + key.colors[index]->Attach(attach_target, GL_DRAW_FRAMEBUFFER); + + const u32 attachment = (key.color_attachments >> (BitsPerAttachment * index)) & 0b1111; + targets[index] = GL_COLOR_ATTACHMENT0 + attachment; + num_buffers = index + 1; } - if (key.colors_count) { - glDrawBuffers(key.colors_count, key.color_attachments.data()); + + if (num_buffers > 0) { + glDrawBuffers(static_cast<GLsizei>(num_buffers), std::data(targets)); } else { 
glDrawBuffer(GL_NONE); } - if (key.zeta) { - key.zeta->Attach(key.stencil_enable ? GL_DEPTH_STENCIL_ATTACHMENT : GL_DEPTH_ATTACHMENT, - GL_DRAW_FRAMEBUFFER); - } - return framebuffer; } -std::size_t FramebufferCacheKey::Hash() const { - static_assert(sizeof(*this) % sizeof(u64) == 0, "Unaligned struct"); - return static_cast<std::size_t>( - Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this))); +std::size_t FramebufferCacheKey::Hash() const noexcept { + std::size_t hash = std::hash<View>{}(zeta); + for (const auto& color : colors) { + hash ^= std::hash<View>{}(color); + } + hash ^= static_cast<std::size_t>(color_attachments) << 16; + return hash; } -bool FramebufferCacheKey::operator==(const FramebufferCacheKey& rhs) const { - return std::tie(stencil_enable, colors_count, color_attachments, colors, zeta) == - std::tie(rhs.stencil_enable, rhs.colors_count, rhs.color_attachments, rhs.colors, - rhs.zeta); +bool FramebufferCacheKey::operator==(const FramebufferCacheKey& rhs) const noexcept { + return std::tie(colors, zeta, color_attachments) == + std::tie(rhs.colors, rhs.zeta, rhs.color_attachments); } } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_framebuffer_cache.h b/src/video_core/renderer_opengl/gl_framebuffer_cache.h index 424344c48..02ec80ae9 100644 --- a/src/video_core/renderer_opengl/gl_framebuffer_cache.h +++ b/src/video_core/renderer_opengl/gl_framebuffer_cache.h @@ -18,21 +18,24 @@ namespace OpenGL { -struct alignas(sizeof(u64)) FramebufferCacheKey { - bool stencil_enable = false; - u16 colors_count = 0; +constexpr std::size_t BitsPerAttachment = 4; - std::array<GLenum, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_attachments{}; - std::array<View, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> colors; +struct FramebufferCacheKey { View zeta; + std::array<View, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> colors; + u32 color_attachments = 0; - std::size_t Hash() const; + std::size_t Hash() const 
noexcept; - bool operator==(const FramebufferCacheKey& rhs) const; + bool operator==(const FramebufferCacheKey& rhs) const noexcept; - bool operator!=(const FramebufferCacheKey& rhs) const { + bool operator!=(const FramebufferCacheKey& rhs) const noexcept { return !operator==(rhs); } + + void SetAttachment(std::size_t index, u32 attachment) { + color_attachments |= attachment << (BitsPerAttachment * index); + } }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index e560d70d5..672051102 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -19,9 +19,11 @@ #include "common/scope_exit.h" #include "core/core.h" #include "core/hle/kernel/process.h" +#include "core/memory.h" #include "core/settings.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_type.h" #include "video_core/memory_manager.h" #include "video_core/renderer_opengl/gl_rasterizer.h" #include "video_core/renderer_opengl/gl_shader_cache.h" @@ -49,8 +51,25 @@ MICROPROFILE_DEFINE(OpenGL_Blits, "OpenGL", "Blits", MP_RGB(128, 128, 192)); MICROPROFILE_DEFINE(OpenGL_CacheManagement, "OpenGL", "Cache Mgmt", MP_RGB(100, 255, 100)); MICROPROFILE_DEFINE(OpenGL_PrimitiveAssembly, "OpenGL", "Prim Asmbl", MP_RGB(255, 100, 100)); -static std::size_t GetConstBufferSize(const Tegra::Engines::ConstBufferInfo& buffer, - const GLShader::ConstBufferEntry& entry) { +namespace { + +template <typename Engine, typename Entry> +Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry, + Tegra::Engines::ShaderType shader_type) { + if (entry.IsBindless()) { + const Tegra::Texture::TextureHandle tex_handle = + engine.AccessConstBuffer32(shader_type, entry.GetBuffer(), entry.GetOffset()); + return engine.GetTextureInfo(tex_handle); + } + if constexpr (std::is_same_v<Engine, 
Tegra::Engines::Maxwell3D>) { + return engine.GetStageTexture(shader_type, entry.GetOffset()); + } else { + return engine.GetTexture(entry.GetOffset()); + } +} + +std::size_t GetConstBufferSize(const Tegra::Engines::ConstBufferInfo& buffer, + const GLShader::ConstBufferEntry& entry) { if (!entry.IsIndirect()) { return entry.GetSize(); } @@ -64,14 +83,16 @@ static std::size_t GetConstBufferSize(const Tegra::Engines::ConstBufferInfo& buf return buffer.size; } +} // Anonymous namespace + RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWindow& emu_window, ScreenInfo& info) - : texture_cache{system, *this, device}, shader_cache{*this, system, emu_window, device}, - system{system}, screen_info{info}, buffer_cache{*this, system, device, STREAM_BUFFER_SIZE} { + : RasterizerAccelerated{system.Memory()}, texture_cache{system, *this, device}, + shader_cache{*this, system, emu_window, device}, system{system}, screen_info{info}, + buffer_cache{*this, system, device, STREAM_BUFFER_SIZE} { shader_program_manager = std::make_unique<GLShader::ProgramManager>(); state.draw.shader_program = 0; state.Apply(); - clear_framebuffer.Create(); LOG_DEBUG(Render_OpenGL, "Sync fixed function OpenGL state here"); CheckExtensions(); @@ -238,12 +259,11 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { MICROPROFILE_SCOPE(OpenGL_Shader); auto& gpu = system.GPU().Maxwell3D(); - BaseBindings base_bindings; std::array<bool, Maxwell::NumClipDistances> clip_distances{}; for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) { const auto& shader_config = gpu.regs.shader_config[index]; - const Maxwell::ShaderProgram program{static_cast<Maxwell::ShaderProgram>(index)}; + const auto program{static_cast<Maxwell::ShaderProgram>(index)}; // Skip stages that are not enabled if (!gpu.regs.IsShaderConfigEnabled(index)) { @@ -251,31 +271,34 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { case Maxwell::ShaderProgram::Geometry: 
shader_program_manager->UseTrivialGeometryShader(); break; + case Maxwell::ShaderProgram::Fragment: + shader_program_manager->UseTrivialFragmentShader(); + break; default: break; } continue; } - const std::size_t stage{index == 0 ? 0 : index - 1}; // Stage indices are 0 - 5 - - GLShader::MaxwellUniformData ubo{}; - ubo.SetFromRegs(gpu, stage); - const auto [buffer, offset] = - buffer_cache.UploadHostMemory(&ubo, sizeof(ubo), device.GetUniformBufferAlignment()); - - // Bind the emulation info buffer - bind_ubo_pushbuffer.Push(buffer, offset, static_cast<GLsizeiptr>(sizeof(ubo))); + // Currently this stages are not supported in the OpenGL backend. + // Todo(Blinkhawk): Port tesselation shaders from Vulkan to OpenGL + if (program == Maxwell::ShaderProgram::TesselationControl) { + continue; + } else if (program == Maxwell::ShaderProgram::TesselationEval) { + continue; + } Shader shader{shader_cache.GetStageProgram(program)}; - const auto stage_enum = static_cast<Maxwell::ShaderStage>(stage); - SetupDrawConstBuffers(stage_enum, shader); - SetupDrawGlobalMemory(stage_enum, shader); - const auto texture_buffer_usage{SetupDrawTextures(stage_enum, shader, base_bindings)}; + // Stage indices are 0 - 5 + const std::size_t stage = index == 0 ? 
0 : index - 1; + SetupDrawConstBuffers(stage, shader); + SetupDrawGlobalMemory(stage, shader); + SetupDrawTextures(stage, shader); + SetupDrawImages(stage, shader); - const ProgramVariant variant{base_bindings, primitive_mode, texture_buffer_usage}; - const auto [program_handle, next_bindings] = shader->GetProgramHandle(variant); + const ProgramVariant variant(primitive_mode); + const auto program_handle = shader->GetHandle(variant); switch (program) { case Maxwell::ShaderProgram::VertexA: @@ -304,10 +327,8 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { // When VertexA is enabled, we have dual vertex shaders if (program == Maxwell::ShaderProgram::VertexA) { // VertexB was combined with VertexA, so we skip the VertexB iteration - index++; + ++index; } - - base_bindings = next_bindings; } SyncClipEnabled(clip_distances); @@ -362,78 +383,58 @@ void RasterizerOpenGL::ConfigureFramebuffers() { UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0); // Bind the framebuffer surfaces - FramebufferCacheKey fbkey; - for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) { + FramebufferCacheKey key; + const auto colors_count = static_cast<std::size_t>(regs.rt_control.count); + for (std::size_t index = 0; index < colors_count; ++index) { View color_surface{texture_cache.GetColorBufferSurface(index, true)}; - - if (color_surface) { - // Assume that a surface will be written to if it is used as a framebuffer, even - // if the shader doesn't actually write to it. - texture_cache.MarkColorBufferInUse(index); + if (!color_surface) { + continue; } + // Assume that a surface will be written to if it is used as a framebuffer, even + // if the shader doesn't actually write to it. 
+ texture_cache.MarkColorBufferInUse(index); - fbkey.color_attachments[index] = GL_COLOR_ATTACHMENT0 + regs.rt_control.GetMap(index); - fbkey.colors[index] = std::move(color_surface); + key.SetAttachment(index, regs.rt_control.GetMap(index)); + key.colors[index] = std::move(color_surface); } - fbkey.colors_count = regs.rt_control.count; if (depth_surface) { // Assume that a surface will be written to if it is used as a framebuffer, even if // the shader doesn't actually write to it. texture_cache.MarkDepthBufferInUse(); - - fbkey.stencil_enable = depth_surface->GetSurfaceParams().type == SurfaceType::DepthStencil; - fbkey.zeta = std::move(depth_surface); + key.zeta = std::move(depth_surface); } texture_cache.GuardRenderTargets(false); - state.draw.draw_framebuffer = framebuffer_cache.GetFramebuffer(fbkey); + state.draw.draw_framebuffer = framebuffer_cache.GetFramebuffer(key); SyncViewport(state); } void RasterizerOpenGL::ConfigureClearFramebuffer(OpenGLState& current_state, bool using_color_fb, bool using_depth_fb, bool using_stencil_fb) { + using VideoCore::Surface::SurfaceType; + auto& gpu = system.GPU().Maxwell3D(); const auto& regs = gpu.regs; texture_cache.GuardRenderTargets(true); - View color_surface{}; + View color_surface; if (using_color_fb) { color_surface = texture_cache.GetColorBufferSurface(regs.clear_buffers.RT, false); } - View depth_surface{}; + View depth_surface; if (using_depth_fb || using_stencil_fb) { depth_surface = texture_cache.GetDepthBufferSurface(false); } texture_cache.GuardRenderTargets(false); - current_state.draw.draw_framebuffer = clear_framebuffer.handle; - current_state.ApplyFramebufferState(); - - if (color_surface) { - color_surface->Attach(GL_COLOR_ATTACHMENT0, GL_DRAW_FRAMEBUFFER); - } else { - glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); - } + FramebufferCacheKey key; + key.colors[0] = color_surface; + key.zeta = depth_surface; - if (depth_surface) { - const auto& params = 
depth_surface->GetSurfaceParams(); - switch (params.type) { - case VideoCore::Surface::SurfaceType::Depth: - depth_surface->Attach(GL_DEPTH_ATTACHMENT, GL_DRAW_FRAMEBUFFER); - glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); - break; - case VideoCore::Surface::SurfaceType::DepthStencil: - depth_surface->Attach(GL_DEPTH_STENCIL_ATTACHMENT, GL_DRAW_FRAMEBUFFER); - break; - default: - UNIMPLEMENTED(); - } - } else { - glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, - 0); - } + current_state.draw.draw_framebuffer = framebuffer_cache.GetFramebuffer(key); + current_state.ApplyFramebufferState(); } void RasterizerOpenGL::Clear() { @@ -516,6 +517,7 @@ void RasterizerOpenGL::Clear() { ConfigureClearFramebuffer(clear_state, use_color, use_depth, use_stencil); SyncViewport(clear_state); + SyncRasterizeEnable(clear_state); if (regs.clear_flags.scissor) { SyncScissorTest(clear_state); } @@ -543,6 +545,7 @@ void RasterizerOpenGL::Clear() { void RasterizerOpenGL::DrawPrelude() { auto& gpu = system.GPU().Maxwell3D(); + SyncRasterizeEnable(state); SyncColorMask(); SyncFragmentColorClampState(); SyncMultiSampleState(); @@ -592,8 +595,16 @@ void RasterizerOpenGL::DrawPrelude() { index_buffer_offset = SetupIndexBuffer(); // Prepare packed bindings. - bind_ubo_pushbuffer.Setup(0); - bind_ssbo_pushbuffer.Setup(0); + bind_ubo_pushbuffer.Setup(); + bind_ssbo_pushbuffer.Setup(); + + // Setup emulation uniform buffer. + GLShader::MaxwellUniformData ubo; + ubo.SetFromRegs(gpu); + const auto [buffer, offset] = + buffer_cache.UploadHostMemory(&ubo, sizeof(ubo), device.GetUniformBufferAlignment()); + bind_ubo_pushbuffer.Push(EmulationUniformBlockBinding, buffer, offset, + static_cast<GLsizeiptr>(sizeof(ubo))); // Setup shaders and their used resources. 
texture_cache.GuardSamplers(true); @@ -726,19 +737,21 @@ bool RasterizerOpenGL::DrawMultiBatch(bool is_indexed) { } void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { - if (!GLAD_GL_ARB_compute_variable_group_size) { - LOG_ERROR(Render_OpenGL, "Compute is currently not supported on this device due to the " - "lack of GL_ARB_compute_variable_group_size"); + if (device.HasBrokenCompute()) { return; } + buffer_cache.Acquire(); + auto kernel = shader_cache.GetComputeKernel(code_addr); - ProgramVariant variant; - variant.texture_buffer_usage = SetupComputeTextures(kernel); + SetupComputeTextures(kernel); SetupComputeImages(kernel); - const auto [program, next_bindings] = kernel->GetProgramHandle(variant); - state.draw.shader_program = program; + const auto& launch_desc = system.GPU().KeplerCompute().launch_description; + const ProgramVariant variant(launch_desc.block_dim_x, launch_desc.block_dim_y, + launch_desc.block_dim_z, launch_desc.shared_alloc, + launch_desc.local_pos_alloc); + state.draw.shader_program = kernel->GetHandle(variant); state.draw.program_pipeline = 0; const std::size_t buffer_size = @@ -746,8 +759,8 @@ void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { (Maxwell::MaxConstBufferSize + device.GetUniformBufferAlignment()); buffer_cache.Map(buffer_size); - bind_ubo_pushbuffer.Setup(0); - bind_ssbo_pushbuffer.Setup(0); + bind_ubo_pushbuffer.Setup(); + bind_ssbo_pushbuffer.Setup(); SetupComputeConstBuffers(kernel); SetupComputeGlobalMemory(kernel); @@ -762,10 +775,7 @@ void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { state.ApplyShaderProgram(); state.ApplyProgramPipeline(); - const auto& launch_desc = system.GPU().KeplerCompute().launch_description; - glDispatchComputeGroupSizeARB(launch_desc.grid_dim_x, launch_desc.grid_dim_y, - launch_desc.grid_dim_z, launch_desc.block_dim_x, - launch_desc.block_dim_y, launch_desc.block_dim_z); + glDispatchCompute(launch_desc.grid_dim_x, launch_desc.grid_dim_y, launch_desc.grid_dim_z); } 
void RasterizerOpenGL::FlushAll() {} @@ -821,7 +831,7 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, MICROPROFILE_SCOPE(OpenGL_CacheManagement); const auto surface{ - texture_cache.TryFindFramebufferSurface(Memory::GetPointer(framebuffer_addr))}; + texture_cache.TryFindFramebufferSurface(system.Memory().GetPointer(framebuffer_addr))}; if (!surface) { return {}; } @@ -834,7 +844,7 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, ASSERT_MSG(params.height == config.height, "Framebuffer height is different"); if (params.pixel_format != pixel_format) { - LOG_WARNING(Render_OpenGL, "Framebuffer pixel_format is different"); + LOG_DEBUG(Render_OpenGL, "Framebuffer pixel_format is different"); } screen_info.display_texture = surface->GetTexture(); @@ -843,20 +853,23 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, return true; } -void RasterizerOpenGL::SetupDrawConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, - const Shader& shader) { +void RasterizerOpenGL::SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader) { MICROPROFILE_SCOPE(OpenGL_UBO); const auto& stages = system.GPU().Maxwell3D().state.shader_stages; - const auto& shader_stage = stages[static_cast<std::size_t>(stage)]; + const auto& shader_stage = stages[stage_index]; + + u32 binding = device.GetBaseBindings(stage_index).uniform_buffer; for (const auto& entry : shader->GetShaderEntries().const_buffers) { const auto& buffer = shader_stage.const_buffers[entry.GetIndex()]; - SetupConstBuffer(buffer, entry); + SetupConstBuffer(binding++, buffer, entry); } } void RasterizerOpenGL::SetupComputeConstBuffers(const Shader& kernel) { MICROPROFILE_SCOPE(OpenGL_UBO); const auto& launch_desc = system.GPU().KeplerCompute().launch_description; + + u32 binding = 0; for (const auto& entry : kernel->GetShaderEntries().const_buffers) { const auto& config = 
launch_desc.const_buffer_config[entry.GetIndex()]; const std::bitset<8> mask = launch_desc.const_buffer_enable_mask.Value(); @@ -864,15 +877,16 @@ void RasterizerOpenGL::SetupComputeConstBuffers(const Shader& kernel) { buffer.address = config.Address(); buffer.size = config.size; buffer.enabled = mask[entry.GetIndex()]; - SetupConstBuffer(buffer, entry); + SetupConstBuffer(binding++, buffer, entry); } } -void RasterizerOpenGL::SetupConstBuffer(const Tegra::Engines::ConstBufferInfo& buffer, +void RasterizerOpenGL::SetupConstBuffer(u32 binding, const Tegra::Engines::ConstBufferInfo& buffer, const GLShader::ConstBufferEntry& entry) { if (!buffer.enabled) { // Set values to zero to unbind buffers - bind_ubo_pushbuffer.Push(buffer_cache.GetEmptyBuffer(sizeof(float)), 0, sizeof(float)); + bind_ubo_pushbuffer.Push(binding, buffer_cache.GetEmptyBuffer(sizeof(float)), 0, + sizeof(float)); return; } @@ -883,19 +897,20 @@ void RasterizerOpenGL::SetupConstBuffer(const Tegra::Engines::ConstBufferInfo& b const auto alignment = device.GetUniformBufferAlignment(); const auto [cbuf, offset] = buffer_cache.UploadMemory(buffer.address, size, alignment, false, device.HasFastBufferSubData()); - bind_ubo_pushbuffer.Push(cbuf, offset, size); + bind_ubo_pushbuffer.Push(binding, cbuf, offset, size); } -void RasterizerOpenGL::SetupDrawGlobalMemory(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, - const Shader& shader) { +void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader) { auto& gpu{system.GPU()}; auto& memory_manager{gpu.MemoryManager()}; - const auto cbufs{gpu.Maxwell3D().state.shader_stages[static_cast<std::size_t>(stage)]}; + const auto cbufs{gpu.Maxwell3D().state.shader_stages[stage_index]}; + + u32 binding = device.GetBaseBindings(stage_index).shader_storage_buffer; for (const auto& entry : shader->GetShaderEntries().global_memory_entries) { const auto addr{cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset()}; const 
auto gpu_addr{memory_manager.Read<u64>(addr)}; const auto size{memory_manager.Read<u32>(addr + 8)}; - SetupGlobalMemory(entry, gpu_addr, size); + SetupGlobalMemory(binding++, entry, gpu_addr, size); } } @@ -903,120 +918,82 @@ void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) { auto& gpu{system.GPU()}; auto& memory_manager{gpu.MemoryManager()}; const auto cbufs{gpu.KeplerCompute().launch_description.const_buffer_config}; + + u32 binding = 0; for (const auto& entry : kernel->GetShaderEntries().global_memory_entries) { const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()}; const auto gpu_addr{memory_manager.Read<u64>(addr)}; const auto size{memory_manager.Read<u32>(addr + 8)}; - SetupGlobalMemory(entry, gpu_addr, size); + SetupGlobalMemory(binding++, entry, gpu_addr, size); } } -void RasterizerOpenGL::SetupGlobalMemory(const GLShader::GlobalMemoryEntry& entry, +void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GLShader::GlobalMemoryEntry& entry, GPUVAddr gpu_addr, std::size_t size) { const auto alignment{device.GetShaderStorageBufferAlignment()}; const auto [ssbo, buffer_offset] = buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten()); - bind_ssbo_pushbuffer.Push(ssbo, buffer_offset, static_cast<GLsizeiptr>(size)); + bind_ssbo_pushbuffer.Push(binding, ssbo, buffer_offset, static_cast<GLsizeiptr>(size)); } -TextureBufferUsage RasterizerOpenGL::SetupDrawTextures(Maxwell::ShaderStage stage, - const Shader& shader, - BaseBindings base_bindings) { +void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader& shader) { MICROPROFILE_SCOPE(OpenGL_Texture); - const auto& gpu = system.GPU(); - const auto& maxwell3d = gpu.Maxwell3D(); - const auto& entries = shader->GetShaderEntries().samplers; - - ASSERT_MSG(base_bindings.sampler + entries.size() <= std::size(state.textures), - "Exceeded the number of active textures."); - - TextureBufferUsage texture_buffer_usage{0}; - - for (u32 
bindpoint = 0; bindpoint < entries.size(); ++bindpoint) { - const auto& entry = entries[bindpoint]; - const auto texture = [&] { - if (!entry.IsBindless()) { - return maxwell3d.GetStageTexture(stage, entry.GetOffset()); - } - const auto shader_type = static_cast<Tegra::Engines::ShaderType>(stage); - const Tegra::Texture::TextureHandle tex_handle = - maxwell3d.AccessConstBuffer32(shader_type, entry.GetBuffer(), entry.GetOffset()); - return maxwell3d.GetTextureInfo(tex_handle); - }(); - - if (SetupTexture(base_bindings.sampler + bindpoint, texture, entry)) { - texture_buffer_usage.set(bindpoint); - } + const auto& maxwell3d = system.GPU().Maxwell3D(); + u32 binding = device.GetBaseBindings(stage_index).sampler; + for (const auto& entry : shader->GetShaderEntries().samplers) { + const auto shader_type = static_cast<Tegra::Engines::ShaderType>(stage_index); + const auto texture = GetTextureInfo(maxwell3d, entry, shader_type); + SetupTexture(binding++, texture, entry); } - - return texture_buffer_usage; } -TextureBufferUsage RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) { +void RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) { MICROPROFILE_SCOPE(OpenGL_Texture); const auto& compute = system.GPU().KeplerCompute(); - const auto& entries = kernel->GetShaderEntries().samplers; - - ASSERT_MSG(entries.size() <= std::size(state.textures), - "Exceeded the number of active textures."); - - TextureBufferUsage texture_buffer_usage{0}; - - for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) { - const auto& entry = entries[bindpoint]; - const auto texture = [&] { - if (!entry.IsBindless()) { - return compute.GetTexture(entry.GetOffset()); - } - const Tegra::Texture::TextureHandle tex_handle = compute.AccessConstBuffer32( - Tegra::Engines::ShaderType::Compute, entry.GetBuffer(), entry.GetOffset()); - return compute.GetTextureInfo(tex_handle); - }(); - - if (SetupTexture(bindpoint, texture, entry)) { - texture_buffer_usage.set(bindpoint); - } + 
u32 binding = 0; + for (const auto& entry : kernel->GetShaderEntries().samplers) { + const auto texture = GetTextureInfo(compute, entry, Tegra::Engines::ShaderType::Compute); + SetupTexture(binding++, texture, entry); } - - return texture_buffer_usage; } -bool RasterizerOpenGL::SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture, +void RasterizerOpenGL::SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture, const GLShader::SamplerEntry& entry) { - state.samplers[binding] = sampler_cache.GetSampler(texture.tsc); - const auto view = texture_cache.GetTextureSurface(texture.tic, entry); if (!view) { // Can occur when texture addr is null or its memory is unmapped/invalid + state.samplers[binding] = 0; state.textures[binding] = 0; - return false; + return; } state.textures[binding] = view->GetTexture(); if (view->GetSurfaceParams().IsBuffer()) { - return true; + return; } + state.samplers[binding] = sampler_cache.GetSampler(texture.tsc); // Apply swizzle to textures that are not buffers. 
view->ApplySwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source, texture.tic.w_source); - return false; +} + +void RasterizerOpenGL::SetupDrawImages(std::size_t stage_index, const Shader& shader) { + const auto& maxwell3d = system.GPU().Maxwell3D(); + u32 binding = device.GetBaseBindings(stage_index).image; + for (const auto& entry : shader->GetShaderEntries().images) { + const auto shader_type = static_cast<Tegra::Engines::ShaderType>(stage_index); + const auto tic = GetTextureInfo(maxwell3d, entry, shader_type).tic; + SetupImage(binding++, tic, entry); + } } void RasterizerOpenGL::SetupComputeImages(const Shader& shader) { const auto& compute = system.GPU().KeplerCompute(); - const auto& entries = shader->GetShaderEntries().images; - for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) { - const auto& entry = entries[bindpoint]; - const auto tic = [&] { - if (!entry.IsBindless()) { - return compute.GetTexture(entry.GetOffset()).tic; - } - const Tegra::Texture::TextureHandle tex_handle = compute.AccessConstBuffer32( - Tegra::Engines::ShaderType::Compute, entry.GetBuffer(), entry.GetOffset()); - return compute.GetTextureInfo(tex_handle).tic; - }(); - SetupImage(bindpoint, tic, entry); + u32 binding = 0; + for (const auto& entry : shader->GetShaderEntries().images) { + const auto tic = GetTextureInfo(compute, entry, Tegra::Engines::ShaderType::Compute).tic; + SetupImage(binding++, tic, entry); } } @@ -1055,6 +1032,19 @@ void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) { } state.depth_clamp.far_plane = regs.view_volume_clip_control.depth_clamp_far != 0; state.depth_clamp.near_plane = regs.view_volume_clip_control.depth_clamp_near != 0; + + bool flip_y = false; + if (regs.viewport_transform[0].scale_y < 0.0) { + flip_y = !flip_y; + } + if (regs.screen_y_control.y_negate != 0) { + flip_y = !flip_y; + } + state.clip_control.origin = flip_y ? 
GL_UPPER_LEFT : GL_LOWER_LEFT; + state.clip_control.depth_mode = + regs.depth_mode == Tegra::Engines::Maxwell3D::Regs::DepthMode::ZeroToOne + ? GL_ZERO_TO_ONE + : GL_NEGATIVE_ONE_TO_ONE; } void RasterizerOpenGL::SyncClipEnabled( @@ -1077,28 +1067,14 @@ void RasterizerOpenGL::SyncClipCoef() { } void RasterizerOpenGL::SyncCullMode() { - auto& maxwell3d = system.GPU().Maxwell3D(); - - const auto& regs = maxwell3d.regs; + const auto& regs = system.GPU().Maxwell3D().regs; state.cull.enabled = regs.cull.enabled != 0; if (state.cull.enabled) { - state.cull.front_face = MaxwellToGL::FrontFace(regs.cull.front_face); state.cull.mode = MaxwellToGL::CullFace(regs.cull.cull_face); - - const bool flip_triangles{regs.screen_y_control.triangle_rast_flip == 0 || - regs.viewport_transform[0].scale_y < 0.0f}; - - // If the GPU is configured to flip the rasterized triangles, then we need to flip the - // notion of front and back. Note: We flip the triangles when the value of the register is 0 - // because OpenGL already does it for us. 
- if (flip_triangles) { - if (state.cull.front_face == GL_CCW) - state.cull.front_face = GL_CW; - else if (state.cull.front_face == GL_CW) - state.cull.front_face = GL_CCW; - } } + + state.cull.front_face = MaxwellToGL::FrontFace(regs.cull.front_face); } void RasterizerOpenGL::SyncPrimitiveRestart() { @@ -1162,6 +1138,11 @@ void RasterizerOpenGL::SyncStencilTestState() { } } +void RasterizerOpenGL::SyncRasterizeEnable(OpenGLState& current_state) { + const auto& regs = system.GPU().Maxwell3D().regs; + current_state.rasterizer_discard = regs.rasterize_enable == 0; +} + void RasterizerOpenGL::SyncColorMask() { auto& maxwell3d = system.GPU().Maxwell3D(); if (!maxwell3d.dirty.color_mask) { diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index bd6fe5c3a..6a27cf497 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -83,42 +83,41 @@ private: bool using_depth_fb, bool using_stencil_fb); /// Configures the current constbuffers to use for the draw command. - void SetupDrawConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, - const Shader& shader); + void SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader); /// Configures the current constbuffers to use for the kernel invocation. void SetupComputeConstBuffers(const Shader& kernel); /// Configures a constant buffer. - void SetupConstBuffer(const Tegra::Engines::ConstBufferInfo& buffer, + void SetupConstBuffer(u32 binding, const Tegra::Engines::ConstBufferInfo& buffer, const GLShader::ConstBufferEntry& entry); /// Configures the current global memory entries to use for the draw command. - void SetupDrawGlobalMemory(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, - const Shader& shader); + void SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader); /// Configures the current global memory entries to use for the kernel invocation. 
void SetupComputeGlobalMemory(const Shader& kernel); /// Configures a constant buffer. - void SetupGlobalMemory(const GLShader::GlobalMemoryEntry& entry, GPUVAddr gpu_addr, + void SetupGlobalMemory(u32 binding, const GLShader::GlobalMemoryEntry& entry, GPUVAddr gpu_addr, std::size_t size); /// Syncs all the state, shaders, render targets and textures setting before a draw call. void DrawPrelude(); - /// Configures the current textures to use for the draw command. Returns shaders texture buffer - /// usage. - TextureBufferUsage SetupDrawTextures(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, - const Shader& shader, BaseBindings base_bindings); + /// Configures the current textures to use for the draw command. + void SetupDrawTextures(std::size_t stage_index, const Shader& shader); - /// Configures the textures used in a compute shader. Returns texture buffer usage. - TextureBufferUsage SetupComputeTextures(const Shader& kernel); + /// Configures the textures used in a compute shader. + void SetupComputeTextures(const Shader& kernel); - /// Configures a texture. Returns true when the texture is a texture buffer. - bool SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture, + /// Configures a texture. + void SetupTexture(u32 binding, const Tegra::Texture::FullTextureInfo& texture, const GLShader::SamplerEntry& entry); + /// Configures images in a graphics shader. + void SetupDrawImages(std::size_t stage_index, const Shader& shader); + /// Configures images in a compute shader. 
void SetupComputeImages(const Shader& shader); @@ -169,6 +168,9 @@ private: /// Syncs the point state to match the guest state void SyncPointState(); + /// Syncs the rasterizer enable state to match the guest state + void SyncRasterizeEnable(OpenGLState& current_state); + /// Syncs Color Mask void SyncColorMask(); @@ -224,8 +226,6 @@ private: enum class AccelDraw { Disabled, Arrays, Indexed }; AccelDraw accelerate_draw = AccelDraw::Disabled; - - OGLFramebuffer clear_framebuffer; }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index f1b89165d..de742d11c 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -8,12 +8,15 @@ #include <thread> #include <unordered_set> #include <boost/functional/hash.hpp> +#include "common/alignment.h" #include "common/assert.h" +#include "common/logging/log.h" #include "common/scope_exit.h" #include "core/core.h" #include "core/frontend/emu_window.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_type.h" #include "video_core/memory_manager.h" #include "video_core/renderer_opengl/gl_rasterizer.h" #include "video_core/renderer_opengl/gl_shader_cache.h" @@ -82,28 +85,26 @@ std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) { /// Gets the shader program code from memory for the specified address ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr, const u8* host_ptr) { - ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH); + ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH); ASSERT_OR_EXECUTE(host_ptr != nullptr, { - std::fill(program_code.begin(), program_code.end(), 0); - return program_code; + std::fill(code.begin(), code.end(), 0); + return code; }); - memory_manager.ReadBlockUnsafe(gpu_addr, program_code.data(), - 
program_code.size() * sizeof(u64)); - program_code.resize(CalculateProgramSize(program_code)); - return program_code; + memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64)); + code.resize(CalculateProgramSize(code)); + return code; } /// Gets the shader type from a Maxwell program type -constexpr GLenum GetShaderType(ProgramType program_type) { - switch (program_type) { - case ProgramType::VertexA: - case ProgramType::VertexB: +constexpr GLenum GetGLShaderType(ShaderType shader_type) { + switch (shader_type) { + case ShaderType::Vertex: return GL_VERTEX_SHADER; - case ProgramType::Geometry: + case ShaderType::Geometry: return GL_GEOMETRY_SHADER; - case ProgramType::Fragment: + case ShaderType::Fragment: return GL_FRAGMENT_SHADER; - case ProgramType::Compute: + case ShaderType::Compute: return GL_COMPUTE_SHADER; default: return GL_NONE; @@ -111,52 +112,33 @@ constexpr GLenum GetShaderType(ProgramType program_type) { } /// Describes primitive behavior on geometry shaders -constexpr std::tuple<const char*, const char*, u32> GetPrimitiveDescription(GLenum primitive_mode) { +constexpr std::pair<const char*, u32> GetPrimitiveDescription(GLenum primitive_mode) { switch (primitive_mode) { case GL_POINTS: - return {"points", "Points", 1}; + return {"points", 1}; case GL_LINES: case GL_LINE_STRIP: - return {"lines", "Lines", 2}; + return {"lines", 2}; case GL_LINES_ADJACENCY: case GL_LINE_STRIP_ADJACENCY: - return {"lines_adjacency", "LinesAdj", 4}; + return {"lines_adjacency", 4}; case GL_TRIANGLES: case GL_TRIANGLE_STRIP: case GL_TRIANGLE_FAN: - return {"triangles", "Triangles", 3}; + return {"triangles", 3}; case GL_TRIANGLES_ADJACENCY: case GL_TRIANGLE_STRIP_ADJACENCY: - return {"triangles_adjacency", "TrianglesAdj", 6}; + return {"triangles_adjacency", 6}; default: - return {"points", "Invalid", 1}; + return {"points", 1}; } } -ProgramType GetProgramType(Maxwell::ShaderProgram program) { - switch (program) { - case 
Maxwell::ShaderProgram::VertexA: - return ProgramType::VertexA; - case Maxwell::ShaderProgram::VertexB: - return ProgramType::VertexB; - case Maxwell::ShaderProgram::TesselationControl: - return ProgramType::TessellationControl; - case Maxwell::ShaderProgram::TesselationEval: - return ProgramType::TessellationEval; - case Maxwell::ShaderProgram::Geometry: - return ProgramType::Geometry; - case Maxwell::ShaderProgram::Fragment: - return ProgramType::Fragment; - } - UNREACHABLE(); - return {}; -} - /// Hashes one (or two) program streams -u64 GetUniqueIdentifier(ProgramType program_type, const ProgramCode& code, +u64 GetUniqueIdentifier(ShaderType shader_type, bool is_a, const ProgramCode& code, const ProgramCode& code_b) { u64 unique_identifier = boost::hash_value(code); - if (program_type == ProgramType::VertexA) { + if (is_a) { // VertexA programs include two programs boost::hash_combine(unique_identifier, boost::hash_value(code_b)); } @@ -164,79 +146,74 @@ u64 GetUniqueIdentifier(ProgramType program_type, const ProgramCode& code, } /// Creates an unspecialized program from code streams -std::string GenerateGLSL(const Device& device, ProgramType program_type, const ShaderIR& ir, +std::string GenerateGLSL(const Device& device, ShaderType shader_type, const ShaderIR& ir, const std::optional<ShaderIR>& ir_b) { - switch (program_type) { - case ProgramType::VertexA: - case ProgramType::VertexB: + switch (shader_type) { + case ShaderType::Vertex: return GLShader::GenerateVertexShader(device, ir, ir_b ? 
&*ir_b : nullptr); - case ProgramType::Geometry: + case ShaderType::Geometry: return GLShader::GenerateGeometryShader(device, ir); - case ProgramType::Fragment: + case ShaderType::Fragment: return GLShader::GenerateFragmentShader(device, ir); - case ProgramType::Compute: + case ShaderType::Compute: return GLShader::GenerateComputeShader(device, ir); default: - UNIMPLEMENTED_MSG("Unimplemented program_type={}", static_cast<u32>(program_type)); + UNIMPLEMENTED_MSG("Unimplemented shader_type={}", static_cast<u32>(shader_type)); return {}; } } -constexpr const char* GetProgramTypeName(ProgramType program_type) { - switch (program_type) { - case ProgramType::VertexA: - case ProgramType::VertexB: +constexpr const char* GetShaderTypeName(ShaderType shader_type) { + switch (shader_type) { + case ShaderType::Vertex: return "VS"; - case ProgramType::TessellationControl: - return "TCS"; - case ProgramType::TessellationEval: - return "TES"; - case ProgramType::Geometry: + case ShaderType::TesselationControl: + return "HS"; + case ShaderType::TesselationEval: + return "DS"; + case ShaderType::Geometry: return "GS"; - case ProgramType::Fragment: + case ShaderType::Fragment: return "FS"; - case ProgramType::Compute: + case ShaderType::Compute: return "CS"; } return "UNK"; } -Tegra::Engines::ShaderType GetEnginesShaderType(ProgramType program_type) { +constexpr ShaderType GetShaderType(Maxwell::ShaderProgram program_type) { switch (program_type) { - case ProgramType::VertexA: - case ProgramType::VertexB: - return Tegra::Engines::ShaderType::Vertex; - case ProgramType::TessellationControl: - return Tegra::Engines::ShaderType::TesselationControl; - case ProgramType::TessellationEval: - return Tegra::Engines::ShaderType::TesselationEval; - case ProgramType::Geometry: - return Tegra::Engines::ShaderType::Geometry; - case ProgramType::Fragment: - return Tegra::Engines::ShaderType::Fragment; - case ProgramType::Compute: - return Tegra::Engines::ShaderType::Compute; - } - UNREACHABLE(); 
+ case Maxwell::ShaderProgram::VertexA: + case Maxwell::ShaderProgram::VertexB: + return ShaderType::Vertex; + case Maxwell::ShaderProgram::TesselationControl: + return ShaderType::TesselationControl; + case Maxwell::ShaderProgram::TesselationEval: + return ShaderType::TesselationEval; + case Maxwell::ShaderProgram::Geometry: + return ShaderType::Geometry; + case Maxwell::ShaderProgram::Fragment: + return ShaderType::Fragment; + } return {}; } -std::string GetShaderId(u64 unique_identifier, ProgramType program_type) { - return fmt::format("{}{:016X}", GetProgramTypeName(program_type), unique_identifier); +std::string GetShaderId(u64 unique_identifier, ShaderType shader_type) { + return fmt::format("{}{:016X}", GetShaderTypeName(shader_type), unique_identifier); } -Tegra::Engines::ConstBufferEngineInterface& GetConstBufferEngineInterface( - Core::System& system, ProgramType program_type) { - if (program_type == ProgramType::Compute) { +Tegra::Engines::ConstBufferEngineInterface& GetConstBufferEngineInterface(Core::System& system, + ShaderType shader_type) { + if (shader_type == ShaderType::Compute) { return system.GPU().KeplerCompute(); } else { return system.GPU().Maxwell3D(); } } -std::unique_ptr<ConstBufferLocker> MakeLocker(Core::System& system, ProgramType program_type) { - return std::make_unique<ConstBufferLocker>(GetEnginesShaderType(program_type), - GetConstBufferEngineInterface(system, program_type)); +std::unique_ptr<ConstBufferLocker> MakeLocker(Core::System& system, ShaderType shader_type) { + return std::make_unique<ConstBufferLocker>(shader_type, + GetConstBufferEngineInterface(system, shader_type)); } void FillLocker(ConstBufferLocker& locker, const ShaderDiskCacheUsage& usage) { @@ -253,88 +230,66 @@ void FillLocker(ConstBufferLocker& locker, const ShaderDiskCacheUsage& usage) { } } -CachedProgram BuildShader(const Device& device, u64 unique_identifier, ProgramType program_type, - const ProgramCode& program_code, const ProgramCode& program_code_b, - 
const ProgramVariant& variant, ConstBufferLocker& locker, +CachedProgram BuildShader(const Device& device, u64 unique_identifier, ShaderType shader_type, + const ProgramCode& code, const ProgramCode& code_b, + ConstBufferLocker& locker, const ProgramVariant& variant, bool hint_retrievable = false) { - LOG_INFO(Render_OpenGL, "called. {}", GetShaderId(unique_identifier, program_type)); + LOG_INFO(Render_OpenGL, "called. {}", GetShaderId(unique_identifier, shader_type)); - const bool is_compute = program_type == ProgramType::Compute; + const bool is_compute = shader_type == ShaderType::Compute; const u32 main_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET; - const ShaderIR ir(program_code, main_offset, COMPILER_SETTINGS, locker); + const ShaderIR ir(code, main_offset, COMPILER_SETTINGS, locker); std::optional<ShaderIR> ir_b; - if (!program_code_b.empty()) { - ir_b.emplace(program_code_b, main_offset, COMPILER_SETTINGS, locker); + if (!code_b.empty()) { + ir_b.emplace(code_b, main_offset, COMPILER_SETTINGS, locker); } const auto entries = GLShader::GetEntries(ir); - auto base_bindings{variant.base_bindings}; - const auto primitive_mode{variant.primitive_mode}; - const auto texture_buffer_usage{variant.texture_buffer_usage}; - std::string source = fmt::format(R"(// {} #version 430 core #extension GL_ARB_separate_shader_objects : enable -#extension GL_ARB_shader_viewport_layer_array : enable -#extension GL_EXT_shader_image_load_formatted : enable -#extension GL_NV_gpu_shader5 : enable -#extension GL_NV_shader_thread_group : enable -#extension GL_NV_shader_thread_shuffle : enable )", - GetShaderId(unique_identifier, program_type)); - if (is_compute) { - source += "#extension GL_ARB_compute_variable_group_size : require\n"; + GetShaderId(unique_identifier, shader_type)); + if (device.HasShaderBallot()) { + source += "#extension GL_ARB_shader_ballot : require\n"; } - source += '\n'; - - if (!is_compute) { - source += fmt::format("#define EMULATION_UBO_BINDING 
{}\n", base_bindings.cbuf++); + if (device.HasVertexViewportLayer()) { + source += "#extension GL_ARB_shader_viewport_layer_array : require\n"; } - - for (const auto& cbuf : entries.const_buffers) { - source += - fmt::format("#define CBUF_BINDING_{} {}\n", cbuf.GetIndex(), base_bindings.cbuf++); + if (device.HasImageLoadFormatted()) { + source += "#extension GL_EXT_shader_image_load_formatted : require\n"; } - for (const auto& gmem : entries.global_memory_entries) { - source += fmt::format("#define GMEM_BINDING_{}_{} {}\n", gmem.GetCbufIndex(), - gmem.GetCbufOffset(), base_bindings.gmem++); + if (device.HasWarpIntrinsics()) { + source += "#extension GL_NV_gpu_shader5 : require\n" + "#extension GL_NV_shader_thread_group : require\n" + "#extension GL_NV_shader_thread_shuffle : require\n"; } - for (const auto& sampler : entries.samplers) { - source += fmt::format("#define SAMPLER_BINDING_{} {}\n", sampler.GetIndex(), - base_bindings.sampler++); + + if (shader_type == ShaderType::Geometry) { + const auto [glsl_topology, max_vertices] = GetPrimitiveDescription(variant.primitive_mode); + source += fmt::format("#define MAX_VERTEX_INPUT {}\n", max_vertices); + source += fmt::format("layout ({}) in;\n", glsl_topology); } - for (const auto& image : entries.images) { + if (shader_type == ShaderType::Compute) { + if (variant.local_memory_size > 0) { + source += fmt::format("#define LOCAL_MEMORY_SIZE {}\n", + Common::AlignUp(variant.local_memory_size, 4) / 4); + } source += - fmt::format("#define IMAGE_BINDING_{} {}\n", image.GetIndex(), base_bindings.image++); - } + fmt::format("layout (local_size_x = {}, local_size_y = {}, local_size_z = {}) in;\n", + variant.block_x, variant.block_y, variant.block_z); - // Transform 1D textures to texture samplers by declaring its preprocessor macros. 
- for (std::size_t i = 0; i < texture_buffer_usage.size(); ++i) { - if (!texture_buffer_usage.test(i)) { - continue; + if (variant.shared_memory_size > 0) { + // shared_memory_size is described in number of words + source += fmt::format("shared uint smem[{}];\n", variant.shared_memory_size); } - source += fmt::format("#define SAMPLER_{}_IS_BUFFER\n", i); - } - if (texture_buffer_usage.any()) { - source += '\n'; - } - - if (program_type == ProgramType::Geometry) { - const auto [glsl_topology, debug_name, max_vertices] = - GetPrimitiveDescription(primitive_mode); - - source += "layout (" + std::string(glsl_topology) + ") in;\n\n"; - source += "#define MAX_VERTEX_INPUT " + std::to_string(max_vertices) + '\n'; - } - if (program_type == ProgramType::Compute) { - source += "layout (local_size_variable) in;\n"; } source += '\n'; - source += GenerateGLSL(device, program_type, ir, ir_b); + source += GenerateGLSL(device, shader_type, ir, ir_b); OGLShader shader; - shader.Create(source.c_str(), GetShaderType(program_type)); + shader.Create(source.c_str(), GetGLShaderType(shader_type)); auto program = std::make_shared<OGLProgram>(); program->Create(true, hint_retrievable, shader.handle); @@ -357,18 +312,16 @@ std::unordered_set<GLenum> GetSupportedFormats() { } // Anonymous namespace -CachedShader::CachedShader(const ShaderParameters& params, ProgramType program_type, - GLShader::ShaderEntries entries, ProgramCode program_code, - ProgramCode program_code_b) - : RasterizerCacheObject{params.host_ptr}, system{params.system}, - disk_cache{params.disk_cache}, device{params.device}, cpu_addr{params.cpu_addr}, - unique_identifier{params.unique_identifier}, program_type{program_type}, entries{entries}, - program_code{std::move(program_code)}, program_code_b{std::move(program_code_b)} { +CachedShader::CachedShader(const ShaderParameters& params, ShaderType shader_type, + GLShader::ShaderEntries entries, ProgramCode code, ProgramCode code_b) + : RasterizerCacheObject{params.host_ptr}, 
system{params.system}, disk_cache{params.disk_cache}, + device{params.device}, cpu_addr{params.cpu_addr}, unique_identifier{params.unique_identifier}, + shader_type{shader_type}, entries{entries}, code{std::move(code)}, code_b{std::move(code_b)} { if (!params.precompiled_variants) { return; } for (const auto& pair : *params.precompiled_variants) { - auto locker = MakeLocker(system, program_type); + auto locker = MakeLocker(system, shader_type); const auto& usage = pair->first; FillLocker(*locker, usage); @@ -389,92 +342,83 @@ CachedShader::CachedShader(const ShaderParameters& params, ProgramType program_t } Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params, - Maxwell::ShaderProgram program_type, - ProgramCode program_code, ProgramCode program_code_b) { - params.disk_cache.SaveRaw(ShaderDiskCacheRaw( - params.unique_identifier, GetProgramType(program_type), program_code, program_code_b)); + Maxwell::ShaderProgram program_type, ProgramCode code, + ProgramCode code_b) { + const auto shader_type = GetShaderType(program_type); + params.disk_cache.SaveRaw( + ShaderDiskCacheRaw(params.unique_identifier, shader_type, code, code_b)); - ConstBufferLocker locker(GetEnginesShaderType(GetProgramType(program_type))); - const ShaderIR ir(program_code, STAGE_MAIN_OFFSET, COMPILER_SETTINGS, locker); + ConstBufferLocker locker(shader_type, params.system.GPU().Maxwell3D()); + const ShaderIR ir(code, STAGE_MAIN_OFFSET, COMPILER_SETTINGS, locker); // TODO(Rodrigo): Handle VertexA shaders // std::optional<ShaderIR> ir_b; - // if (!program_code_b.empty()) { - // ir_b.emplace(program_code_b, STAGE_MAIN_OFFSET); + // if (!code_b.empty()) { + // ir_b.emplace(code_b, STAGE_MAIN_OFFSET); // } - return std::shared_ptr<CachedShader>( - new CachedShader(params, GetProgramType(program_type), GLShader::GetEntries(ir), - std::move(program_code), std::move(program_code_b))); + return std::shared_ptr<CachedShader>(new CachedShader( + params, shader_type, 
GLShader::GetEntries(ir), std::move(code), std::move(code_b))); } Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code) { params.disk_cache.SaveRaw( - ShaderDiskCacheRaw(params.unique_identifier, ProgramType::Compute, code)); + ShaderDiskCacheRaw(params.unique_identifier, ShaderType::Compute, code)); - ConstBufferLocker locker(Tegra::Engines::ShaderType::Compute); + ConstBufferLocker locker(Tegra::Engines::ShaderType::Compute, + params.system.GPU().KeplerCompute()); const ShaderIR ir(code, KERNEL_MAIN_OFFSET, COMPILER_SETTINGS, locker); return std::shared_ptr<CachedShader>(new CachedShader( - params, ProgramType::Compute, GLShader::GetEntries(ir), std::move(code), {})); + params, ShaderType::Compute, GLShader::GetEntries(ir), std::move(code), {})); } Shader CachedShader::CreateFromCache(const ShaderParameters& params, const UnspecializedShader& unspecialized) { - return std::shared_ptr<CachedShader>(new CachedShader(params, unspecialized.program_type, + return std::shared_ptr<CachedShader>(new CachedShader(params, unspecialized.type, unspecialized.entries, unspecialized.code, unspecialized.code_b)); } -std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(const ProgramVariant& variant) { - UpdateVariant(); +GLuint CachedShader::GetHandle(const ProgramVariant& variant) { + EnsureValidLockerVariant(); - const auto [entry, is_cache_miss] = curr_variant->programs.try_emplace(variant); + const auto [entry, is_cache_miss] = curr_locker_variant->programs.try_emplace(variant); auto& program = entry->second; - if (is_cache_miss) { - program = BuildShader(device, unique_identifier, program_type, program_code, program_code_b, - variant, *curr_variant->locker); - disk_cache.SaveUsage(GetUsage(variant, *curr_variant->locker)); - - LabelGLObject(GL_PROGRAM, program->handle, cpu_addr); + if (!is_cache_miss) { + return program->handle; } - auto base_bindings = variant.base_bindings; - base_bindings.cbuf += 
static_cast<u32>(entries.const_buffers.size()); - if (program_type != ProgramType::Compute) { - base_bindings.cbuf += STAGE_RESERVED_UBOS; - } - base_bindings.gmem += static_cast<u32>(entries.global_memory_entries.size()); - base_bindings.sampler += static_cast<u32>(entries.samplers.size()); + program = BuildShader(device, unique_identifier, shader_type, code, code_b, + *curr_locker_variant->locker, variant); + disk_cache.SaveUsage(GetUsage(variant, *curr_locker_variant->locker)); - return {program->handle, base_bindings}; + LabelGLObject(GL_PROGRAM, program->handle, cpu_addr); + return program->handle; } -void CachedShader::UpdateVariant() { - if (curr_variant && !curr_variant->locker->IsConsistent()) { - curr_variant = nullptr; +bool CachedShader::EnsureValidLockerVariant() { + const auto previous_variant = curr_locker_variant; + if (curr_locker_variant && !curr_locker_variant->locker->IsConsistent()) { + curr_locker_variant = nullptr; } - if (!curr_variant) { + if (!curr_locker_variant) { for (auto& variant : locker_variants) { if (variant->locker->IsConsistent()) { - curr_variant = variant.get(); + curr_locker_variant = variant.get(); } } } - if (!curr_variant) { + if (!curr_locker_variant) { auto& new_variant = locker_variants.emplace_back(); new_variant = std::make_unique<LockerVariant>(); - new_variant->locker = MakeLocker(system, program_type); - curr_variant = new_variant.get(); + new_variant->locker = MakeLocker(system, shader_type); + curr_locker_variant = new_variant.get(); } + return previous_variant == curr_locker_variant; } ShaderDiskCacheUsage CachedShader::GetUsage(const ProgramVariant& variant, const ConstBufferLocker& locker) const { - ShaderDiskCacheUsage usage; - usage.unique_identifier = unique_identifier; - usage.variant = variant; - usage.keys = locker.GetKeys(); - usage.bound_samplers = locker.GetBoundSamplers(); - usage.bindless_samplers = locker.GetBindlessSamplers(); - return usage; + return ShaderDiskCacheUsage{unique_identifier, 
variant, locker.GetKeys(), + locker.GetBoundSamplers(), locker.GetBindlessSamplers()}; } ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system, @@ -533,11 +477,12 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading, } } if (!shader) { - auto locker{MakeLocker(system, unspecialized.program_type)}; + auto locker{MakeLocker(system, unspecialized.type)}; FillLocker(*locker, usage); - shader = BuildShader(device, usage.unique_identifier, unspecialized.program_type, - unspecialized.code, unspecialized.code_b, usage.variant, - *locker, true); + + shader = BuildShader(device, usage.unique_identifier, unspecialized.type, + unspecialized.code, unspecialized.code_b, *locker, + usage.variant, true); } std::scoped_lock lock{mutex}; @@ -640,7 +585,7 @@ bool ShaderCacheOpenGL::GenerateUnspecializedShaders( const auto& raw{raws[i]}; const u64 unique_identifier{raw.GetUniqueIdentifier()}; const u64 calculated_hash{ - GetUniqueIdentifier(raw.GetProgramType(), raw.GetProgramCode(), raw.GetProgramCodeB())}; + GetUniqueIdentifier(raw.GetType(), raw.HasProgramA(), raw.GetCode(), raw.GetCodeB())}; if (unique_identifier != calculated_hash) { LOG_ERROR(Render_OpenGL, "Invalid hash in entry={:016x} (obtained hash={:016x}) - " @@ -651,9 +596,9 @@ bool ShaderCacheOpenGL::GenerateUnspecializedShaders( } const u32 main_offset = - raw.GetProgramType() == ProgramType::Compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET; - ConstBufferLocker locker(GetEnginesShaderType(raw.GetProgramType())); - const ShaderIR ir(raw.GetProgramCode(), main_offset, COMPILER_SETTINGS, locker); + raw.GetType() == ShaderType::Compute ? 
KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET; + ConstBufferLocker locker(raw.GetType()); + const ShaderIR ir(raw.GetCode(), main_offset, COMPILER_SETTINGS, locker); // TODO(Rodrigo): Handle VertexA shaders // std::optional<ShaderIR> ir_b; // if (raw.HasProgramA()) { @@ -662,9 +607,9 @@ bool ShaderCacheOpenGL::GenerateUnspecializedShaders( UnspecializedShader unspecialized; unspecialized.entries = GLShader::GetEntries(ir); - unspecialized.program_type = raw.GetProgramType(); - unspecialized.code = raw.GetProgramCode(); - unspecialized.code_b = raw.GetProgramCodeB(); + unspecialized.type = raw.GetType(); + unspecialized.code = raw.GetCode(); + unspecialized.code_b = raw.GetCodeB(); unspecialized_shaders.emplace(raw.GetUniqueIdentifier(), unspecialized); if (callback) { @@ -697,7 +642,8 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { code_b = GetShaderCode(memory_manager, address_b, memory_manager.GetPointer(address_b)); } - const auto unique_identifier = GetUniqueIdentifier(GetProgramType(program), code, code_b); + const auto unique_identifier = GetUniqueIdentifier( + GetShaderType(program), program == Maxwell::ShaderProgram::VertexA, code, code_b); const auto precompiled_variants = GetPrecompiledVariants(unique_identifier); const auto cpu_addr{*memory_manager.GpuToCpuAddress(address)}; const ShaderParameters params{system, disk_cache, precompiled_variants, device, @@ -725,7 +671,7 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) { // No kernel found - create a new one auto code{GetShaderCode(memory_manager, code_addr, host_ptr)}; - const auto unique_identifier{GetUniqueIdentifier(ProgramType::Compute, code, {})}; + const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code, {})}; const auto precompiled_variants = GetPrecompiledVariants(unique_identifier); const auto cpu_addr{*memory_manager.GpuToCpuAddress(code_addr)}; const ShaderParameters params{system, disk_cache, precompiled_variants, device, diff 
--git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index 6bd7c9cf1..7b1470db3 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -17,6 +17,7 @@ #include <glad/glad.h> #include "common/common_types.h" +#include "video_core/engines/shader_type.h" #include "video_core/rasterizer_cache.h" #include "video_core/renderer_opengl/gl_resource_manager.h" #include "video_core/renderer_opengl/gl_shader_decompiler.h" @@ -47,7 +48,7 @@ using PrecompiledVariants = std::vector<PrecompiledPrograms::iterator>; struct UnspecializedShader { GLShader::ShaderEntries entries; - ProgramType program_type; + Tegra::Engines::ShaderType type; ProgramCode code; ProgramCode code_b; }; @@ -77,7 +78,7 @@ public: } std::size_t GetSizeInBytes() const override { - return program_code.size() * sizeof(u64); + return code.size() * sizeof(u64); } /// Gets the shader entries for the shader @@ -86,7 +87,7 @@ public: } /// Gets the GL program handle for the shader - std::tuple<GLuint, BaseBindings> GetProgramHandle(const ProgramVariant& variant); + GLuint GetHandle(const ProgramVariant& variant); private: struct LockerVariant { @@ -94,11 +95,11 @@ private: std::unordered_map<ProgramVariant, CachedProgram> programs; }; - explicit CachedShader(const ShaderParameters& params, ProgramType program_type, + explicit CachedShader(const ShaderParameters& params, Tegra::Engines::ShaderType shader_type, GLShader::ShaderEntries entries, ProgramCode program_code, ProgramCode program_code_b); - void UpdateVariant(); + bool EnsureValidLockerVariant(); ShaderDiskCacheUsage GetUsage(const ProgramVariant& variant, const VideoCommon::Shader::ConstBufferLocker& locker) const; @@ -110,14 +111,14 @@ private: VAddr cpu_addr{}; u64 unique_identifier{}; - ProgramType program_type{}; + Tegra::Engines::ShaderType shader_type{}; GLShader::ShaderEntries entries; - ProgramCode program_code; - ProgramCode 
program_code_b; + ProgramCode code; + ProgramCode code_b; - LockerVariant* curr_variant = nullptr; + LockerVariant* curr_locker_variant = nullptr; std::vector<std::unique_ptr<LockerVariant>> locker_variants; }; diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 92ee8459e..f9f7a97b5 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -16,6 +16,7 @@ #include "common/common_types.h" #include "common/logging/log.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_type.h" #include "video_core/renderer_opengl/gl_device.h" #include "video_core/renderer_opengl/gl_rasterizer.h" #include "video_core/renderer_opengl/gl_shader_decompiler.h" @@ -27,6 +28,7 @@ namespace OpenGL::GLShader { namespace { +using Tegra::Engines::ShaderType; using Tegra::Shader::Attribute; using Tegra::Shader::AttributeUse; using Tegra::Shader::Header; @@ -41,11 +43,15 @@ using namespace VideoCommon::Shader; using Maxwell = Tegra::Engines::Maxwell3D::Regs; using Operation = const OperationNode&; +class ASTDecompiler; +class ExprDecompiler; + enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat }; -struct TextureAoffi {}; +struct TextureOffset {}; +struct TextureDerivates {}; using TextureArgument = std::pair<Type, Node>; -using TextureIR = std::variant<TextureAoffi, TextureArgument>; +using TextureIR = std::variant<TextureOffset, TextureDerivates, TextureArgument>; constexpr u32 MAX_CONSTBUFFER_ELEMENTS = static_cast<u32>(Maxwell::MaxConstBufferSize) / (4 * sizeof(float)); @@ -223,7 +229,7 @@ private: Type type{}; }; -constexpr const char* GetTypeString(Type type) { +const char* GetTypeString(Type type) { switch (type) { case Type::Bool: return "bool"; @@ -243,7 +249,7 @@ constexpr const char* GetTypeString(Type type) { } } -constexpr const char* GetImageTypeDeclaration(Tegra::Shader::ImageType 
image_type) { +const char* GetImageTypeDeclaration(Tegra::Shader::ImageType image_type) { switch (image_type) { case Tegra::Shader::ImageType::Texture1D: return "1D"; @@ -331,16 +337,13 @@ std::string FlowStackTopName(MetaStackClass stack) { return fmt::format("{}_flow_stack_top", GetFlowStackPrefix(stack)); } -constexpr bool IsVertexShader(ProgramType stage) { - return stage == ProgramType::VertexA || stage == ProgramType::VertexB; +[[deprecated]] constexpr bool IsVertexShader(ShaderType stage) { + return stage == ShaderType::Vertex; } -class ASTDecompiler; -class ExprDecompiler; - class GLSLDecompiler final { public: - explicit GLSLDecompiler(const Device& device, const ShaderIR& ir, ProgramType stage, + explicit GLSLDecompiler(const Device& device, const ShaderIR& ir, ShaderType stage, std::string suffix) : device{device}, ir{ir}, stage{stage}, suffix{suffix}, header{ir.GetHeader()} {} @@ -396,6 +399,7 @@ public: DeclareConstantBuffers(); DeclareGlobalMemory(); DeclareSamplers(); + DeclareImages(); DeclarePhysicalAttributeReader(); code.AddLine("void execute_{}() {{", suffix); @@ -427,7 +431,7 @@ private: } void DeclareGeometry() { - if (stage != ProgramType::Geometry) { + if (stage != ShaderType::Geometry) { return; } @@ -510,10 +514,14 @@ private: } void DeclareLocalMemory() { - // TODO(Rodrigo): Unstub kernel local memory size and pass it from a register at - // specialization time. - const u64 local_memory_size = - stage == ProgramType::Compute ? 
0x400 : header.GetLocalMemorySize(); + if (stage == ShaderType::Compute) { + code.AddLine("#ifdef LOCAL_MEMORY_SIZE"); + code.AddLine("uint {}[LOCAL_MEMORY_SIZE];", GetLocalMemory()); + code.AddLine("#endif"); + return; + } + + const u64 local_memory_size = header.GetLocalMemorySize(); if (local_memory_size == 0) { return; } @@ -522,13 +530,6 @@ private: code.AddNewLine(); } - void DeclareSharedMemory() { - if (stage != ProgramType::Compute) { - return; - } - code.AddLine("shared uint {}[];", GetSharedMemory()); - } - void DeclareInternalFlags() { for (u32 flag = 0; flag < static_cast<u32>(InternalFlag::Amount); flag++) { const auto flag_code = static_cast<InternalFlag>(flag); @@ -578,12 +579,12 @@ private: const u32 location{GetGenericAttributeIndex(index)}; std::string name{GetInputAttribute(index)}; - if (stage == ProgramType::Geometry) { + if (stage == ShaderType::Geometry) { name = "gs_" + name + "[]"; } std::string suffix; - if (stage == ProgramType::Fragment) { + if (stage == ShaderType::Fragment) { const auto input_mode{header.ps.GetAttributeUse(location)}; if (skip_unused && input_mode == AttributeUse::Unused) { return; @@ -595,7 +596,7 @@ private: } void DeclareOutputAttributes() { - if (ir.HasPhysicalAttributes() && stage != ProgramType::Fragment) { + if (ir.HasPhysicalAttributes() && stage != ShaderType::Fragment) { for (u32 i = 0; i < GetNumPhysicalVaryings(); ++i) { DeclareOutputAttribute(ToGenericAttribute(i)); } @@ -620,9 +621,9 @@ private: } void DeclareConstantBuffers() { - for (const auto& entry : ir.GetConstantBuffers()) { - const auto [index, size] = entry; - code.AddLine("layout (std140, binding = CBUF_BINDING_{}) uniform {} {{", index, + u32 binding = device.GetBaseBindings(stage).uniform_buffer; + for (const auto& [index, cbuf] : ir.GetConstantBuffers()) { + code.AddLine("layout (std140, binding = {}) uniform {} {{", binding++, GetConstBufferBlock(index)); code.AddLine(" uvec4 {}[{}];", GetConstBuffer(index), MAX_CONSTBUFFER_ELEMENTS); 
code.AddLine("}};"); @@ -631,9 +632,8 @@ private: } void DeclareGlobalMemory() { - for (const auto& gmem : ir.GetGlobalMemory()) { - const auto& [base, usage] = gmem; - + u32 binding = device.GetBaseBindings(stage).shader_storage_buffer; + for (const auto& [base, usage] : ir.GetGlobalMemory()) { // Since we don't know how the shader will use the shader, hint the driver to disable as // much optimizations as possible std::string qualifier = "coherent volatile"; @@ -643,8 +643,8 @@ private: qualifier += " writeonly"; } - code.AddLine("layout (std430, binding = GMEM_BINDING_{}_{}) {} buffer {} {{", - base.cbuf_index, base.cbuf_offset, qualifier, GetGlobalMemoryBlock(base)); + code.AddLine("layout (std430, binding = {}) {} buffer {} {{", binding++, qualifier, + GetGlobalMemoryBlock(base)); code.AddLine(" uint {}[];", GetGlobalMemory(base)); code.AddLine("}};"); code.AddNewLine(); @@ -652,15 +652,17 @@ private: } void DeclareSamplers() { - const auto& samplers = ir.GetSamplers(); - for (const auto& sampler : samplers) { - const std::string name{GetSampler(sampler)}; - const std::string description{"layout (binding = SAMPLER_BINDING_" + - std::to_string(sampler.GetIndex()) + ") uniform"}; + u32 binding = device.GetBaseBindings(stage).sampler; + for (const auto& sampler : ir.GetSamplers()) { + const std::string name = GetSampler(sampler); + const std::string description = fmt::format("layout (binding = {}) uniform", binding++); + std::string sampler_type = [&]() { + if (sampler.IsBuffer()) { + return "samplerBuffer"; + } switch (sampler.GetType()) { case Tegra::Shader::TextureType::Texture1D: - // Special cased, read below. 
return "sampler1D"; case Tegra::Shader::TextureType::Texture2D: return "sampler2D"; @@ -680,21 +682,9 @@ private: sampler_type += "Shadow"; } - if (sampler.GetType() == Tegra::Shader::TextureType::Texture1D) { - // 1D textures can be aliased to texture buffers, hide the declarations behind a - // preprocessor flag and use one or the other from the GPU state. This has to be - // done because shaders don't have enough information to determine the texture type. - EmitIfdefIsBuffer(sampler); - code.AddLine("{} samplerBuffer {};", description, name); - code.AddLine("#else"); - code.AddLine("{} {} {};", description, sampler_type, name); - code.AddLine("#endif"); - } else { - // The other texture types (2D, 3D and cubes) don't have this issue. - code.AddLine("{} {} {};", description, sampler_type, name); - } + code.AddLine("{} {} {};", description, sampler_type, name); } - if (!samplers.empty()) { + if (!ir.GetSamplers().empty()) { code.AddNewLine(); } } @@ -717,7 +707,7 @@ private: constexpr u32 element_stride = 4; const u32 address{generic_base + index * generic_stride + element * element_stride}; - const bool declared = stage != ProgramType::Fragment || + const bool declared = stage != ShaderType::Fragment || header.ps.GetAttributeUse(index) != AttributeUse::Unused; const std::string value = declared ? ReadAttribute(attribute, element).AsFloat() : "0.0f"; @@ -734,8 +724,8 @@ private: } void DeclareImages() { - const auto& images{ir.GetImages()}; - for (const auto& image : images) { + u32 binding = device.GetBaseBindings(stage).image; + for (const auto& image : ir.GetImages()) { std::string qualifier = "coherent volatile"; if (image.IsRead() && !image.IsWritten()) { qualifier += " readonly"; @@ -745,10 +735,10 @@ private: const char* format = image.IsAtomic() ? 
"r32ui, " : ""; const char* type_declaration = GetImageTypeDeclaration(image.GetType()); - code.AddLine("layout ({}binding = IMAGE_BINDING_{}) {} uniform uimage{} {};", format, - image.GetIndex(), qualifier, type_declaration, GetImage(image)); + code.AddLine("layout ({}binding = {}) {} uniform uimage{} {};", format, binding++, + qualifier, type_declaration, GetImage(image)); } - if (!images.empty()) { + if (!ir.GetImages().empty()) { code.AddNewLine(); } } @@ -761,6 +751,9 @@ private: Expression Visit(const Node& node) { if (const auto operation = std::get_if<OperationNode>(&*node)) { + if (const auto amend_index = operation->GetAmendIndex()) { + Visit(ir.GetAmendNode(*amend_index)).CheckVoid(); + } const auto operation_index = static_cast<std::size_t>(operation->GetCode()); if (operation_index >= operation_decompilers.size()) { UNREACHABLE_MSG("Out of bounds operation: {}", operation_index); @@ -809,7 +802,7 @@ private: } if (const auto abuf = std::get_if<AbufNode>(&*node)) { - UNIMPLEMENTED_IF_MSG(abuf->IsPhysicalBuffer() && stage == ProgramType::Geometry, + UNIMPLEMENTED_IF_MSG(abuf->IsPhysicalBuffer() && stage == ShaderType::Geometry, "Physical attributes in geometry shaders are not implemented"); if (abuf->IsPhysicalBuffer()) { return {fmt::format("ReadPhysicalAttribute({})", @@ -868,18 +861,13 @@ private: } if (const auto lmem = std::get_if<LmemNode>(&*node)) { - if (stage == ProgramType::Compute) { - LOG_WARNING(Render_OpenGL, "Local memory is stubbed on compute shaders"); - } return { fmt::format("{}[{} >> 2]", GetLocalMemory(), Visit(lmem->GetAddress()).AsUint()), Type::Uint}; } if (const auto smem = std::get_if<SmemNode>(&*node)) { - return { - fmt::format("{}[{} >> 2]", GetSharedMemory(), Visit(smem->GetAddress()).AsUint()), - Type::Uint}; + return {fmt::format("smem[{} >> 2]", Visit(smem->GetAddress()).AsUint()), Type::Uint}; } if (const auto internal_flag = std::get_if<InternalFlagNode>(&*node)) { @@ -887,6 +875,9 @@ private: } if (const auto 
conditional = std::get_if<ConditionalNode>(&*node)) { + if (const auto amend_index = conditional->GetAmendIndex()) { + Visit(ir.GetAmendNode(*amend_index)).CheckVoid(); + } // It's invalid to call conditional on nested nodes, use an operation instead code.AddLine("if ({}) {{", Visit(conditional->GetCondition()).AsBool()); ++code.scope; @@ -909,7 +900,7 @@ private: Expression ReadAttribute(Attribute::Index attribute, u32 element, const Node& buffer = {}) { const auto GeometryPass = [&](std::string_view name) { - if (stage == ProgramType::Geometry && buffer) { + if (stage == ShaderType::Geometry && buffer) { // TODO(Rodrigo): Guard geometry inputs against out of bound reads. Some games // set an 0x80000000 index for those and the shader fails to build. Find out why // this happens and what's its intent. @@ -921,11 +912,11 @@ private: switch (attribute) { case Attribute::Index::Position: switch (stage) { - case ProgramType::Geometry: + case ShaderType::Geometry: return {fmt::format("gl_in[{}].gl_Position{}", Visit(buffer).AsUint(), GetSwizzle(element)), Type::Float}; - case ProgramType::Fragment: + case ShaderType::Fragment: return {element == 3 ? "1.0f" : ("gl_FragCoord"s + GetSwizzle(element)), Type::Float}; default: @@ -959,7 +950,7 @@ private: return {"0", Type::Int}; case Attribute::Index::FrontFacing: // TODO(Subv): Find out what the values are for the other elements. - ASSERT(stage == ProgramType::Fragment); + ASSERT(stage == ShaderType::Fragment); switch (element) { case 3: return {"(gl_FrontFacing ? -1 : 0)", Type::Int}; @@ -985,7 +976,7 @@ private: // be found in fragment shaders, so we disable precise there. There are vertex shaders that // also fail to build but nobody seems to care about those. // Note: Only bugged drivers will skip precise. 
- const bool disable_precise = device.HasPreciseBug() && stage == ProgramType::Fragment; + const bool disable_precise = device.HasPreciseBug() && stage == ShaderType::Fragment; std::string temporary = code.GenerateTemporary(); code.AddLine("{}{} {} = {};", disable_precise ? "" : "precise ", GetTypeString(type), @@ -1092,7 +1083,7 @@ private: } std::string GenerateTexture(Operation operation, const std::string& function_suffix, - const std::vector<TextureIR>& extras) { + const std::vector<TextureIR>& extras, bool separate_dc = false) { constexpr std::array coord_constructors = {"float", "vec2", "vec3", "vec4"}; const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); @@ -1105,9 +1096,12 @@ private: std::string expr = "texture" + function_suffix; if (!meta->aoffi.empty()) { expr += "Offset"; + } else if (!meta->ptp.empty()) { + expr += "Offsets"; } expr += '(' + GetSampler(meta->sampler) + ", "; - expr += coord_constructors.at(count + (has_array ? 1 : 0) + (has_shadow ? 1 : 0) - 1); + expr += coord_constructors.at(count + (has_array ? 1 : 0) + + (has_shadow && !separate_dc ? 
1 : 0) - 1); expr += '('; for (std::size_t i = 0; i < count; ++i) { expr += Visit(operation[i]).AsFloat(); @@ -1120,15 +1114,26 @@ private: expr += ", float(" + Visit(meta->array).AsInt() + ')'; } if (has_shadow) { - expr += ", " + Visit(meta->depth_compare).AsFloat(); + if (separate_dc) { + expr += "), " + Visit(meta->depth_compare).AsFloat(); + } else { + expr += ", " + Visit(meta->depth_compare).AsFloat() + ')'; + } + } else { + expr += ')'; } - expr += ')'; for (const auto& variant : extras) { if (const auto argument = std::get_if<TextureArgument>(&variant)) { expr += GenerateTextureArgument(*argument); - } else if (std::holds_alternative<TextureAoffi>(variant)) { - expr += GenerateTextureAoffi(meta->aoffi); + } else if (std::holds_alternative<TextureOffset>(variant)) { + if (!meta->aoffi.empty()) { + expr += GenerateTextureAoffi(meta->aoffi); + } else if (!meta->ptp.empty()) { + expr += GenerateTexturePtp(meta->ptp); + } + } else if (std::holds_alternative<TextureDerivates>(variant)) { + expr += GenerateTextureDerivates(meta->derivates); } else { UNREACHABLE(); } @@ -1167,6 +1172,20 @@ private: return expr; } + std::string ReadTextureOffset(const Node& value) { + if (const auto immediate = std::get_if<ImmediateNode>(&*value)) { + // Inline the string as an immediate integer in GLSL (AOFFI arguments are required + // to be constant by the standard). + return std::to_string(static_cast<s32>(immediate->GetValue())); + } else if (device.HasVariableAoffi()) { + // Avoid using variable AOFFI on unsupported devices. + return Visit(value).AsInt(); + } else { + // Insert 0 on devices not supporting variable AOFFI. 
+ return "0"; + } + } + std::string GenerateTextureAoffi(const std::vector<Node>& aoffi) { if (aoffi.empty()) { return {}; @@ -1177,18 +1196,7 @@ private: expr += '('; for (std::size_t index = 0; index < aoffi.size(); ++index) { - const auto operand{aoffi.at(index)}; - if (const auto immediate = std::get_if<ImmediateNode>(&*operand)) { - // Inline the string as an immediate integer in GLSL (AOFFI arguments are required - // to be constant by the standard). - expr += std::to_string(static_cast<s32>(immediate->GetValue())); - } else if (device.HasVariableAoffi()) { - // Avoid using variable AOFFI on unsupported devices. - expr += Visit(operand).AsInt(); - } else { - // Insert 0 on devices not supporting variable AOFFI. - expr += '0'; - } + expr += ReadTextureOffset(aoffi.at(index)); if (index + 1 < aoffi.size()) { expr += ", "; } @@ -1198,6 +1206,50 @@ private: return expr; } + std::string GenerateTexturePtp(const std::vector<Node>& ptp) { + static constexpr std::size_t num_vectors = 4; + ASSERT(ptp.size() == num_vectors * 2); + + std::string expr = ", ivec2[]("; + for (std::size_t vector = 0; vector < num_vectors; ++vector) { + const bool has_next = vector + 1 < num_vectors; + expr += fmt::format("ivec2({}, {}){}", ReadTextureOffset(ptp.at(vector * 2)), + ReadTextureOffset(ptp.at(vector * 2 + 1)), has_next ? 
", " : ""); + } + expr += ')'; + return expr; + } + + std::string GenerateTextureDerivates(const std::vector<Node>& derivates) { + if (derivates.empty()) { + return {}; + } + constexpr std::array coord_constructors = {"float", "vec2", "vec3"}; + std::string expr = ", "; + const std::size_t components = derivates.size() / 2; + std::string dx = coord_constructors.at(components - 1); + std::string dy = coord_constructors.at(components - 1); + dx += '('; + dy += '('; + + for (std::size_t index = 0; index < components; ++index) { + const auto operand_x{derivates.at(index * 2)}; + const auto operand_y{derivates.at(index * 2 + 1)}; + dx += Visit(operand_x).AsFloat(); + dy += Visit(operand_y).AsFloat(); + + if (index + 1 < components) { + dx += ", "; + dy += ", "; + } + } + dx += ')'; + dy += ')'; + expr += dx + ", " + dy; + + return expr; + } + std::string BuildIntegerCoordinates(Operation operation) { constexpr std::array constructors{"int(", "ivec2(", "ivec3(", "ivec4("}; const std::size_t coords_count{operation.GetOperandsCount()}; @@ -1247,17 +1299,12 @@ private: } target = std::move(*output); } else if (const auto lmem = std::get_if<LmemNode>(&*dest)) { - if (stage == ProgramType::Compute) { - LOG_WARNING(Render_OpenGL, "Local memory is stubbed on compute shaders"); - } target = { fmt::format("{}[{} >> 2]", GetLocalMemory(), Visit(lmem->GetAddress()).AsUint()), Type::Uint}; } else if (const auto smem = std::get_if<SmemNode>(&*dest)) { - ASSERT(stage == ProgramType::Compute); - target = { - fmt::format("{}[{} >> 2]", GetSharedMemory(), Visit(smem->GetAddress()).AsUint()), - Type::Uint}; + ASSERT(stage == ShaderType::Compute); + target = {fmt::format("smem[{} >> 2]", Visit(smem->GetAddress()).AsUint()), Type::Uint}; } else if (const auto gmem = std::get_if<GmemNode>(&*dest)) { const std::string real = Visit(gmem->GetRealAddress()).AsUint(); const std::string base = Visit(gmem->GetBaseAddress()).AsUint(); @@ -1379,6 +1426,26 @@ private: return GenerateUnary(operation, 
"float", Type::Float, type); } + Expression FSwizzleAdd(Operation operation) { + const std::string op_a = VisitOperand(operation, 0).AsFloat(); + const std::string op_b = VisitOperand(operation, 1).AsFloat(); + + if (!device.HasShaderBallot()) { + LOG_ERROR(Render_OpenGL, "Shader ballot is unavailable but required by the shader"); + return {fmt::format("{} + {}", op_a, op_b), Type::Float}; + } + + const std::string instr_mask = VisitOperand(operation, 2).AsUint(); + const std::string mask = code.GenerateTemporary(); + code.AddLine("uint {} = ({} >> ((gl_SubGroupInvocationARB & 3) << 1)) & 3;", mask, + instr_mask); + + const std::string modifier_a = fmt::format("fswzadd_modifiers_a[{}]", mask); + const std::string modifier_b = fmt::format("fswzadd_modifiers_b[{}]", mask); + return {fmt::format("(({} * {}) + ({} * {}))", op_a, modifier_a, op_b, modifier_b), + Type::Float}; + } + Expression ICastFloat(Operation operation) { return GenerateUnary(operation, "int", Type::Int, Type::Float); } @@ -1452,6 +1519,11 @@ private: return GenerateUnary(operation, "bitCount", type, type); } + template <Type type> + Expression BitMSB(Operation operation) { + return GenerateUnary(operation, "findMSB", type, type); + } + Expression HNegate(Operation operation) { const auto GetNegate = [&](std::size_t index) { return VisitOperand(operation, index).AsBool() + " ? 
-1 : 1"; @@ -1471,7 +1543,8 @@ private: } Expression HCastFloat(Operation operation) { - return {fmt::format("vec2({})", VisitOperand(operation, 0).AsFloat()), Type::HalfFloat}; + return {fmt::format("vec2({}, 0.0f)", VisitOperand(operation, 0).AsFloat()), + Type::HalfFloat}; } Expression HUnpack(Operation operation) { @@ -1645,7 +1718,7 @@ private: ASSERT(meta); std::string expr = GenerateTexture( - operation, "", {TextureAoffi{}, TextureArgument{Type::Float, meta->bias}}); + operation, "", {TextureOffset{}, TextureArgument{Type::Float, meta->bias}}); if (meta->sampler.IsShadow()) { expr = "vec4(" + expr + ')'; } @@ -1657,7 +1730,7 @@ private: ASSERT(meta); std::string expr = GenerateTexture( - operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureAoffi{}}); + operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureOffset{}}); if (meta->sampler.IsShadow()) { expr = "vec4(" + expr + ')'; } @@ -1665,13 +1738,18 @@ private: } Expression TextureGather(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - ASSERT(meta); + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); - const auto type = meta->sampler.IsShadow() ? Type::Float : Type::Int; - return {GenerateTexture(operation, "Gather", - {TextureArgument{type, meta->component}, TextureAoffi{}}) + - GetSwizzle(meta->element), + const auto type = meta.sampler.IsShadow() ? 
Type::Float : Type::Int; + const bool separate_dc = meta.sampler.IsShadow(); + + std::vector<TextureIR> ir; + if (meta.sampler.IsShadow()) { + ir = {TextureOffset{}}; + } else { + ir = {TextureOffset{}, TextureArgument{type, meta.component}}; + } + return {GenerateTexture(operation, "Gather", ir, separate_dc) + GetSwizzle(meta.element), Type::Float}; } @@ -1729,27 +1807,23 @@ private: expr += ", "; } - // Store a copy of the expression without the lod to be used with texture buffers - std::string expr_buffer = expr; - - if (meta->lod) { + if (meta->lod && !meta->sampler.IsBuffer()) { expr += ", "; expr += Visit(meta->lod).AsInt(); } expr += ')'; expr += GetSwizzle(meta->element); - expr_buffer += ')'; - expr_buffer += GetSwizzle(meta->element); + return {std::move(expr), Type::Float}; + } - const std::string tmp{code.GenerateTemporary()}; - EmitIfdefIsBuffer(meta->sampler); - code.AddLine("float {} = {};", tmp, expr_buffer); - code.AddLine("#else"); - code.AddLine("float {} = {};", tmp, expr); - code.AddLine("#endif"); + Expression TextureGradient(Operation operation) { + const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + ASSERT(meta); - return {tmp, Type::Float}; + std::string expr = + GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureOffset{}}); + return {std::move(expr) + GetSwizzle(meta->element), Type::Float}; } Expression ImageLoad(Operation operation) { @@ -1817,7 +1891,7 @@ private: } void PreExit() { - if (stage != ProgramType::Fragment) { + if (stage != ShaderType::Fragment) { return; } const auto& used_registers = ir.GetRegisters(); @@ -1870,27 +1944,25 @@ private: } Expression EmitVertex(Operation operation) { - ASSERT_MSG(stage == ProgramType::Geometry, + ASSERT_MSG(stage == ShaderType::Geometry, "EmitVertex is expected to be used in a geometry shader."); - - // If a geometry shader is attached, it will always flip (it's the last stage before - // fragment). For more info about flipping, refer to gl_shader_gen.cpp. 
- code.AddLine("gl_Position.xy *= viewport_flip.xy;"); code.AddLine("EmitVertex();"); return {}; } Expression EndPrimitive(Operation operation) { - ASSERT_MSG(stage == ProgramType::Geometry, + ASSERT_MSG(stage == ShaderType::Geometry, "EndPrimitive is expected to be used in a geometry shader."); - code.AddLine("EndPrimitive();"); return {}; } + Expression InvocationId(Operation operation) { + return {"gl_InvocationID", Type::Int}; + } + Expression YNegate(Operation operation) { - // Config pack's third value is Y_NEGATE's state. - return {"config_pack[2]", Type::Uint}; + return {"y_direction", Type::Float}; } template <u32 element> @@ -1942,34 +2014,29 @@ private: return Vote(operation, "allThreadsEqualNV"); } - template <const std::string_view& func> - Expression Shuffle(Operation operation) { - const std::string value = VisitOperand(operation, 0).AsFloat(); - if (!device.HasWarpIntrinsics()) { - LOG_ERROR(Render_OpenGL, "Nvidia shuffle intrinsics are required by this shader"); - // On a "single-thread" device we are either on the same thread or out of bounds. Both - // cases return the passed value. - return {value, Type::Float}; + Expression ThreadId(Operation operation) { + if (!device.HasShaderBallot()) { + LOG_ERROR(Render_OpenGL, "Shader ballot is unavailable but required by the shader"); + return {"0U", Type::Uint}; } - - const std::string index = VisitOperand(operation, 1).AsUint(); - const std::string width = VisitOperand(operation, 2).AsUint(); - return {fmt::format("{}({}, {}, {})", func, value, index, width), Type::Float}; + return {"gl_SubGroupInvocationARB", Type::Uint}; } - template <const std::string_view& func> - Expression InRangeShuffle(Operation operation) { - const std::string index = VisitOperand(operation, 0).AsUint(); - const std::string width = VisitOperand(operation, 1).AsUint(); - if (!device.HasWarpIntrinsics()) { - // On a "single-thread" device we are only in bounds when the requested index is 0. 
- return {fmt::format("({} == 0U)", index), Type::Bool}; + Expression ShuffleIndexed(Operation operation) { + std::string value = VisitOperand(operation, 0).AsFloat(); + + if (!device.HasShaderBallot()) { + LOG_ERROR(Render_OpenGL, "Shader ballot is unavailable but required by the shader"); + return {std::move(value), Type::Float}; } - const std::string in_range = code.GenerateTemporary(); - code.AddLine("bool {};", in_range); - code.AddLine("{}(0U, {}, {}, {});", func, index, width, in_range); - return {in_range, Type::Bool}; + const std::string index = VisitOperand(operation, 1).AsUint(); + return {fmt::format("readInvocationARB({}, {})", value, index), Type::Float}; + } + + Expression MemoryBarrierGL(Operation) { + code.AddLine("memoryBarrier();"); + return {}; } struct Func final { @@ -1981,11 +2048,6 @@ private: static constexpr std::string_view Or = "Or"; static constexpr std::string_view Xor = "Xor"; static constexpr std::string_view Exchange = "Exchange"; - - static constexpr std::string_view ShuffleIndexed = "shuffleNV"; - static constexpr std::string_view ShuffleUp = "shuffleUpNV"; - static constexpr std::string_view ShuffleDown = "shuffleDownNV"; - static constexpr std::string_view ShuffleButterfly = "shuffleXorNV"; }; static constexpr std::array operation_decompilers = { @@ -2016,6 +2078,7 @@ private: &GLSLDecompiler::FTrunc, &GLSLDecompiler::FCastInteger<Type::Int>, &GLSLDecompiler::FCastInteger<Type::Uint>, + &GLSLDecompiler::FSwizzleAdd, &GLSLDecompiler::Add<Type::Int>, &GLSLDecompiler::Mul<Type::Int>, @@ -2037,6 +2100,7 @@ private: &GLSLDecompiler::BitfieldInsert<Type::Int>, &GLSLDecompiler::BitfieldExtract<Type::Int>, &GLSLDecompiler::BitCount<Type::Int>, + &GLSLDecompiler::BitMSB<Type::Int>, &GLSLDecompiler::Add<Type::Uint>, &GLSLDecompiler::Mul<Type::Uint>, @@ -2055,6 +2119,7 @@ private: &GLSLDecompiler::BitfieldInsert<Type::Uint>, &GLSLDecompiler::BitfieldExtract<Type::Uint>, &GLSLDecompiler::BitCount<Type::Uint>, + 
&GLSLDecompiler::BitMSB<Type::Uint>, &GLSLDecompiler::Add<Type::HalfFloat>, &GLSLDecompiler::Mul<Type::HalfFloat>, @@ -2118,6 +2183,7 @@ private: &GLSLDecompiler::TextureQueryDimensions, &GLSLDecompiler::TextureQueryLod, &GLSLDecompiler::TexelFetch, + &GLSLDecompiler::TextureGradient, &GLSLDecompiler::ImageLoad, &GLSLDecompiler::ImageStore, @@ -2138,6 +2204,7 @@ private: &GLSLDecompiler::EmitVertex, &GLSLDecompiler::EndPrimitive, + &GLSLDecompiler::InvocationId, &GLSLDecompiler::YNegate, &GLSLDecompiler::LocalInvocationId<0>, &GLSLDecompiler::LocalInvocationId<1>, @@ -2151,15 +2218,10 @@ private: &GLSLDecompiler::VoteAny, &GLSLDecompiler::VoteEqual, - &GLSLDecompiler::Shuffle<Func::ShuffleIndexed>, - &GLSLDecompiler::Shuffle<Func::ShuffleUp>, - &GLSLDecompiler::Shuffle<Func::ShuffleDown>, - &GLSLDecompiler::Shuffle<Func::ShuffleButterfly>, + &GLSLDecompiler::ThreadId, + &GLSLDecompiler::ShuffleIndexed, - &GLSLDecompiler::InRangeShuffle<Func::ShuffleIndexed>, - &GLSLDecompiler::InRangeShuffle<Func::ShuffleUp>, - &GLSLDecompiler::InRangeShuffle<Func::ShuffleDown>, - &GLSLDecompiler::InRangeShuffle<Func::ShuffleButterfly>, + &GLSLDecompiler::MemoryBarrierGL, }; static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); @@ -2200,10 +2262,6 @@ private: return "lmem_" + suffix; } - std::string GetSharedMemory() const { - return fmt::format("smem_{}", suffix); - } - std::string GetInternalFlag(InternalFlag flag) const { constexpr std::array InternalFlagNames = {"zero_flag", "sign_flag", "carry_flag", "overflow_flag"}; @@ -2221,10 +2279,6 @@ private: return GetDeclarationWithSuffix(static_cast<u32>(image.GetIndex()), "image"); } - void EmitIfdefIsBuffer(const Sampler& sampler) { - code.AddLine("#ifdef SAMPLER_{}_IS_BUFFER", sampler.GetIndex()); - } - std::string GetDeclarationWithSuffix(u32 index, std::string_view name) const { return fmt::format("{}_{}_{}", name, index, suffix); } @@ -2243,7 +2297,7 @@ private: const Device& device; 
const ShaderIR& ir; - const ProgramType stage; + const ShaderType stage; const std::string suffix; const Header header; @@ -2492,10 +2546,13 @@ bvec2 HalfFloatNanComparison(bvec2 comparison, vec2 pair1, vec2 pair2) { bvec2 is_nan2 = isnan(pair2); return bvec2(comparison.x || is_nan1.x || is_nan2.x, comparison.y || is_nan1.y || is_nan2.y); } + +const float fswzadd_modifiers_a[] = float[4](-1.0f, 1.0f, -1.0f, 0.0f ); +const float fswzadd_modifiers_b[] = float[4](-1.0f, -1.0f, 1.0f, -1.0f ); )"; } -std::string Decompile(const Device& device, const ShaderIR& ir, ProgramType stage, +std::string Decompile(const Device& device, const ShaderIR& ir, ShaderType stage, const std::string& suffix) { GLSLDecompiler decompiler(device, ir, stage, suffix); decompiler.Decompile(); diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h index b1e75e6cc..7876f48d6 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.h +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h @@ -10,6 +10,7 @@ #include <vector> #include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_type.h" #include "video_core/shader/shader_ir.h" namespace VideoCommon::Shader { @@ -17,20 +18,8 @@ class ShaderIR; } namespace OpenGL { - class Device; - -enum class ProgramType : u32 { - VertexA = 0, - VertexB = 1, - TessellationControl = 2, - TessellationEval = 3, - Geometry = 4, - Fragment = 5, - Compute = 6 -}; - -} // namespace OpenGL +} namespace OpenGL::GLShader { @@ -94,6 +83,6 @@ ShaderEntries GetEntries(const VideoCommon::Shader::ShaderIR& ir); std::string GetCommonDeclarations(); std::string Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir, - ProgramType stage, const std::string& suffix); + Tegra::Engines::ShaderType stage, const std::string& suffix); } // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp 
b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp index 184a565e6..cf874a09a 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp @@ -3,6 +3,7 @@ // Refer to the license.txt file included. #include <cstring> + #include <fmt/format.h> #include "common/assert.h" @@ -12,50 +13,50 @@ #include "common/logging/log.h" #include "common/scm_rev.h" #include "common/zstd_compression.h" - #include "core/core.h" #include "core/hle/kernel/process.h" #include "core/settings.h" - +#include "video_core/engines/shader_type.h" #include "video_core/renderer_opengl/gl_shader_cache.h" #include "video_core/renderer_opengl/gl_shader_disk_cache.h" namespace OpenGL { +using Tegra::Engines::ShaderType; using VideoCommon::Shader::BindlessSamplerMap; using VideoCommon::Shader::BoundSamplerMap; using VideoCommon::Shader::KeyMap; namespace { +using ShaderCacheVersionHash = std::array<u8, 64>; + +enum class TransferableEntryKind : u32 { + Raw, + Usage, +}; + struct ConstBufferKey { - u32 cbuf; - u32 offset; - u32 value; + u32 cbuf{}; + u32 offset{}; + u32 value{}; }; struct BoundSamplerKey { - u32 offset; - Tegra::Engines::SamplerDescriptor sampler; + u32 offset{}; + Tegra::Engines::SamplerDescriptor sampler{}; }; struct BindlessSamplerKey { - u32 cbuf; - u32 offset; - Tegra::Engines::SamplerDescriptor sampler; -}; - -using ShaderCacheVersionHash = std::array<u8, 64>; - -enum class TransferableEntryKind : u32 { - Raw, - Usage, + u32 cbuf{}; + u32 offset{}; + Tegra::Engines::SamplerDescriptor sampler{}; }; -constexpr u32 NativeVersion = 5; +constexpr u32 NativeVersion = 11; // Making sure sizes doesn't change by accident -static_assert(sizeof(BaseBindings) == 16); +static_assert(sizeof(ProgramVariant) == 20); ShaderCacheVersionHash GetShaderCacheVersionHash() { ShaderCacheVersionHash hash{}; @@ -66,10 +67,10 @@ ShaderCacheVersionHash GetShaderCacheVersionHash() { } // Anonymous namespace 
-ShaderDiskCacheRaw::ShaderDiskCacheRaw(u64 unique_identifier, ProgramType program_type, - ProgramCode program_code, ProgramCode program_code_b) - : unique_identifier{unique_identifier}, program_type{program_type}, - program_code{std::move(program_code)}, program_code_b{std::move(program_code_b)} {} +ShaderDiskCacheRaw::ShaderDiskCacheRaw(u64 unique_identifier, ShaderType type, ProgramCode code, + ProgramCode code_b) + : unique_identifier{unique_identifier}, type{type}, code{std::move(code)}, code_b{std::move( + code_b)} {} ShaderDiskCacheRaw::ShaderDiskCacheRaw() = default; @@ -77,42 +78,39 @@ ShaderDiskCacheRaw::~ShaderDiskCacheRaw() = default; bool ShaderDiskCacheRaw::Load(FileUtil::IOFile& file) { if (file.ReadBytes(&unique_identifier, sizeof(u64)) != sizeof(u64) || - file.ReadBytes(&program_type, sizeof(u32)) != sizeof(u32)) { + file.ReadBytes(&type, sizeof(u32)) != sizeof(u32)) { return false; } - u32 program_code_size{}; - u32 program_code_size_b{}; - if (file.ReadBytes(&program_code_size, sizeof(u32)) != sizeof(u32) || - file.ReadBytes(&program_code_size_b, sizeof(u32)) != sizeof(u32)) { + u32 code_size{}; + u32 code_size_b{}; + if (file.ReadBytes(&code_size, sizeof(u32)) != sizeof(u32) || + file.ReadBytes(&code_size_b, sizeof(u32)) != sizeof(u32)) { return false; } - program_code.resize(program_code_size); - program_code_b.resize(program_code_size_b); + code.resize(code_size); + code_b.resize(code_size_b); - if (file.ReadArray(program_code.data(), program_code_size) != program_code_size) + if (file.ReadArray(code.data(), code_size) != code_size) return false; - if (HasProgramA() && - file.ReadArray(program_code_b.data(), program_code_size_b) != program_code_size_b) { + if (HasProgramA() && file.ReadArray(code_b.data(), code_size_b) != code_size_b) { return false; } return true; } bool ShaderDiskCacheRaw::Save(FileUtil::IOFile& file) const { - if (file.WriteObject(unique_identifier) != 1 || - file.WriteObject(static_cast<u32>(program_type)) != 1 || - 
file.WriteObject(static_cast<u32>(program_code.size())) != 1 || - file.WriteObject(static_cast<u32>(program_code_b.size())) != 1) { + if (file.WriteObject(unique_identifier) != 1 || file.WriteObject(static_cast<u32>(type)) != 1 || + file.WriteObject(static_cast<u32>(code.size())) != 1 || + file.WriteObject(static_cast<u32>(code_b.size())) != 1) { return false; } - if (file.WriteArray(program_code.data(), program_code.size()) != program_code.size()) + if (file.WriteArray(code.data(), code.size()) != code.size()) return false; - if (HasProgramA() && - file.WriteArray(program_code_b.data(), program_code_b.size()) != program_code_b.size()) { + if (HasProgramA() && file.WriteArray(code_b.data(), code_b.size()) != code_b.size()) { return false; } return true; diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.h b/src/video_core/renderer_opengl/gl_shader_disk_cache.h index db23ada93..69a2fbdda 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.h @@ -4,7 +4,6 @@ #pragma once -#include <bitset> #include <optional> #include <string> #include <tuple> @@ -19,6 +18,7 @@ #include "common/assert.h" #include "common/common_types.h" #include "core/file_sys/vfs_vector.h" +#include "video_core/engines/shader_type.h" #include "video_core/renderer_opengl/gl_shader_gen.h" #include "video_core/shader/const_buffer_locker.h" @@ -37,42 +37,42 @@ struct ShaderDiskCacheDump; using ProgramCode = std::vector<u64>; using ShaderDumpsMap = std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>; -using TextureBufferUsage = std::bitset<64>; - -/// Allocated bindings used by an OpenGL shader program -struct BaseBindings { - u32 cbuf{}; - u32 gmem{}; - u32 sampler{}; - u32 image{}; - - bool operator==(const BaseBindings& rhs) const { - return std::tie(cbuf, gmem, sampler, image) == - std::tie(rhs.cbuf, rhs.gmem, rhs.sampler, rhs.image); - } - bool operator!=(const BaseBindings& rhs) const { - return 
!operator==(rhs); - } -}; -static_assert(std::is_trivially_copyable_v<BaseBindings>); +/// Describes the different variants a program can be compiled with. +struct ProgramVariant final { + ProgramVariant() = default; + + /// Graphics constructor. + explicit constexpr ProgramVariant(GLenum primitive_mode) noexcept + : primitive_mode{primitive_mode} {} + + /// Compute constructor. + explicit constexpr ProgramVariant(u32 block_x, u32 block_y, u32 block_z, u32 shared_memory_size, + u32 local_memory_size) noexcept + : block_x{block_x}, block_y{static_cast<u16>(block_y)}, block_z{static_cast<u16>(block_z)}, + shared_memory_size{shared_memory_size}, local_memory_size{local_memory_size} {} -/// Describes the different variants a single program can be compiled. -struct ProgramVariant { - BaseBindings base_bindings; + // Graphics specific parameters. GLenum primitive_mode{}; - TextureBufferUsage texture_buffer_usage{}; - bool operator==(const ProgramVariant& rhs) const { - return std::tie(base_bindings, primitive_mode, texture_buffer_usage) == - std::tie(rhs.base_bindings, rhs.primitive_mode, rhs.texture_buffer_usage); + // Compute specific parameters. + u32 block_x{}; + u16 block_y{}; + u16 block_z{}; + u32 shared_memory_size{}; + u32 local_memory_size{}; + + bool operator==(const ProgramVariant& rhs) const noexcept { + return std::tie(primitive_mode, block_x, block_y, block_z, shared_memory_size, + local_memory_size) == std::tie(rhs.primitive_mode, rhs.block_x, rhs.block_y, + rhs.block_z, rhs.shared_memory_size, + rhs.local_memory_size); } - bool operator!=(const ProgramVariant& rhs) const { + bool operator!=(const ProgramVariant& rhs) const noexcept { return !operator==(rhs); } }; - static_assert(std::is_trivially_copyable_v<ProgramVariant>); /// Describes how a shader is used. 
@@ -99,21 +99,14 @@ struct ShaderDiskCacheUsage { namespace std { template <> -struct hash<OpenGL::BaseBindings> { - std::size_t operator()(const OpenGL::BaseBindings& bindings) const noexcept { - return static_cast<std::size_t>(bindings.cbuf) ^ - (static_cast<std::size_t>(bindings.gmem) << 8) ^ - (static_cast<std::size_t>(bindings.sampler) << 16) ^ - (static_cast<std::size_t>(bindings.image) << 24); - } -}; - -template <> struct hash<OpenGL::ProgramVariant> { std::size_t operator()(const OpenGL::ProgramVariant& variant) const noexcept { - return std::hash<OpenGL::BaseBindings>()(variant.base_bindings) ^ - std::hash<OpenGL::TextureBufferUsage>()(variant.texture_buffer_usage) ^ - (static_cast<std::size_t>(variant.primitive_mode) << 6); + return (static_cast<std::size_t>(variant.primitive_mode) << 6) ^ + static_cast<std::size_t>(variant.block_x) ^ + (static_cast<std::size_t>(variant.block_y) << 32) ^ + (static_cast<std::size_t>(variant.block_z) << 48) ^ + (static_cast<std::size_t>(variant.shared_memory_size) << 16) ^ + (static_cast<std::size_t>(variant.local_memory_size) << 36); } }; @@ -121,7 +114,7 @@ template <> struct hash<OpenGL::ShaderDiskCacheUsage> { std::size_t operator()(const OpenGL::ShaderDiskCacheUsage& usage) const noexcept { return static_cast<std::size_t>(usage.unique_identifier) ^ - std::hash<OpenGL::ProgramVariant>()(usage.variant); + std::hash<OpenGL::ProgramVariant>{}(usage.variant); } }; @@ -132,8 +125,8 @@ namespace OpenGL { /// Describes a shader how it's used by the guest GPU class ShaderDiskCacheRaw { public: - explicit ShaderDiskCacheRaw(u64 unique_identifier, ProgramType program_type, - ProgramCode program_code, ProgramCode program_code_b = {}); + explicit ShaderDiskCacheRaw(u64 unique_identifier, Tegra::Engines::ShaderType type, + ProgramCode code, ProgramCode code_b = {}); ShaderDiskCacheRaw(); ~ShaderDiskCacheRaw(); @@ -146,27 +139,26 @@ public: } bool HasProgramA() const { - return program_type == ProgramType::VertexA; + return 
!code.empty() && !code_b.empty(); } - ProgramType GetProgramType() const { - return program_type; + Tegra::Engines::ShaderType GetType() const { + return type; } - const ProgramCode& GetProgramCode() const { - return program_code; + const ProgramCode& GetCode() const { + return code; } - const ProgramCode& GetProgramCodeB() const { - return program_code_b; + const ProgramCode& GetCodeB() const { + return code_b; } private: u64 unique_identifier{}; - ProgramType program_type{}; - - ProgramCode program_code; - ProgramCode program_code_b; + Tegra::Engines::ShaderType type{}; + ProgramCode code; + ProgramCode code_b; }; /// Contains an OpenGL dumped binary program diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp index 0e22eede9..34946fb47 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.cpp +++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp @@ -2,8 +2,13 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
+#include <string> + #include <fmt/format.h> + #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_type.h" +#include "video_core/renderer_opengl/gl_device.h" #include "video_core/renderer_opengl/gl_shader_decompiler.h" #include "video_core/renderer_opengl/gl_shader_gen.h" #include "video_core/shader/shader_ir.h" @@ -11,6 +16,7 @@ namespace OpenGL::GLShader { using Tegra::Engines::Maxwell3D; +using Tegra::Engines::ShaderType; using VideoCommon::Shader::CompileDepth; using VideoCommon::Shader::CompilerSettings; using VideoCommon::Shader::ProgramCode; @@ -18,53 +24,40 @@ using VideoCommon::Shader::ShaderIR; std::string GenerateVertexShader(const Device& device, const ShaderIR& ir, const ShaderIR* ir_b) { std::string out = GetCommonDeclarations(); - out += R"( -layout (std140, binding = EMULATION_UBO_BINDING) uniform vs_config { - vec4 viewport_flip; - uvec4 config_pack; // instance_id, flip_stage, y_direction, padding -}; - -)"; - const auto stage = ir_b ? ProgramType::VertexA : ProgramType::VertexB; - out += Decompile(device, ir, stage, "vertex"); + out += fmt::format(R"( +layout (std140, binding = {}) uniform vs_config {{ + float y_direction; +}}; + +)", + EmulationUniformBlockBinding); + out += Decompile(device, ir, ShaderType::Vertex, "vertex"); if (ir_b) { - out += Decompile(device, *ir_b, ProgramType::VertexB, "vertex_b"); + out += Decompile(device, *ir_b, ShaderType::Vertex, "vertex_b"); } out += R"( void main() { + gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f); execute_vertex(); )"; - if (ir_b) { out += " execute_vertex_b();"; } - - out += R"( - - // Set Position Y direction - gl_Position.y *= utof(config_pack[2]); - // Check if the flip stage is VertexB - // Config pack's second value is flip_stage - if (config_pack[1] == 1) { - // Viewport can be flipped, which is unsupported by glViewport - gl_Position.xy *= viewport_flip.xy; - } -} -)"; + out += "}\n"; return out; } std::string GenerateGeometryShader(const Device& device, const 
ShaderIR& ir) { std::string out = GetCommonDeclarations(); - out += R"( -layout (std140, binding = EMULATION_UBO_BINDING) uniform gs_config { - vec4 viewport_flip; - uvec4 config_pack; // instance_id, flip_stage, y_direction, padding -}; + out += fmt::format(R"( +layout (std140, binding = {}) uniform gs_config {{ + float y_direction; +}}; -)"; - out += Decompile(device, ir, ProgramType::Geometry, "geometry"); +)", + EmulationUniformBlockBinding); + out += Decompile(device, ir, ShaderType::Geometry, "geometry"); out += R"( void main() { @@ -76,7 +69,7 @@ void main() { std::string GenerateFragmentShader(const Device& device, const ShaderIR& ir) { std::string out = GetCommonDeclarations(); - out += R"( + out += fmt::format(R"( layout (location = 0) out vec4 FragColor0; layout (location = 1) out vec4 FragColor1; layout (location = 2) out vec4 FragColor2; @@ -86,13 +79,13 @@ layout (location = 5) out vec4 FragColor5; layout (location = 6) out vec4 FragColor6; layout (location = 7) out vec4 FragColor7; -layout (std140, binding = EMULATION_UBO_BINDING) uniform fs_config { - vec4 viewport_flip; - uvec4 config_pack; // instance_id, flip_stage, y_direction, padding -}; +layout (std140, binding = {}) uniform fs_config {{ + float y_direction; +}}; -)"; - out += Decompile(device, ir, ProgramType::Fragment, "fragment"); +)", + EmulationUniformBlockBinding); + out += Decompile(device, ir, ShaderType::Fragment, "fragment"); out += R"( void main() { @@ -104,7 +97,7 @@ void main() { std::string GenerateComputeShader(const Device& device, const ShaderIR& ir) { std::string out = GetCommonDeclarations(); - out += Decompile(device, ir, ProgramType::Compute, "compute"); + out += Decompile(device, ir, ShaderType::Compute, "compute"); out += R"( void main() { execute_compute(); diff --git a/src/video_core/renderer_opengl/gl_shader_manager.cpp b/src/video_core/renderer_opengl/gl_shader_manager.cpp index b05f90f20..75d3fac04 100644 --- a/src/video_core/renderer_opengl/gl_shader_manager.cpp 
+++ b/src/video_core/renderer_opengl/gl_shader_manager.cpp @@ -40,27 +40,11 @@ void ProgramManager::UpdatePipeline() { old_state = current_state; } -void MaxwellUniformData::SetFromRegs(const Maxwell3D& maxwell, std::size_t shader_stage) { +void MaxwellUniformData::SetFromRegs(const Maxwell3D& maxwell) { const auto& regs = maxwell.regs; - const auto& state = maxwell.state; - - // TODO(bunnei): Support more than one viewport - viewport_flip[0] = regs.viewport_transform[0].scale_x < 0.0 ? -1.0f : 1.0f; - viewport_flip[1] = regs.viewport_transform[0].scale_y < 0.0 ? -1.0f : 1.0f; - - instance_id = state.current_instance; - - // Assign in which stage the position has to be flipped - // (the last stage before the fragment shader). - constexpr u32 geometry_index = static_cast<u32>(Maxwell3D::Regs::ShaderProgram::Geometry); - if (maxwell.regs.shader_config[geometry_index].enable) { - flip_stage = geometry_index; - } else { - flip_stage = static_cast<u32>(Maxwell3D::Regs::ShaderProgram::VertexB); - } // Y_NEGATE controls what value S2R returns for the Y_DIRECTION system value. - y_direction = regs.screen_y_control.y_negate == 0 ? 1.f : -1.f; + y_direction = regs.screen_y_control.y_negate == 0 ? 1.0f : -1.0f; } } // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_shader_manager.h b/src/video_core/renderer_opengl/gl_shader_manager.h index 6961e702a..478c165ce 100644 --- a/src/video_core/renderer_opengl/gl_shader_manager.h +++ b/src/video_core/renderer_opengl/gl_shader_manager.h @@ -18,17 +18,12 @@ namespace OpenGL::GLShader { /// @note Always keep a vec4 at the end. The GL spec is not clear whether the alignment at /// the end of a uniform block is included in UNIFORM_BLOCK_DATA_SIZE or not. /// Not following that rule will cause problems on some AMD drivers. 
-struct MaxwellUniformData { - void SetFromRegs(const Tegra::Engines::Maxwell3D& maxwell, std::size_t shader_stage); - - alignas(16) GLvec4 viewport_flip; - struct alignas(16) { - GLuint instance_id; - GLuint flip_stage; - GLfloat y_direction; - }; +struct alignas(16) MaxwellUniformData { + void SetFromRegs(const Tegra::Engines::Maxwell3D& maxwell); + + GLfloat y_direction; }; -static_assert(sizeof(MaxwellUniformData) == 32, "MaxwellUniformData structure size is incorrect"); +static_assert(sizeof(MaxwellUniformData) == 16, "MaxwellUniformData structure size is incorrect"); static_assert(sizeof(MaxwellUniformData) < 16384, "MaxwellUniformData structure must be less than 16kb as per the OpenGL spec"); @@ -55,6 +50,10 @@ public: current_state.geometry_shader = 0; } + void UseTrivialFragmentShader() { + current_state.fragment_shader = 0; + } + private: struct PipelineState { bool operator==(const PipelineState& rhs) const { diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp index f25148362..df2e2395a 100644 --- a/src/video_core/renderer_opengl/gl_state.cpp +++ b/src/video_core/renderer_opengl/gl_state.cpp @@ -182,6 +182,10 @@ void OpenGLState::ApplyCulling() { } } +void OpenGLState::ApplyRasterizerDiscard() { + Enable(GL_RASTERIZER_DISCARD, cur_state.rasterizer_discard, rasterizer_discard); +} + void OpenGLState::ApplyColorMask() { if (!dirty.color_mask) { return; @@ -410,15 +414,32 @@ void OpenGLState::ApplyAlphaTest() { } } +void OpenGLState::ApplyClipControl() { + if (UpdateTie(std::tie(cur_state.clip_control.origin, cur_state.clip_control.depth_mode), + std::tie(clip_control.origin, clip_control.depth_mode))) { + glClipControl(clip_control.origin, clip_control.depth_mode); + } +} + void OpenGLState::ApplyTextures() { - if (const auto update = UpdateArray(cur_state.textures, textures)) { - glBindTextures(update->first, update->second, textures.data() + update->first); + const std::size_t size = std::size(textures); 
+ for (std::size_t i = 0; i < size; ++i) { + if (UpdateValue(cur_state.textures[i], textures[i])) { + // BindTextureUnit doesn't support binding null textures, skip those binds. + // TODO(Rodrigo): Stop using null textures + if (textures[i] != 0) { + glBindTextureUnit(static_cast<GLuint>(i), textures[i]); + } + } } } void OpenGLState::ApplySamplers() { - if (const auto update = UpdateArray(cur_state.samplers, samplers)) { - glBindSamplers(update->first, update->second, samplers.data() + update->first); + const std::size_t size = std::size(samplers); + for (std::size_t i = 0; i < size; ++i) { + if (UpdateValue(cur_state.samplers[i], samplers[i])) { + glBindSampler(static_cast<GLuint>(i), samplers[i]); + } } } @@ -438,6 +459,7 @@ void OpenGLState::Apply() { ApplyPointSize(); ApplyFragmentColorClamp(); ApplyMultisample(); + ApplyRasterizerDiscard(); ApplyColorMask(); ApplyDepthClamp(); ApplyViewport(); @@ -453,6 +475,7 @@ void OpenGLState::Apply() { ApplyImages(); ApplyPolygonOffset(); ApplyAlphaTest(); + ApplyClipControl(); } void OpenGLState::EmulateViewportWithScissor() { diff --git a/src/video_core/renderer_opengl/gl_state.h b/src/video_core/renderer_opengl/gl_state.h index cca25206b..fb180f302 100644 --- a/src/video_core/renderer_opengl/gl_state.h +++ b/src/video_core/renderer_opengl/gl_state.h @@ -48,6 +48,8 @@ public: GLuint index = 0; } primitive_restart; // GL_PRIMITIVE_RESTART + bool rasterizer_discard = false; // GL_RASTERIZER_DISCARD + struct ColorMask { GLboolean red_enabled = GL_TRUE; GLboolean green_enabled = GL_TRUE; @@ -56,6 +58,7 @@ public: }; std::array<ColorMask, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_mask; // GL_COLOR_WRITEMASK + struct { bool test_enabled = false; // GL_STENCIL_TEST struct { @@ -96,9 +99,11 @@ public: GLenum operation = GL_COPY; } logic_op; - std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumTextureSamplers> textures = {}; - std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumTextureSamplers> samplers = 
{}; - std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumImages> images = {}; + static constexpr std::size_t NumSamplers = 32 * 5; + static constexpr std::size_t NumImages = 8 * 5; + std::array<GLuint, NumSamplers> textures = {}; + std::array<GLuint, NumSamplers> samplers = {}; + std::array<GLuint, NumImages> images = {}; struct { GLuint read_framebuffer = 0; // GL_READ_FRAMEBUFFER_BINDING @@ -146,6 +151,11 @@ public: std::array<bool, 8> clip_distance = {}; // GL_CLIP_DISTANCE + struct { + GLenum origin = GL_LOWER_LEFT; + GLenum depth_mode = GL_NEGATIVE_ONE_TO_ONE; + } clip_control; + OpenGLState(); /// Get the currently active OpenGL state @@ -167,6 +177,7 @@ public: void ApplyMultisample(); void ApplySRgb(); void ApplyCulling(); + void ApplyRasterizerDiscard(); void ApplyColorMask(); void ApplyDepth(); void ApplyPrimitiveRestart(); @@ -182,6 +193,7 @@ public: void ApplyDepthClamp(); void ApplyPolygonOffset(); void ApplyAlphaTest(); + void ApplyClipControl(); /// Resets any references to the given resource OpenGLState& UnbindTexture(GLuint handle); diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp index 55b3e58b2..b790b0ef4 100644 --- a/src/video_core/renderer_opengl/gl_texture_cache.cpp +++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp @@ -23,7 +23,6 @@ namespace OpenGL { using Tegra::Texture::SwizzleSource; using VideoCore::MortonSwizzleMode; -using VideoCore::Surface::ComponentType; using VideoCore::Surface::PixelFormat; using VideoCore::Surface::SurfaceCompression; using VideoCore::Surface::SurfaceTarget; @@ -40,114 +39,95 @@ struct FormatTuple { GLint internal_format; GLenum format; GLenum type; - ComponentType component_type; bool compressed; }; constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{ - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, ComponentType::UNorm, false}, // ABGR8U - {GL_RGBA8, GL_RGBA, GL_BYTE, ComponentType::SNorm, 
false}, // ABGR8S - {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, ComponentType::UInt, false}, // ABGR8UI - {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, ComponentType::UNorm, false}, // B5G6R5U - {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, ComponentType::UNorm, - false}, // A2B10G10R10U - {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV, ComponentType::UNorm, false}, // A1B5G5R5U - {GL_R8, GL_RED, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // R8U - {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, ComponentType::UInt, false}, // R8UI - {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, ComponentType::Float, false}, // RGBA16F - {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // RGBA16U - {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // RGBA16UI - {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, ComponentType::Float, - false}, // R11FG11FB10F - {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // RGBA32UI - {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, - true}, // DXT1 - {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, - true}, // DXT23 - {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, - true}, // DXT45 - {GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, true}, // DXN1 - {GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, - true}, // DXN2UNORM - {GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, ComponentType::SNorm, true}, // DXN2SNORM - {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, - true}, // BC7U - {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float, - true}, // BC6H_UF16 - {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float, - true}, // BC6H_SF16 - {GL_RGBA8, 
GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4 - {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // BGRA8 - {GL_RGBA32F, GL_RGBA, GL_FLOAT, ComponentType::Float, false}, // RGBA32F - {GL_RG32F, GL_RG, GL_FLOAT, ComponentType::Float, false}, // RG32F - {GL_R32F, GL_RED, GL_FLOAT, ComponentType::Float, false}, // R32F - {GL_R16F, GL_RED, GL_HALF_FLOAT, ComponentType::Float, false}, // R16F - {GL_R16, GL_RED, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // R16U - {GL_R16_SNORM, GL_RED, GL_SHORT, ComponentType::SNorm, false}, // R16S - {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // R16UI - {GL_R16I, GL_RED_INTEGER, GL_SHORT, ComponentType::SInt, false}, // R16I - {GL_RG16, GL_RG, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // RG16 - {GL_RG16F, GL_RG, GL_HALF_FLOAT, ComponentType::Float, false}, // RG16F - {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // RG16UI - {GL_RG16I, GL_RG_INTEGER, GL_SHORT, ComponentType::SInt, false}, // RG16I - {GL_RG16_SNORM, GL_RG, GL_SHORT, ComponentType::SNorm, false}, // RG16S - {GL_RGB32F, GL_RGB, GL_FLOAT, ComponentType::Float, false}, // RGB32F - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, ComponentType::UNorm, - false}, // RGBA8_SRGB - {GL_RG8, GL_RG, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // RG8U - {GL_RG8, GL_RG, GL_BYTE, ComponentType::SNorm, false}, // RG8S - {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // RG32UI - {GL_RGB16F, GL_RGBA16, GL_HALF_FLOAT, ComponentType::Float, false}, // RGBX16F - {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // R32UI - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8 - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5 - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4 - {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, 
ComponentType::UNorm, false}, // BGRA8 + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false}, // ABGR8U + {GL_RGBA8, GL_RGBA, GL_BYTE, false}, // ABGR8S + {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, false}, // ABGR8UI + {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, false}, // B5G6R5U + {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, false}, // A2B10G10R10U + {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV, false}, // A1B5G5R5U + {GL_R8, GL_RED, GL_UNSIGNED_BYTE, false}, // R8U + {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, false}, // R8UI + {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, false}, // RGBA16F + {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, false}, // RGBA16U + {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, false}, // RGBA16UI + {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, false}, // R11FG11FB10F + {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, false}, // RGBA32UI + {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT1 + {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT23 + {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT45 + {GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_INT_8_8_8_8, true}, // DXN1 + {GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, true}, // DXN2UNORM + {GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, true}, // DXN2SNORM + {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // BC7U + {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true}, // BC6H_UF16 + {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true}, // BC6H_SF16 + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_4X4 + {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, false}, // BGRA8 + {GL_RGBA32F, GL_RGBA, GL_FLOAT, false}, // RGBA32F + {GL_RG32F, GL_RG, GL_FLOAT, false}, // RG32F + {GL_R32F, GL_RED, GL_FLOAT, false}, // R32F + {GL_R16F, GL_RED, GL_HALF_FLOAT, false}, // R16F + {GL_R16, 
GL_RED, GL_UNSIGNED_SHORT, false}, // R16U + {GL_R16_SNORM, GL_RED, GL_SHORT, false}, // R16S + {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, false}, // R16UI + {GL_R16I, GL_RED_INTEGER, GL_SHORT, false}, // R16I + {GL_RG16, GL_RG, GL_UNSIGNED_SHORT, false}, // RG16 + {GL_RG16F, GL_RG, GL_HALF_FLOAT, false}, // RG16F + {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, false}, // RG16UI + {GL_RG16I, GL_RG_INTEGER, GL_SHORT, false}, // RG16I + {GL_RG16_SNORM, GL_RG, GL_SHORT, false}, // RG16S + {GL_RGB32F, GL_RGB, GL_FLOAT, false}, // RGB32F + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false}, // RGBA8_SRGB + {GL_RG8, GL_RG, GL_UNSIGNED_BYTE, false}, // RG8U + {GL_RG8, GL_RG, GL_BYTE, false}, // RG8S + {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, false}, // RG32UI + {GL_RGB16F, GL_RGBA16, GL_HALF_FLOAT, false}, // RGBX16F + {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, false}, // R32UI + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X8 + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X5 + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X4 + {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, false}, // BGRA8 // Compressed sRGB formats - {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, - true}, // DXT1_SRGB - {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, - true}, // DXT23_SRGB - {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, - true}, // DXT45_SRGB - {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, - true}, // BC7U_SRGB - {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV, ComponentType::UNorm, false}, // R4G4B4A4U - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4_SRGB - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8_SRGB - {GL_SRGB8_ALPHA8, GL_RGBA, 
GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5_SRGB - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4_SRGB - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5 - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5_SRGB - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8 - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8_SRGB - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_6X6 - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_6X6_SRGB - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X10 - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X10_SRGB - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_12X12 - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_12X12_SRGB - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X6 - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X6_SRGB - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_6X5 - {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_6X5_SRGB - {GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, ComponentType::Float, false}, // E5B9G9R9F + {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT1_SRGB + {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT23_SRGB + {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT45_SRGB + {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // BC7U_SRGB + {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV, false}, // 
R4G4B4A4U + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_4X4_SRGB + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X8_SRGB + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X5_SRGB + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X4_SRGB + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X5 + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X5_SRGB + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X8 + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X8_SRGB + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X6 + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X6_SRGB + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X10 + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X10_SRGB + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_12X12 + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_12X12_SRGB + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X6 + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X6_SRGB + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X5 + {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X5_SRGB + {GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, false}, // E5B9G9R9F // Depth formats - {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, ComponentType::Float, false}, // Z32F - {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, ComponentType::UNorm, - false}, // Z16 + {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, false}, // Z32F + {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, false}, // Z16 // DepthStencil formats - {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, ComponentType::UNorm, - false}, // Z24S8 - {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, ComponentType::UNorm, - false}, // S8Z24 - {GL_DEPTH32F_STENCIL8, 
GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, - ComponentType::Float, false}, // Z32FS8 + {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, false}, // Z24S8 + {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, false}, // S8Z24 + {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, false}, // Z32FS8 }}; -const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType component_type) { +const FormatTuple& GetFormatTuple(PixelFormat pixel_format) { ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size()); const auto& format{tex_format_tuples[static_cast<std::size_t>(pixel_format)]}; return format; @@ -249,7 +229,7 @@ OGLTexture CreateTexture(const SurfaceParams& params, GLenum target, GLenum inte CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params) : VideoCommon::SurfaceBase<View>(gpu_addr, params) { - const auto& tuple{GetFormatTuple(params.pixel_format, params.component_type)}; + const auto& tuple{GetFormatTuple(params.pixel_format)}; internal_format = tuple.internal_format; format = tuple.format; type = tuple.type; @@ -451,8 +431,7 @@ OGLTextureView CachedSurfaceView::CreateTextureView() const { texture_view.Create(); const GLuint handle{texture_view.handle}; - const FormatTuple& tuple{ - GetFormatTuple(owner_params.pixel_format, owner_params.component_type)}; + const FormatTuple& tuple{GetFormatTuple(owner_params.pixel_format)}; glTextureView(handle, target, surface.texture.handle, tuple.internal_format, params.base_level, params.num_levels, params.base_layer, params.num_layers); @@ -509,6 +488,7 @@ void TextureCacheOpenGL::ImageBlit(View& src_view, View& dst_view, OpenGLState state; state.draw.read_framebuffer = src_framebuffer.handle; state.draw.draw_framebuffer = dst_framebuffer.handle; + state.framebuffer_srgb.enabled = dst_params.srgb_conversion; state.AllDirty(); state.Apply(); @@ -562,8 +542,8 @@ void TextureCacheOpenGL::BufferCopy(Surface& 
src_surface, Surface& dst_surface) const auto& dst_params = dst_surface->GetSurfaceParams(); UNIMPLEMENTED_IF(src_params.num_levels > 1 || dst_params.num_levels > 1); - const auto source_format = GetFormatTuple(src_params.pixel_format, src_params.component_type); - const auto dest_format = GetFormatTuple(dst_params.pixel_format, dst_params.component_type); + const auto source_format = GetFormatTuple(src_params.pixel_format); + const auto dest_format = GetFormatTuple(dst_params.pixel_format); const std::size_t source_size = src_surface->GetHostSizeInBytes(); const std::size_t dest_size = dst_surface->GetHostSizeInBytes(); diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h index 9ed738171..ea4f35663 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h @@ -120,6 +120,8 @@ inline GLenum PrimitiveTopology(Maxwell::PrimitiveTopology topology) { return GL_POINTS; case Maxwell::PrimitiveTopology::Lines: return GL_LINES; + case Maxwell::PrimitiveTopology::LineLoop: + return GL_LINE_LOOP; case Maxwell::PrimitiveTopology::LineStrip: return GL_LINE_STRIP; case Maxwell::PrimitiveTopology::Triangles: @@ -130,11 +132,23 @@ inline GLenum PrimitiveTopology(Maxwell::PrimitiveTopology topology) { return GL_TRIANGLE_FAN; case Maxwell::PrimitiveTopology::Quads: return GL_QUADS; - default: - LOG_CRITICAL(Render_OpenGL, "Unimplemented topology={}", static_cast<u32>(topology)); - UNREACHABLE(); - return {}; + case Maxwell::PrimitiveTopology::QuadStrip: + return GL_QUAD_STRIP; + case Maxwell::PrimitiveTopology::Polygon: + return GL_POLYGON; + case Maxwell::PrimitiveTopology::LinesAdjacency: + return GL_LINES_ADJACENCY; + case Maxwell::PrimitiveTopology::LineStripAdjacency: + return GL_LINE_STRIP_ADJACENCY; + case Maxwell::PrimitiveTopology::TrianglesAdjacency: + return GL_TRIANGLES_ADJACENCY; + case Maxwell::PrimitiveTopology::TriangleStripAdjacency: + return 
GL_TRIANGLE_STRIP_ADJACENCY; + case Maxwell::PrimitiveTopology::Patches: + return GL_PATCHES; } + UNREACHABLE_MSG("Invalid topology={}", static_cast<int>(topology)); + return GL_POINTS; } inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode, diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 4bbd17b12..bba16afaf 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -24,19 +24,21 @@ namespace OpenGL { -static const char vertex_shader[] = R"( -#version 150 core +namespace { -in vec2 vert_position; -in vec2 vert_tex_coord; -out vec2 frag_tex_coord; +constexpr char vertex_shader[] = R"( +#version 430 core + +layout (location = 0) in vec2 vert_position; +layout (location = 1) in vec2 vert_tex_coord; +layout (location = 0) out vec2 frag_tex_coord; // This is a truncated 3x3 matrix for 2D transformations: // The upper-left 2x2 submatrix performs scaling/rotation/mirroring. // The third column performs translation. // The third row could be used for projection, which we don't need in 2D. It hence is assumed to // implicitly be [0, 0, 1] -uniform mat3x2 modelview_matrix; +layout (location = 0) uniform mat3x2 modelview_matrix; void main() { // Multiply input position by the rotscale part of the matrix and then manually translate by @@ -47,34 +49,29 @@ void main() { } )"; -static const char fragment_shader[] = R"( -#version 150 core +constexpr char fragment_shader[] = R"( +#version 430 core -in vec2 frag_tex_coord; -out vec4 color; +layout (location = 0) in vec2 frag_tex_coord; +layout (location = 0) out vec4 color; -uniform sampler2D color_texture; +layout (binding = 0) uniform sampler2D color_texture; void main() { - // Swap RGBA -> ABGR so we don't have to do this on the CPU. This needs to change if we have to - // support more framebuffer pixel formats. 
color = texture(color_texture, frag_tex_coord); } )"; -/** - * Vertex structure that the drawn screen rectangles are composed of. - */ +constexpr GLint PositionLocation = 0; +constexpr GLint TexCoordLocation = 1; +constexpr GLint ModelViewMatrixLocation = 0; + struct ScreenRectVertex { - ScreenRectVertex(GLfloat x, GLfloat y, GLfloat u, GLfloat v) { - position[0] = x; - position[1] = y; - tex_coord[0] = u; - tex_coord[1] = v; - } + constexpr ScreenRectVertex(GLfloat x, GLfloat y, GLfloat u, GLfloat v) + : position{{x, y}}, tex_coord{{u, v}} {} - GLfloat position[2]; - GLfloat tex_coord[2]; + std::array<GLfloat, 2> position; + std::array<GLfloat, 2> tex_coord; }; /** @@ -84,18 +81,82 @@ struct ScreenRectVertex { * The projection part of the matrix is trivial, hence these operations are represented * by a 3x2 matrix. */ -static std::array<GLfloat, 3 * 2> MakeOrthographicMatrix(const float width, const float height) { +std::array<GLfloat, 3 * 2> MakeOrthographicMatrix(float width, float height) { std::array<GLfloat, 3 * 2> matrix; // Laid out in column-major order // clang-format off - matrix[0] = 2.f / width; matrix[2] = 0.f; matrix[4] = -1.f; - matrix[1] = 0.f; matrix[3] = -2.f / height; matrix[5] = 1.f; + matrix[0] = 2.f / width; matrix[2] = 0.f; matrix[4] = -1.f; + matrix[1] = 0.f; matrix[3] = -2.f / height; matrix[5] = 1.f; // Last matrix row is implicitly assumed to be [0, 0, 1]. 
// clang-format on return matrix; } +const char* GetSource(GLenum source) { + switch (source) { + case GL_DEBUG_SOURCE_API: + return "API"; + case GL_DEBUG_SOURCE_WINDOW_SYSTEM: + return "WINDOW_SYSTEM"; + case GL_DEBUG_SOURCE_SHADER_COMPILER: + return "SHADER_COMPILER"; + case GL_DEBUG_SOURCE_THIRD_PARTY: + return "THIRD_PARTY"; + case GL_DEBUG_SOURCE_APPLICATION: + return "APPLICATION"; + case GL_DEBUG_SOURCE_OTHER: + return "OTHER"; + default: + UNREACHABLE(); + return "Unknown source"; + } +} + +const char* GetType(GLenum type) { + switch (type) { + case GL_DEBUG_TYPE_ERROR: + return "ERROR"; + case GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR: + return "DEPRECATED_BEHAVIOR"; + case GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR: + return "UNDEFINED_BEHAVIOR"; + case GL_DEBUG_TYPE_PORTABILITY: + return "PORTABILITY"; + case GL_DEBUG_TYPE_PERFORMANCE: + return "PERFORMANCE"; + case GL_DEBUG_TYPE_OTHER: + return "OTHER"; + case GL_DEBUG_TYPE_MARKER: + return "MARKER"; + default: + UNREACHABLE(); + return "Unknown type"; + } +} + +void APIENTRY DebugHandler(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, + const GLchar* message, const void* user_param) { + const char format[] = "{} {} {}: {}"; + const char* const str_source = GetSource(source); + const char* const str_type = GetType(type); + + switch (severity) { + case GL_DEBUG_SEVERITY_HIGH: + LOG_CRITICAL(Render_OpenGL, format, str_source, str_type, id, message); + break; + case GL_DEBUG_SEVERITY_MEDIUM: + LOG_WARNING(Render_OpenGL, format, str_source, str_type, id, message); + break; + case GL_DEBUG_SEVERITY_NOTIFICATION: + case GL_DEBUG_SEVERITY_LOW: + LOG_DEBUG(Render_OpenGL, format, str_source, str_type, id, message); + break; + } +} + +} // Anonymous namespace + RendererOpenGL::RendererOpenGL(Core::Frontend::EmuWindow& emu_window, Core::System& system) : VideoCore::RendererBase{emu_window}, emu_window{emu_window}, system{system} {} @@ -138,9 +199,6 @@ void RendererOpenGL::SwapBuffers(const 
Tegra::FramebufferConfig* framebuffer) { prev_state.Apply(); } -/** - * Loads framebuffer from emulated memory into the active OpenGL texture. - */ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuffer) { // Framebuffer orientation handling framebuffer_transform_flags = framebuffer.transform_flags; @@ -158,7 +216,7 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf VideoCore::Surface::PixelFormatFromGPUPixelFormat(framebuffer.pixel_format)}; const u32 bytes_per_pixel{VideoCore::Surface::GetBytesPerPixel(pixel_format)}; const u64 size_in_bytes{framebuffer.stride * framebuffer.height * bytes_per_pixel}; - const auto host_ptr{Memory::GetPointer(framebuffer_addr)}; + u8* const host_ptr{system.Memory().GetPointer(framebuffer_addr)}; rasterizer->FlushRegion(ToCacheAddr(host_ptr), size_in_bytes); // TODO(Rodrigo): Read this from HLE @@ -181,19 +239,12 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); } -/** - * Fills active OpenGL texture with the given RGB color. Since the color is solid, the texture can - * be 1x1 but will stretch across whatever it's rendered on. - */ void RendererOpenGL::LoadColorToActiveGLTexture(u8 color_r, u8 color_g, u8 color_b, u8 color_a, const TextureInfo& texture) { const u8 framebuffer_data[4] = {color_a, color_b, color_g, color_r}; glClearTexImage(texture.resource.handle, 0, GL_RGBA, GL_UNSIGNED_BYTE, framebuffer_data); } -/** - * Initializes the OpenGL state and creates persistent objects. 
- */ void RendererOpenGL::InitOpenGLObjects() { glClearColor(Settings::values.bg_red, Settings::values.bg_green, Settings::values.bg_blue, 0.0f); @@ -203,10 +254,6 @@ void RendererOpenGL::InitOpenGLObjects() { state.draw.shader_program = shader.handle; state.AllDirty(); state.Apply(); - uniform_modelview_matrix = glGetUniformLocation(shader.handle, "modelview_matrix"); - uniform_color_texture = glGetUniformLocation(shader.handle, "color_texture"); - attrib_position = glGetAttribLocation(shader.handle, "vert_position"); - attrib_tex_coord = glGetAttribLocation(shader.handle, "vert_tex_coord"); // Generate VBO handle for drawing vertex_buffer.Create(); @@ -217,14 +264,14 @@ void RendererOpenGL::InitOpenGLObjects() { // Attach vertex data to VAO glNamedBufferData(vertex_buffer.handle, sizeof(ScreenRectVertex) * 4, nullptr, GL_STREAM_DRAW); - glVertexArrayAttribFormat(vertex_array.handle, attrib_position, 2, GL_FLOAT, GL_FALSE, + glVertexArrayAttribFormat(vertex_array.handle, PositionLocation, 2, GL_FLOAT, GL_FALSE, offsetof(ScreenRectVertex, position)); - glVertexArrayAttribFormat(vertex_array.handle, attrib_tex_coord, 2, GL_FLOAT, GL_FALSE, + glVertexArrayAttribFormat(vertex_array.handle, TexCoordLocation, 2, GL_FLOAT, GL_FALSE, offsetof(ScreenRectVertex, tex_coord)); - glVertexArrayAttribBinding(vertex_array.handle, attrib_position, 0); - glVertexArrayAttribBinding(vertex_array.handle, attrib_tex_coord, 0); - glEnableVertexArrayAttrib(vertex_array.handle, attrib_position); - glEnableVertexArrayAttrib(vertex_array.handle, attrib_tex_coord); + glVertexArrayAttribBinding(vertex_array.handle, PositionLocation, 0); + glVertexArrayAttribBinding(vertex_array.handle, TexCoordLocation, 0); + glEnableVertexArrayAttrib(vertex_array.handle, PositionLocation); + glEnableVertexArrayAttrib(vertex_array.handle, TexCoordLocation); glVertexArrayVertexBuffer(vertex_array.handle, 0, vertex_buffer.handle, 0, sizeof(ScreenRectVertex)); @@ -323,24 +370,26 @@ void 
RendererOpenGL::DrawScreenTriangles(const ScreenInfo& screen_info, float x, // (e.g. handheld mode) on a 1920x1080 framebuffer. f32 scale_u = 1.f, scale_v = 1.f; if (framebuffer_crop_rect.GetWidth() > 0) { - scale_u = static_cast<f32>(framebuffer_crop_rect.GetWidth()) / screen_info.texture.width; + scale_u = static_cast<f32>(framebuffer_crop_rect.GetWidth()) / + static_cast<f32>(screen_info.texture.width); } if (framebuffer_crop_rect.GetHeight() > 0) { - scale_v = static_cast<f32>(framebuffer_crop_rect.GetHeight()) / screen_info.texture.height; + scale_v = static_cast<f32>(framebuffer_crop_rect.GetHeight()) / + static_cast<f32>(screen_info.texture.height); } - std::array<ScreenRectVertex, 4> vertices = {{ + const std::array vertices = { ScreenRectVertex(x, y, texcoords.top * scale_u, left * scale_v), ScreenRectVertex(x + w, y, texcoords.bottom * scale_u, left * scale_v), ScreenRectVertex(x, y + h, texcoords.top * scale_u, right * scale_v), ScreenRectVertex(x + w, y + h, texcoords.bottom * scale_u, right * scale_v), - }}; + }; state.textures[0] = screen_info.display_texture; state.framebuffer_srgb.enabled = screen_info.display_srgb; state.AllDirty(); state.Apply(); - glNamedBufferSubData(vertex_buffer.handle, 0, sizeof(vertices), vertices.data()); + glNamedBufferSubData(vertex_buffer.handle, 0, sizeof(vertices), std::data(vertices)); glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); // Restore default state state.framebuffer_srgb.enabled = false; @@ -349,9 +398,6 @@ void RendererOpenGL::DrawScreenTriangles(const ScreenInfo& screen_info, float x, state.Apply(); } -/** - * Draws the emulated screens to the emulator window. 
- */ void RendererOpenGL::DrawScreen(const Layout::FramebufferLayout& layout) { if (renderer_settings.set_background_color) { // Update background color before drawing @@ -365,21 +411,17 @@ void RendererOpenGL::DrawScreen(const Layout::FramebufferLayout& layout) { glClear(GL_COLOR_BUFFER_BIT); // Set projection matrix - std::array<GLfloat, 3 * 2> ortho_matrix = - MakeOrthographicMatrix((float)layout.width, (float)layout.height); - glUniformMatrix3x2fv(uniform_modelview_matrix, 1, GL_FALSE, ortho_matrix.data()); - - // Bind texture in Texture Unit 0 - glActiveTexture(GL_TEXTURE0); - glUniform1i(uniform_color_texture, 0); + const std::array ortho_matrix = + MakeOrthographicMatrix(static_cast<float>(layout.width), static_cast<float>(layout.height)); + glUniformMatrix3x2fv(ModelViewMatrixLocation, 1, GL_FALSE, ortho_matrix.data()); - DrawScreenTriangles(screen_info, (float)screen.left, (float)screen.top, - (float)screen.GetWidth(), (float)screen.GetHeight()); + DrawScreenTriangles(screen_info, static_cast<float>(screen.left), + static_cast<float>(screen.top), static_cast<float>(screen.GetWidth()), + static_cast<float>(screen.GetHeight())); m_current_frame++; } -/// Updates the framerate void RendererOpenGL::UpdateFramerate() {} void RendererOpenGL::CaptureScreenshot() { @@ -416,63 +458,6 @@ void RendererOpenGL::CaptureScreenshot() { renderer_settings.screenshot_requested = false; } -static const char* GetSource(GLenum source) { -#define RET(s) \ - case GL_DEBUG_SOURCE_##s: \ - return #s - switch (source) { - RET(API); - RET(WINDOW_SYSTEM); - RET(SHADER_COMPILER); - RET(THIRD_PARTY); - RET(APPLICATION); - RET(OTHER); - default: - UNREACHABLE(); - return "Unknown source"; - } -#undef RET -} - -static const char* GetType(GLenum type) { -#define RET(t) \ - case GL_DEBUG_TYPE_##t: \ - return #t - switch (type) { - RET(ERROR); - RET(DEPRECATED_BEHAVIOR); - RET(UNDEFINED_BEHAVIOR); - RET(PORTABILITY); - RET(PERFORMANCE); - RET(OTHER); - RET(MARKER); - default: - 
UNREACHABLE(); - return "Unknown type"; - } -#undef RET -} - -static void APIENTRY DebugHandler(GLenum source, GLenum type, GLuint id, GLenum severity, - GLsizei length, const GLchar* message, const void* user_param) { - const char format[] = "{} {} {}: {}"; - const char* const str_source = GetSource(source); - const char* const str_type = GetType(type); - - switch (severity) { - case GL_DEBUG_SEVERITY_HIGH: - LOG_CRITICAL(Render_OpenGL, format, str_source, str_type, id, message); - break; - case GL_DEBUG_SEVERITY_MEDIUM: - LOG_WARNING(Render_OpenGL, format, str_source, str_type, id, message); - break; - case GL_DEBUG_SEVERITY_NOTIFICATION: - case GL_DEBUG_SEVERITY_LOW: - LOG_DEBUG(Render_OpenGL, format, str_source, str_type, id, message); - break; - } -} - bool RendererOpenGL::Init() { Core::Frontend::ScopeAcquireWindowContext acquire_context{render_window}; @@ -493,7 +478,6 @@ bool RendererOpenGL::Init() { return true; } -/// Shutdown the renderer void RendererOpenGL::ShutDown() {} } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h index cf26628ca..b56328a7f 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.h +++ b/src/video_core/renderer_opengl/renderer_opengl.h @@ -59,21 +59,31 @@ public: void ShutDown() override; private: + /// Initializes the OpenGL state and creates persistent objects. void InitOpenGLObjects(); + void AddTelemetryFields(); + void CreateRasterizer(); void ConfigureFramebufferTexture(TextureInfo& texture, const Tegra::FramebufferConfig& framebuffer); + + /// Draws the emulated screens to the emulator window. void DrawScreen(const Layout::FramebufferLayout& layout); + void DrawScreenTriangles(const ScreenInfo& screen_info, float x, float y, float w, float h); + + /// Updates the framerate. 
 void UpdateFramerate(); void CaptureScreenshot(); - // Loads framebuffer from emulated memory into the display information structure + /// Loads framebuffer from emulated memory into the active OpenGL texture. void LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuffer); - // Fills active OpenGL texture with the given RGBA color. + + /// Fills active OpenGL texture with the given RGB color. Since the color is solid, the texture + /// can be 1x1 but will stretch across whatever it's rendered on. void LoadColorToActiveGLTexture(u8 color_r, u8 color_g, u8 color_b, u8 color_a, const TextureInfo& texture); @@ -94,14 +104,6 @@ private: /// OpenGL framebuffer data std::vector<u8> gl_framebuffer_data; - // Shader uniform location indices - GLuint uniform_modelview_matrix; - GLuint uniform_color_texture; - - // Shader attribute input indices - GLuint attrib_position; - GLuint attrib_tex_coord; - /// Used for transforming the framebuffer orientation Tegra::FramebufferConfig::TransformFlags framebuffer_transform_flags; Common::Rectangle<int> framebuffer_crop_rect; diff --git a/src/video_core/renderer_opengl/utils.cpp b/src/video_core/renderer_opengl/utils.cpp index c504a2c1a..9770dda1c 100644 --- a/src/video_core/renderer_opengl/utils.cpp +++ b/src/video_core/renderer_opengl/utils.cpp @@ -3,7 +3,10 @@ // Refer to the license.txt file included.
#include <string> +#include <vector> + #include <fmt/format.h> + #include <glad/glad.h> #include "common/assert.h" @@ -48,34 +51,19 @@ BindBuffersRangePushBuffer::BindBuffersRangePushBuffer(GLenum target) : target{t BindBuffersRangePushBuffer::~BindBuffersRangePushBuffer() = default; -void BindBuffersRangePushBuffer::Setup(GLuint first_) { - first = first_; - buffer_pointers.clear(); - offsets.clear(); - sizes.clear(); +void BindBuffersRangePushBuffer::Setup() { + entries.clear(); } -void BindBuffersRangePushBuffer::Push(const GLuint* buffer, GLintptr offset, GLsizeiptr size) { - buffer_pointers.push_back(buffer); - offsets.push_back(offset); - sizes.push_back(size); +void BindBuffersRangePushBuffer::Push(GLuint binding, const GLuint* buffer, GLintptr offset, + GLsizeiptr size) { + entries.push_back(Entry{binding, buffer, offset, size}); } void BindBuffersRangePushBuffer::Bind() { - // Ensure sizes are valid. - const std::size_t count{buffer_pointers.size()}; - DEBUG_ASSERT(count == offsets.size() && count == sizes.size()); - if (count == 0) { - return; + for (const Entry& entry : entries) { + glBindBufferRange(target, entry.binding, *entry.buffer, entry.offset, entry.size); } - - // Dereference buffers. 
- buffers.resize(count); - std::transform(buffer_pointers.begin(), buffer_pointers.end(), buffers.begin(), - [](const GLuint* pointer) { return *pointer; }); - - glBindBuffersRange(target, first, static_cast<GLsizei>(count), buffers.data(), offsets.data(), - sizes.data()); } void LabelGLObject(GLenum identifier, GLuint handle, VAddr addr, std::string_view extra_info) { diff --git a/src/video_core/renderer_opengl/utils.h b/src/video_core/renderer_opengl/utils.h index 6c2b45546..d56153fe7 100644 --- a/src/video_core/renderer_opengl/utils.h +++ b/src/video_core/renderer_opengl/utils.h @@ -43,20 +43,22 @@ public: explicit BindBuffersRangePushBuffer(GLenum target); ~BindBuffersRangePushBuffer(); - void Setup(GLuint first_); + void Setup(); - void Push(const GLuint* buffer, GLintptr offset, GLsizeiptr size); + void Push(GLuint binding, const GLuint* buffer, GLintptr offset, GLsizeiptr size); void Bind(); private: - GLenum target{}; - GLuint first{}; - std::vector<const GLuint*> buffer_pointers; + struct Entry { + GLuint binding; + const GLuint* buffer; + GLintptr offset; + GLsizeiptr size; + }; - std::vector<GLuint> buffers; - std::vector<GLintptr> offsets; - std::vector<GLsizeiptr> sizes; + GLenum target; + std::vector<Entry> entries; }; void LabelGLObject(GLenum identifier, GLuint handle, VAddr addr, std::string_view extra_info = {}); diff --git a/src/video_core/renderer_vulkan/declarations.h b/src/video_core/renderer_vulkan/declarations.h index ba25b5bc7..323bf6b39 100644 --- a/src/video_core/renderer_vulkan/declarations.h +++ b/src/video_core/renderer_vulkan/declarations.h @@ -4,6 +4,17 @@ #pragma once +namespace vk { +class DispatchLoaderDynamic; +} + +namespace Vulkan { +constexpr vk::DispatchLoaderDynamic* dont_use_me_dld = nullptr; +} + +#define VULKAN_HPP_DEFAULT_DISPATCHER (*::Vulkan::dont_use_me_dld) +#define VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL 0 +#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1 #include <vulkan/vulkan.hpp> namespace Vulkan { @@ -41,5 +52,7 @@ 
using UniqueSemaphore = UniqueHandle<vk::Semaphore>; using UniqueShaderModule = UniqueHandle<vk::ShaderModule>; using UniqueSwapchainKHR = UniqueHandle<vk::SwapchainKHR>; using UniqueValidationCacheEXT = UniqueHandle<vk::ValidationCacheEXT>; +using UniqueDebugReportCallbackEXT = UniqueHandle<vk::DebugReportCallbackEXT>; +using UniqueDebugUtilsMessengerEXT = UniqueHandle<vk::DebugUtilsMessengerEXT>; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp new file mode 100644 index 000000000..5a490f6ef --- /dev/null +++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp @@ -0,0 +1,296 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <tuple> + +#include <boost/functional/hash.hpp> + +#include "common/common_types.h" +#include "video_core/renderer_vulkan/fixed_pipeline_state.h" + +namespace Vulkan { + +namespace { + +constexpr FixedPipelineState::DepthStencil GetDepthStencilState(const Maxwell& regs) { + const FixedPipelineState::StencilFace front_stencil( + regs.stencil_front_op_fail, regs.stencil_front_op_zfail, regs.stencil_front_op_zpass, + regs.stencil_front_func_func); + const FixedPipelineState::StencilFace back_stencil = + regs.stencil_two_side_enable + ? 
FixedPipelineState::StencilFace(regs.stencil_back_op_fail, regs.stencil_back_op_zfail, + regs.stencil_back_op_zpass, + regs.stencil_back_func_func) + : front_stencil; + return FixedPipelineState::DepthStencil( + regs.depth_test_enable == 1, regs.depth_write_enabled == 1, regs.depth_bounds_enable == 1, + regs.stencil_enable == 1, regs.depth_test_func, front_stencil, back_stencil); +} + +constexpr FixedPipelineState::InputAssembly GetInputAssemblyState(const Maxwell& regs) { + return FixedPipelineState::InputAssembly( + regs.draw.topology, regs.primitive_restart.enabled, + regs.draw.topology == Maxwell::PrimitiveTopology::Points ? regs.point_size : 0.0f); +} + +constexpr FixedPipelineState::BlendingAttachment GetBlendingAttachmentState( + const Maxwell& regs, std::size_t render_target) { + const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : render_target]; + const std::array components = {mask.R != 0, mask.G != 0, mask.B != 0, mask.A != 0}; + + const FixedPipelineState::BlendingAttachment default_blending( + false, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One, + Maxwell::Blend::Factor::Zero, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One, + Maxwell::Blend::Factor::Zero, components); + if (render_target >= regs.rt_control.count) { + return default_blending; + } + + if (!regs.independent_blend_enable) { + const auto& src = regs.blend; + if (!src.enable[render_target]) { + return default_blending; + } + return FixedPipelineState::BlendingAttachment( + true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a, + src.factor_source_a, src.factor_dest_a, components); + } + + if (!regs.blend.enable[render_target]) { + return default_blending; + } + const auto& src = regs.independent_blend[render_target]; + return FixedPipelineState::BlendingAttachment( + true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a, + src.factor_source_a, src.factor_dest_a, components); +} + +constexpr 
FixedPipelineState::ColorBlending GetColorBlendingState(const Maxwell& regs) { + return FixedPipelineState::ColorBlending( + {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b, regs.blend_color.a}, + regs.rt_control.count, + {GetBlendingAttachmentState(regs, 0), GetBlendingAttachmentState(regs, 1), + GetBlendingAttachmentState(regs, 2), GetBlendingAttachmentState(regs, 3), + GetBlendingAttachmentState(regs, 4), GetBlendingAttachmentState(regs, 5), + GetBlendingAttachmentState(regs, 6), GetBlendingAttachmentState(regs, 7)}); +} + +constexpr FixedPipelineState::Tessellation GetTessellationState(const Maxwell& regs) { + return FixedPipelineState::Tessellation(regs.patch_vertices, regs.tess_mode.prim, + regs.tess_mode.spacing, regs.tess_mode.cw != 0); +} + +constexpr std::size_t Point = 0; +constexpr std::size_t Line = 1; +constexpr std::size_t Polygon = 2; +constexpr std::array PolygonOffsetEnableLUT = { + Point, // Points + Line, // Lines + Line, // LineLoop + Line, // LineStrip + Polygon, // Triangles + Polygon, // TriangleStrip + Polygon, // TriangleFan + Polygon, // Quads + Polygon, // QuadStrip + Polygon, // Polygon + Line, // LinesAdjacency + Line, // LineStripAdjacency + Polygon, // TrianglesAdjacency + Polygon, // TriangleStripAdjacency + Polygon, // Patches +}; + +constexpr FixedPipelineState::Rasterizer GetRasterizerState(const Maxwell& regs) { + const std::array enabled_lut = {regs.polygon_offset_point_enable, + regs.polygon_offset_line_enable, + regs.polygon_offset_fill_enable}; + const auto topology = static_cast<std::size_t>(regs.draw.topology.Value()); + const bool depth_bias_enabled = enabled_lut[PolygonOffsetEnableLUT[topology]]; + + Maxwell::Cull::FrontFace front_face = regs.cull.front_face; + if (regs.screen_y_control.triangle_rast_flip != 0 && + regs.viewport_transform[0].scale_y > 0.0f) { + if (front_face == Maxwell::Cull::FrontFace::CounterClockWise) + front_face = Maxwell::Cull::FrontFace::ClockWise; + else if (front_face == 
Maxwell::Cull::FrontFace::ClockWise) + front_face = Maxwell::Cull::FrontFace::CounterClockWise; + } + + const bool gl_ndc = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne; + return FixedPipelineState::Rasterizer(regs.cull.enabled, depth_bias_enabled, gl_ndc, + regs.cull.cull_face, front_face); +} + +} // Anonymous namespace + +std::size_t FixedPipelineState::VertexBinding::Hash() const noexcept { + return (index << stride) ^ divisor; +} + +bool FixedPipelineState::VertexBinding::operator==(const VertexBinding& rhs) const noexcept { + return std::tie(index, stride, divisor) == std::tie(rhs.index, rhs.stride, rhs.divisor); +} + +std::size_t FixedPipelineState::VertexAttribute::Hash() const noexcept { + return static_cast<std::size_t>(index) ^ (static_cast<std::size_t>(buffer) << 13) ^ + (static_cast<std::size_t>(type) << 22) ^ (static_cast<std::size_t>(size) << 31) ^ + (static_cast<std::size_t>(offset) << 36); +} + +bool FixedPipelineState::VertexAttribute::operator==(const VertexAttribute& rhs) const noexcept { + return std::tie(index, buffer, type, size, offset) == + std::tie(rhs.index, rhs.buffer, rhs.type, rhs.size, rhs.offset); +} + +std::size_t FixedPipelineState::StencilFace::Hash() const noexcept { + return static_cast<std::size_t>(action_stencil_fail) ^ + (static_cast<std::size_t>(action_depth_fail) << 4) ^ + (static_cast<std::size_t>(action_depth_fail) << 20) ^ + (static_cast<std::size_t>(action_depth_pass) << 36); +} + +bool FixedPipelineState::StencilFace::operator==(const StencilFace& rhs) const noexcept { + return std::tie(action_stencil_fail, action_depth_fail, action_depth_pass, test_func) == + std::tie(rhs.action_stencil_fail, rhs.action_depth_fail, rhs.action_depth_pass, + rhs.test_func); +} + +std::size_t FixedPipelineState::BlendingAttachment::Hash() const noexcept { + return static_cast<std::size_t>(enable) ^ (static_cast<std::size_t>(rgb_equation) << 5) ^ + (static_cast<std::size_t>(src_rgb_func) << 10) ^ + 
(static_cast<std::size_t>(dst_rgb_func) << 15) ^ + (static_cast<std::size_t>(a_equation) << 20) ^ + (static_cast<std::size_t>(src_a_func) << 25) ^ + (static_cast<std::size_t>(dst_a_func) << 30) ^ + (static_cast<std::size_t>(components[0]) << 35) ^ + (static_cast<std::size_t>(components[1]) << 36) ^ + (static_cast<std::size_t>(components[2]) << 37) ^ + (static_cast<std::size_t>(components[3]) << 38); +} + +bool FixedPipelineState::BlendingAttachment::operator==(const BlendingAttachment& rhs) const + noexcept { + return std::tie(enable, rgb_equation, src_rgb_func, dst_rgb_func, a_equation, src_a_func, + dst_a_func, components) == + std::tie(rhs.enable, rhs.rgb_equation, rhs.src_rgb_func, rhs.dst_rgb_func, + rhs.a_equation, rhs.src_a_func, rhs.dst_a_func, rhs.components); +} + +std::size_t FixedPipelineState::VertexInput::Hash() const noexcept { + std::size_t hash = num_bindings ^ (num_attributes << 32); + for (std::size_t i = 0; i < num_bindings; ++i) { + boost::hash_combine(hash, bindings[i].Hash()); + } + for (std::size_t i = 0; i < num_attributes; ++i) { + boost::hash_combine(hash, attributes[i].Hash()); + } + return hash; +} + +bool FixedPipelineState::VertexInput::operator==(const VertexInput& rhs) const noexcept { + return std::equal(bindings.begin(), bindings.begin() + num_bindings, rhs.bindings.begin(), + rhs.bindings.begin() + rhs.num_bindings) && + std::equal(attributes.begin(), attributes.begin() + num_attributes, + rhs.attributes.begin(), rhs.attributes.begin() + rhs.num_attributes); +} + +std::size_t FixedPipelineState::InputAssembly::Hash() const noexcept { + std::size_t point_size_int = 0; + std::memcpy(&point_size_int, &point_size, sizeof(point_size)); + return (static_cast<std::size_t>(topology) << 24) ^ (point_size_int << 32) ^ + static_cast<std::size_t>(primitive_restart_enable); +} + +bool FixedPipelineState::InputAssembly::operator==(const InputAssembly& rhs) const noexcept { + return std::tie(topology, primitive_restart_enable, point_size) == + 
std::tie(rhs.topology, rhs.primitive_restart_enable, rhs.point_size); +} + +std::size_t FixedPipelineState::Tessellation::Hash() const noexcept { + return static_cast<std::size_t>(patch_control_points) ^ + (static_cast<std::size_t>(primitive) << 6) ^ (static_cast<std::size_t>(spacing) << 8) ^ + (static_cast<std::size_t>(clockwise) << 10); +} + +bool FixedPipelineState::Tessellation::operator==(const Tessellation& rhs) const noexcept { + return std::tie(patch_control_points, primitive, spacing, clockwise) == + std::tie(rhs.patch_control_points, rhs.primitive, rhs.spacing, rhs.clockwise); +} + +std::size_t FixedPipelineState::Rasterizer::Hash() const noexcept { + return static_cast<std::size_t>(cull_enable) ^ + (static_cast<std::size_t>(depth_bias_enable) << 1) ^ + (static_cast<std::size_t>(ndc_minus_one_to_one) << 2) ^ + (static_cast<std::size_t>(cull_face) << 24) ^ + (static_cast<std::size_t>(front_face) << 48); +} + +bool FixedPipelineState::Rasterizer::operator==(const Rasterizer& rhs) const noexcept { + return std::tie(cull_enable, depth_bias_enable, ndc_minus_one_to_one, cull_face, front_face) == + std::tie(rhs.cull_enable, rhs.depth_bias_enable, rhs.ndc_minus_one_to_one, rhs.cull_face, + rhs.front_face); +} + +std::size_t FixedPipelineState::DepthStencil::Hash() const noexcept { + std::size_t hash = static_cast<std::size_t>(depth_test_enable) ^ + (static_cast<std::size_t>(depth_write_enable) << 1) ^ + (static_cast<std::size_t>(depth_bounds_enable) << 2) ^ + (static_cast<std::size_t>(stencil_enable) << 3) ^ + (static_cast<std::size_t>(depth_test_function) << 4); + boost::hash_combine(hash, front_stencil.Hash()); + boost::hash_combine(hash, back_stencil.Hash()); + return hash; +} + +bool FixedPipelineState::DepthStencil::operator==(const DepthStencil& rhs) const noexcept { + return std::tie(depth_test_enable, depth_write_enable, depth_bounds_enable, depth_test_function, + stencil_enable, front_stencil, back_stencil) == + std::tie(rhs.depth_test_enable, 
rhs.depth_write_enable, rhs.depth_bounds_enable, + rhs.depth_test_function, rhs.stencil_enable, rhs.front_stencil, + rhs.back_stencil); +} + +std::size_t FixedPipelineState::ColorBlending::Hash() const noexcept { + std::size_t hash = attachments_count << 13; + for (std::size_t rt = 0; rt < static_cast<std::size_t>(attachments_count); ++rt) { + boost::hash_combine(hash, attachments[rt].Hash()); + } + return hash; +} + +bool FixedPipelineState::ColorBlending::operator==(const ColorBlending& rhs) const noexcept { + return std::equal(attachments.begin(), attachments.begin() + attachments_count, + rhs.attachments.begin(), rhs.attachments.begin() + rhs.attachments_count); +} + +std::size_t FixedPipelineState::Hash() const noexcept { + std::size_t hash = 0; + boost::hash_combine(hash, vertex_input.Hash()); + boost::hash_combine(hash, input_assembly.Hash()); + boost::hash_combine(hash, tessellation.Hash()); + boost::hash_combine(hash, rasterizer.Hash()); + boost::hash_combine(hash, depth_stencil.Hash()); + boost::hash_combine(hash, color_blending.Hash()); + return hash; +} + +bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept { + return std::tie(vertex_input, input_assembly, tessellation, rasterizer, depth_stencil, + color_blending) == std::tie(rhs.vertex_input, rhs.input_assembly, + rhs.tessellation, rhs.rasterizer, rhs.depth_stencil, + rhs.color_blending); +} + +FixedPipelineState GetFixedPipelineState(const Maxwell& regs) { + FixedPipelineState fixed_state; + fixed_state.input_assembly = GetInputAssemblyState(regs); + fixed_state.tessellation = GetTessellationState(regs); + fixed_state.rasterizer = GetRasterizerState(regs); + fixed_state.depth_stencil = GetDepthStencilState(regs); + fixed_state.color_blending = GetColorBlendingState(regs); + return fixed_state; +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h new file mode 100644 index 
000000000..04152c0d4 --- /dev/null +++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h @@ -0,0 +1,282 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <array> +#include <type_traits> + +#include "common/common_types.h" + +#include "video_core/engines/maxwell_3d.h" +#include "video_core/surface.h" + +namespace Vulkan { + +using Maxwell = Tegra::Engines::Maxwell3D::Regs; + +// TODO(Rodrigo): Optimize this structure. + +struct FixedPipelineState { + using PixelFormat = VideoCore::Surface::PixelFormat; + + struct VertexBinding { + constexpr VertexBinding(u32 index, u32 stride, u32 divisor) + : index{index}, stride{stride}, divisor{divisor} {} + VertexBinding() = default; + + u32 index; + u32 stride; + u32 divisor; + + std::size_t Hash() const noexcept; + + bool operator==(const VertexBinding& rhs) const noexcept; + + bool operator!=(const VertexBinding& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct VertexAttribute { + constexpr VertexAttribute(u32 index, u32 buffer, Maxwell::VertexAttribute::Type type, + Maxwell::VertexAttribute::Size size, u32 offset) + : index{index}, buffer{buffer}, type{type}, size{size}, offset{offset} {} + VertexAttribute() = default; + + u32 index; + u32 buffer; + Maxwell::VertexAttribute::Type type; + Maxwell::VertexAttribute::Size size; + u32 offset; + + std::size_t Hash() const noexcept; + + bool operator==(const VertexAttribute& rhs) const noexcept; + + bool operator!=(const VertexAttribute& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct StencilFace { + constexpr StencilFace(Maxwell::StencilOp action_stencil_fail, + Maxwell::StencilOp action_depth_fail, + Maxwell::StencilOp action_depth_pass, Maxwell::ComparisonOp test_func) + : action_stencil_fail{action_stencil_fail}, action_depth_fail{action_depth_fail}, + action_depth_pass{action_depth_pass}, test_func{test_func} {} + 
StencilFace() = default; + + Maxwell::StencilOp action_stencil_fail; + Maxwell::StencilOp action_depth_fail; + Maxwell::StencilOp action_depth_pass; + Maxwell::ComparisonOp test_func; + + std::size_t Hash() const noexcept; + + bool operator==(const StencilFace& rhs) const noexcept; + + bool operator!=(const StencilFace& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct BlendingAttachment { + constexpr BlendingAttachment(bool enable, Maxwell::Blend::Equation rgb_equation, + Maxwell::Blend::Factor src_rgb_func, + Maxwell::Blend::Factor dst_rgb_func, + Maxwell::Blend::Equation a_equation, + Maxwell::Blend::Factor src_a_func, + Maxwell::Blend::Factor dst_a_func, + std::array<bool, 4> components) + : enable{enable}, rgb_equation{rgb_equation}, src_rgb_func{src_rgb_func}, + dst_rgb_func{dst_rgb_func}, a_equation{a_equation}, src_a_func{src_a_func}, + dst_a_func{dst_a_func}, components{components} {} + BlendingAttachment() = default; + + bool enable; + Maxwell::Blend::Equation rgb_equation; + Maxwell::Blend::Factor src_rgb_func; + Maxwell::Blend::Factor dst_rgb_func; + Maxwell::Blend::Equation a_equation; + Maxwell::Blend::Factor src_a_func; + Maxwell::Blend::Factor dst_a_func; + std::array<bool, 4> components; + + std::size_t Hash() const noexcept; + + bool operator==(const BlendingAttachment& rhs) const noexcept; + + bool operator!=(const BlendingAttachment& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct VertexInput { + std::size_t num_bindings = 0; + std::size_t num_attributes = 0; + std::array<VertexBinding, Maxwell::NumVertexArrays> bindings; + std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes; + + std::size_t Hash() const noexcept; + + bool operator==(const VertexInput& rhs) const noexcept; + + bool operator!=(const VertexInput& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct InputAssembly { + constexpr InputAssembly(Maxwell::PrimitiveTopology topology, bool primitive_restart_enable, + 
float point_size) + : topology{topology}, primitive_restart_enable{primitive_restart_enable}, + point_size{point_size} {} + InputAssembly() = default; + + Maxwell::PrimitiveTopology topology; + bool primitive_restart_enable; + float point_size; + + std::size_t Hash() const noexcept; + + bool operator==(const InputAssembly& rhs) const noexcept; + + bool operator!=(const InputAssembly& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct Tessellation { + constexpr Tessellation(u32 patch_control_points, Maxwell::TessellationPrimitive primitive, + Maxwell::TessellationSpacing spacing, bool clockwise) + : patch_control_points{patch_control_points}, primitive{primitive}, spacing{spacing}, + clockwise{clockwise} {} + Tessellation() = default; + + u32 patch_control_points; + Maxwell::TessellationPrimitive primitive; + Maxwell::TessellationSpacing spacing; + bool clockwise; + + std::size_t Hash() const noexcept; + + bool operator==(const Tessellation& rhs) const noexcept; + + bool operator!=(const Tessellation& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct Rasterizer { + constexpr Rasterizer(bool cull_enable, bool depth_bias_enable, bool ndc_minus_one_to_one, + Maxwell::Cull::CullFace cull_face, Maxwell::Cull::FrontFace front_face) + : cull_enable{cull_enable}, depth_bias_enable{depth_bias_enable}, + ndc_minus_one_to_one{ndc_minus_one_to_one}, cull_face{cull_face}, front_face{ + front_face} {} + Rasterizer() = default; + + bool cull_enable; + bool depth_bias_enable; + bool ndc_minus_one_to_one; + Maxwell::Cull::CullFace cull_face; + Maxwell::Cull::FrontFace front_face; + + std::size_t Hash() const noexcept; + + bool operator==(const Rasterizer& rhs) const noexcept; + + bool operator!=(const Rasterizer& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct DepthStencil { + constexpr DepthStencil(bool depth_test_enable, bool depth_write_enable, + bool depth_bounds_enable, bool stencil_enable, + Maxwell::ComparisonOp 
depth_test_function, StencilFace front_stencil, + StencilFace back_stencil) + : depth_test_enable{depth_test_enable}, depth_write_enable{depth_write_enable}, + depth_bounds_enable{depth_bounds_enable}, stencil_enable{stencil_enable}, + depth_test_function{depth_test_function}, front_stencil{front_stencil}, + back_stencil{back_stencil} {} + DepthStencil() = default; + + bool depth_test_enable; + bool depth_write_enable; + bool depth_bounds_enable; + bool stencil_enable; + Maxwell::ComparisonOp depth_test_function; + StencilFace front_stencil; + StencilFace back_stencil; + + std::size_t Hash() const noexcept; + + bool operator==(const DepthStencil& rhs) const noexcept; + + bool operator!=(const DepthStencil& rhs) const noexcept { + return !operator==(rhs); + } + }; + + struct ColorBlending { + constexpr ColorBlending( + std::array<float, 4> blend_constants, std::size_t attachments_count, + std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments) + : attachments_count{attachments_count}, attachments{attachments} {} + ColorBlending() = default; + + std::size_t attachments_count; + std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments; + + std::size_t Hash() const noexcept; + + bool operator==(const ColorBlending& rhs) const noexcept; + + bool operator!=(const ColorBlending& rhs) const noexcept { + return !operator==(rhs); + } + }; + + std::size_t Hash() const noexcept; + + bool operator==(const FixedPipelineState& rhs) const noexcept; + + bool operator!=(const FixedPipelineState& rhs) const noexcept { + return !operator==(rhs); + } + + VertexInput vertex_input; + InputAssembly input_assembly; + Tessellation tessellation; + Rasterizer rasterizer; + DepthStencil depth_stencil; + ColorBlending color_blending; +}; +static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexBinding>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexAttribute>); 
+static_assert(std::is_trivially_copyable_v<FixedPipelineState::StencilFace>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::BlendingAttachment>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexInput>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::InputAssembly>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::Tessellation>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::Rasterizer>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::DepthStencil>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState::ColorBlending>); +static_assert(std::is_trivially_copyable_v<FixedPipelineState>); + +FixedPipelineState GetFixedPipelineState(const Maxwell& regs); + +} // namespace Vulkan + +namespace std { + +template <> +struct hash<Vulkan::FixedPipelineState> { + std::size_t operator()(const Vulkan::FixedPipelineState& k) const noexcept { + return k.Hash(); + } +}; + +} // namespace std diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp index 3c5acda3e..000e3616d 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp @@ -13,6 +13,8 @@ namespace Vulkan::MaxwellToVK { +using Maxwell = Tegra::Engines::Maxwell3D::Regs; + namespace Sampler { vk::Filter Filter(Tegra::Texture::TextureFilter filter) { @@ -42,7 +44,8 @@ vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filt return {}; } -vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode) { +vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode, + Tegra::Texture::TextureFilter filter) { switch (wrap_mode) { case Tegra::Texture::WrapMode::Wrap: return vk::SamplerAddressMode::eRepeat; @@ -53,10 +56,15 @@ vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode) { case Tegra::Texture::WrapMode::Border: return 
vk::SamplerAddressMode::eClampToBorder; case Tegra::Texture::WrapMode::Clamp: - // TODO(Rodrigo): GL_CLAMP was removed as of OpenGL 3.1, to implement GL_CLAMP, we can use - // eClampToBorder to get the border color of the texture, and then sample the edge to - // manually mix them. However the shader part of this is not yet implemented. - return vk::SamplerAddressMode::eClampToBorder; + // TODO(Rodrigo): Emulate GL_CLAMP properly + switch (filter) { + case Tegra::Texture::TextureFilter::Nearest: + return vk::SamplerAddressMode::eClampToEdge; + case Tegra::Texture::TextureFilter::Linear: + return vk::SamplerAddressMode::eClampToBorder; + } + UNREACHABLE(); + return vk::SamplerAddressMode::eClampToEdge; case Tegra::Texture::WrapMode::MirrorOnceClampToEdge: return vk::SamplerAddressMode::eMirrorClampToEdge; case Tegra::Texture::WrapMode::MirrorOnceBorder: @@ -94,128 +102,161 @@ vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compar } // namespace Sampler -struct FormatTuple { - vk::Format format; ///< Vulkan format - ComponentType component_type; ///< Abstracted component type - bool attachable; ///< True when this format can be used as an attachment -}; +namespace { -static constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{ - {vk::Format::eA8B8G8R8UnormPack32, ComponentType::UNorm, true}, // ABGR8U - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ABGR8S - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ABGR8UI - {vk::Format::eB5G6R5UnormPack16, ComponentType::UNorm, false}, // B5G6R5U - {vk::Format::eA2B10G10R10UnormPack32, ComponentType::UNorm, true}, // A2B10G10R10U - {vk::Format::eUndefined, ComponentType::Invalid, false}, // A1B5G5R5U - {vk::Format::eR8Unorm, ComponentType::UNorm, true}, // R8U - {vk::Format::eUndefined, ComponentType::Invalid, false}, // R8UI - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA16F - {vk::Format::eUndefined, 
ComponentType::Invalid, false}, // RGBA16U - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA16UI - {vk::Format::eUndefined, ComponentType::Invalid, false}, // R11FG11FB10F - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA32UI - {vk::Format::eBc1RgbaUnormBlock, ComponentType::UNorm, false}, // DXT1 - {vk::Format::eBc2UnormBlock, ComponentType::UNorm, false}, // DXT23 - {vk::Format::eBc3UnormBlock, ComponentType::UNorm, false}, // DXT45 - {vk::Format::eBc4UnormBlock, ComponentType::UNorm, false}, // DXN1 - {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXN2UNORM - {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXN2SNORM - {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC7U - {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC6H_UF16 - {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC6H_SF16 - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_4X4 - {vk::Format::eUndefined, ComponentType::Invalid, false}, // BGRA8 - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA32F - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG32F - {vk::Format::eUndefined, ComponentType::Invalid, false}, // R32F - {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16F - {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16U - {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16S - {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16UI - {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16I - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16 - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16F - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16UI - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16I - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16S - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGB32F - 
{vk::Format::eA8B8G8R8SrgbPack32, ComponentType::UNorm, true}, // RGBA8_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG8U - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG8S - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG32UI - {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBX16F - {vk::Format::eUndefined, ComponentType::Invalid, false}, // R32UI - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X8 - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X5 - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X4 - - // Compressed sRGB formats - {vk::Format::eUndefined, ComponentType::Invalid, false}, // BGRA8_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXT1_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXT23_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXT45_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC7U_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_4X4_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X8_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X5_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X4_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X5 - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X5_SRGB - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_10X8 - {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_10X8_SRGB +enum : u32 { Attachable = 1, Storage = 2 }; + +struct FormatTuple { + vk::Format format; ///< Vulkan format + int usage; ///< Describes image format usage +} constexpr tex_format_tuples[] = { + {vk::Format::eA8B8G8R8UnormPack32, Attachable | Storage}, // ABGR8U + {vk::Format::eA8B8G8R8SnormPack32, Attachable | Storage}, // ABGR8S + 
{vk::Format::eA8B8G8R8UintPack32, Attachable | Storage}, // ABGR8UI + {vk::Format::eB5G6R5UnormPack16, {}}, // B5G6R5U + {vk::Format::eA2B10G10R10UnormPack32, Attachable | Storage}, // A2B10G10R10U + {vk::Format::eA1R5G5B5UnormPack16, Attachable | Storage}, // A1B5G5R5U (flipped with swizzle) + {vk::Format::eR8Unorm, Attachable | Storage}, // R8U + {vk::Format::eR8Uint, Attachable | Storage}, // R8UI + {vk::Format::eR16G16B16A16Sfloat, Attachable | Storage}, // RGBA16F + {vk::Format::eR16G16B16A16Unorm, Attachable | Storage}, // RGBA16U + {vk::Format::eR16G16B16A16Uint, Attachable | Storage}, // RGBA16UI + {vk::Format::eB10G11R11UfloatPack32, Attachable | Storage}, // R11FG11FB10F + {vk::Format::eR32G32B32A32Uint, Attachable | Storage}, // RGBA32UI + {vk::Format::eBc1RgbaUnormBlock, {}}, // DXT1 + {vk::Format::eBc2UnormBlock, {}}, // DXT23 + {vk::Format::eBc3UnormBlock, {}}, // DXT45 + {vk::Format::eBc4UnormBlock, {}}, // DXN1 + {vk::Format::eBc5UnormBlock, {}}, // DXN2UNORM + {vk::Format::eBc5SnormBlock, {}}, // DXN2SNORM + {vk::Format::eBc7UnormBlock, {}}, // BC7U + {vk::Format::eBc6HUfloatBlock, {}}, // BC6H_UF16 + {vk::Format::eBc6HSfloatBlock, {}}, // BC6H_SF16 + {vk::Format::eAstc4x4UnormBlock, {}}, // ASTC_2D_4X4 + {vk::Format::eB8G8R8A8Unorm, {}}, // BGRA8 + {vk::Format::eR32G32B32A32Sfloat, Attachable | Storage}, // RGBA32F + {vk::Format::eR32G32Sfloat, Attachable | Storage}, // RG32F + {vk::Format::eR32Sfloat, Attachable | Storage}, // R32F + {vk::Format::eR16Sfloat, Attachable | Storage}, // R16F + {vk::Format::eR16Unorm, Attachable | Storage}, // R16U + {vk::Format::eUndefined, {}}, // R16S + {vk::Format::eUndefined, {}}, // R16UI + {vk::Format::eUndefined, {}}, // R16I + {vk::Format::eR16G16Unorm, Attachable | Storage}, // RG16 + {vk::Format::eR16G16Sfloat, Attachable | Storage}, // RG16F + {vk::Format::eUndefined, {}}, // RG16UI + {vk::Format::eUndefined, {}}, // RG16I + {vk::Format::eR16G16Snorm, Attachable | Storage}, // RG16S + 
{vk::Format::eUndefined, {}}, // RGB32F + {vk::Format::eR8G8B8A8Srgb, Attachable}, // RGBA8_SRGB + {vk::Format::eR8G8Unorm, Attachable | Storage}, // RG8U + {vk::Format::eR8G8Snorm, Attachable | Storage}, // RG8S + {vk::Format::eR32G32Uint, Attachable | Storage}, // RG32UI + {vk::Format::eUndefined, {}}, // RGBX16F + {vk::Format::eR32Uint, Attachable | Storage}, // R32UI + {vk::Format::eAstc8x8UnormBlock, {}}, // ASTC_2D_8X8 + {vk::Format::eUndefined, {}}, // ASTC_2D_8X5 + {vk::Format::eUndefined, {}}, // ASTC_2D_5X4 + {vk::Format::eUndefined, {}}, // BGRA8_SRGB + {vk::Format::eBc1RgbaSrgbBlock, {}}, // DXT1_SRGB + {vk::Format::eUndefined, {}}, // DXT23_SRGB + {vk::Format::eBc3SrgbBlock, {}}, // DXT45_SRGB + {vk::Format::eBc7SrgbBlock, {}}, // BC7U_SRGB + {vk::Format::eR4G4B4A4UnormPack16, Attachable}, // R4G4B4A4U + {vk::Format::eAstc4x4SrgbBlock, {}}, // ASTC_2D_4X4_SRGB + {vk::Format::eAstc8x8SrgbBlock, {}}, // ASTC_2D_8X8_SRGB + {vk::Format::eAstc8x5SrgbBlock, {}}, // ASTC_2D_8X5_SRGB + {vk::Format::eAstc5x4SrgbBlock, {}}, // ASTC_2D_5X4_SRGB + {vk::Format::eAstc5x5UnormBlock, {}}, // ASTC_2D_5X5 + {vk::Format::eAstc5x5SrgbBlock, {}}, // ASTC_2D_5X5_SRGB + {vk::Format::eAstc10x8UnormBlock, {}}, // ASTC_2D_10X8 + {vk::Format::eAstc10x8SrgbBlock, {}}, // ASTC_2D_10X8_SRGB + {vk::Format::eAstc6x6UnormBlock, {}}, // ASTC_2D_6X6 + {vk::Format::eAstc6x6SrgbBlock, {}}, // ASTC_2D_6X6_SRGB + {vk::Format::eAstc10x10UnormBlock, {}}, // ASTC_2D_10X10 + {vk::Format::eAstc10x10SrgbBlock, {}}, // ASTC_2D_10X10_SRGB + {vk::Format::eAstc12x12UnormBlock, {}}, // ASTC_2D_12X12 + {vk::Format::eAstc12x12SrgbBlock, {}}, // ASTC_2D_12X12_SRGB + {vk::Format::eAstc8x6UnormBlock, {}}, // ASTC_2D_8X6 + {vk::Format::eAstc8x6SrgbBlock, {}}, // ASTC_2D_8X6_SRGB + {vk::Format::eAstc6x5UnormBlock, {}}, // ASTC_2D_6X5 + {vk::Format::eAstc6x5SrgbBlock, {}}, // ASTC_2D_6X5_SRGB + {vk::Format::eE5B9G9R9UfloatPack32, {}}, // E5B9G9R9F // Depth formats - {vk::Format::eD32Sfloat, 
ComponentType::Float, true}, // Z32F - {vk::Format::eD16Unorm, ComponentType::UNorm, true}, // Z16 + {vk::Format::eD32Sfloat, Attachable}, // Z32F + {vk::Format::eD16Unorm, Attachable}, // Z16 // DepthStencil formats - {vk::Format::eD24UnormS8Uint, ComponentType::UNorm, true}, // Z24S8 - {vk::Format::eD24UnormS8Uint, ComponentType::UNorm, true}, // S8Z24 (emulated) - {vk::Format::eUndefined, ComponentType::Invalid, false}, // Z32FS8 -}}; + {vk::Format::eD24UnormS8Uint, Attachable}, // Z24S8 + {vk::Format::eD24UnormS8Uint, Attachable}, // S8Z24 (emulated) + {vk::Format::eD32SfloatS8Uint, Attachable}, // Z32FS8 +}; +static_assert(std::size(tex_format_tuples) == VideoCore::Surface::MaxPixelFormat); -static constexpr bool IsZetaFormat(PixelFormat pixel_format) { +constexpr bool IsZetaFormat(PixelFormat pixel_format) { return pixel_format >= PixelFormat::MaxColorFormat && pixel_format < PixelFormat::MaxDepthStencilFormat; } -std::pair<vk::Format, bool> SurfaceFormat(const VKDevice& device, FormatType format_type, - PixelFormat pixel_format, ComponentType component_type) { - ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size()); - - const auto tuple = tex_format_tuples[static_cast<u32>(pixel_format)]; - UNIMPLEMENTED_IF_MSG(tuple.format == vk::Format::eUndefined, - "Unimplemented texture format with pixel format={} and component type={}", - static_cast<u32>(pixel_format), static_cast<u32>(component_type)); - ASSERT_MSG(component_type == tuple.component_type, "Component type mismatch"); - - auto usage = vk::FormatFeatureFlagBits::eSampledImage | - vk::FormatFeatureFlagBits::eTransferDst | vk::FormatFeatureFlagBits::eTransferSrc; - if (tuple.attachable) { - usage |= IsZetaFormat(pixel_format) ? 
vk::FormatFeatureFlagBits::eDepthStencilAttachment - : vk::FormatFeatureFlagBits::eColorAttachment; +} // Anonymous namespace + +FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFormat pixel_format) { + ASSERT(static_cast<std::size_t>(pixel_format) < std::size(tex_format_tuples)); + + auto tuple = tex_format_tuples[static_cast<std::size_t>(pixel_format)]; + if (tuple.format == vk::Format::eUndefined) { + UNIMPLEMENTED_MSG("Unimplemented texture format with pixel format={}", + static_cast<u32>(pixel_format)); + return {vk::Format::eA8B8G8R8UnormPack32, true, true}; + } + + // Use ABGR8 on hardware that doesn't support ASTC natively + if (!device.IsOptimalAstcSupported() && VideoCore::Surface::IsPixelFormatASTC(pixel_format)) { + tuple.format = VideoCore::Surface::IsPixelFormatSRGB(pixel_format) + ? vk::Format::eA8B8G8R8SrgbPack32 + : vk::Format::eA8B8G8R8UnormPack32; } - return {device.GetSupportedFormat(tuple.format, usage, format_type), tuple.attachable}; + const bool attachable = tuple.usage & Attachable; + const bool storage = tuple.usage & Storage; + + vk::FormatFeatureFlags usage; + if (format_type == FormatType::Buffer) { + usage = vk::FormatFeatureFlagBits::eStorageTexelBuffer | + vk::FormatFeatureFlagBits::eUniformTexelBuffer; + } else { + usage = vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eTransferDst | + vk::FormatFeatureFlagBits::eTransferSrc; + if (attachable) { + usage |= IsZetaFormat(pixel_format) ? 
vk::FormatFeatureFlagBits::eDepthStencilAttachment + : vk::FormatFeatureFlagBits::eColorAttachment; + } + if (storage) { + usage |= vk::FormatFeatureFlagBits::eStorageImage; + } + } + return {device.GetSupportedFormat(tuple.format, usage, format_type), attachable, storage}; } -vk::ShaderStageFlagBits ShaderStage(Maxwell::ShaderStage stage) { +vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) { switch (stage) { - case Maxwell::ShaderStage::Vertex: + case Tegra::Engines::ShaderType::Vertex: return vk::ShaderStageFlagBits::eVertex; - case Maxwell::ShaderStage::TesselationControl: + case Tegra::Engines::ShaderType::TesselationControl: return vk::ShaderStageFlagBits::eTessellationControl; - case Maxwell::ShaderStage::TesselationEval: + case Tegra::Engines::ShaderType::TesselationEval: return vk::ShaderStageFlagBits::eTessellationEvaluation; - case Maxwell::ShaderStage::Geometry: + case Tegra::Engines::ShaderType::Geometry: return vk::ShaderStageFlagBits::eGeometry; - case Maxwell::ShaderStage::Fragment: + case Tegra::Engines::ShaderType::Fragment: return vk::ShaderStageFlagBits::eFragment; } UNIMPLEMENTED_MSG("Unimplemented shader stage={}", static_cast<u32>(stage)); return {}; } -vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology) { +vk::PrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device, + Maxwell::PrimitiveTopology topology) { switch (topology) { case Maxwell::PrimitiveTopology::Points: return vk::PrimitiveTopology::ePointList; @@ -227,6 +268,13 @@ vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology) { return vk::PrimitiveTopology::eTriangleList; case Maxwell::PrimitiveTopology::TriangleStrip: return vk::PrimitiveTopology::eTriangleStrip; + case Maxwell::PrimitiveTopology::TriangleFan: + return vk::PrimitiveTopology::eTriangleFan; + case Maxwell::PrimitiveTopology::Quads: + // TODO(Rodrigo): Use VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT whenever it releases + return 
vk::PrimitiveTopology::eTriangleList; + case Maxwell::PrimitiveTopology::Patches: + return vk::PrimitiveTopology::ePatchList; default: UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology)); return {}; @@ -236,37 +284,111 @@ vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology) { vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) { switch (type) { case Maxwell::VertexAttribute::Type::SignedNorm: + switch (size) { + case Maxwell::VertexAttribute::Size::Size_8: + return vk::Format::eR8Snorm; + case Maxwell::VertexAttribute::Size::Size_8_8: + return vk::Format::eR8G8Snorm; + case Maxwell::VertexAttribute::Size::Size_8_8_8: + return vk::Format::eR8G8B8Snorm; + case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + return vk::Format::eR8G8B8A8Snorm; + case Maxwell::VertexAttribute::Size::Size_16: + return vk::Format::eR16Snorm; + case Maxwell::VertexAttribute::Size::Size_16_16: + return vk::Format::eR16G16Snorm; + case Maxwell::VertexAttribute::Size::Size_16_16_16: + return vk::Format::eR16G16B16Snorm; + case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + return vk::Format::eR16G16B16A16Snorm; + case Maxwell::VertexAttribute::Size::Size_10_10_10_2: + return vk::Format::eA2B10G10R10SnormPack32; + default: + break; + } break; case Maxwell::VertexAttribute::Type::UnsignedNorm: switch (size) { + case Maxwell::VertexAttribute::Size::Size_8: + return vk::Format::eR8Unorm; + case Maxwell::VertexAttribute::Size::Size_8_8: + return vk::Format::eR8G8Unorm; + case Maxwell::VertexAttribute::Size::Size_8_8_8: + return vk::Format::eR8G8B8Unorm; case Maxwell::VertexAttribute::Size::Size_8_8_8_8: return vk::Format::eR8G8B8A8Unorm; + case Maxwell::VertexAttribute::Size::Size_16: + return vk::Format::eR16Unorm; + case Maxwell::VertexAttribute::Size::Size_16_16: + return vk::Format::eR16G16Unorm; + case Maxwell::VertexAttribute::Size::Size_16_16_16: + return vk::Format::eR16G16B16Unorm; + case 
Maxwell::VertexAttribute::Size::Size_16_16_16_16: + return vk::Format::eR16G16B16A16Unorm; default: break; } break; case Maxwell::VertexAttribute::Type::SignedInt: - break; + switch (size) { + case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + return vk::Format::eR16G16B16A16Sint; + case Maxwell::VertexAttribute::Size::Size_8: + return vk::Format::eR8Sint; + case Maxwell::VertexAttribute::Size::Size_8_8: + return vk::Format::eR8G8Sint; + case Maxwell::VertexAttribute::Size::Size_8_8_8: + return vk::Format::eR8G8B8Sint; + case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + return vk::Format::eR8G8B8A8Sint; + case Maxwell::VertexAttribute::Size::Size_32: + return vk::Format::eR32Sint; + default: + break; + } case Maxwell::VertexAttribute::Type::UnsignedInt: switch (size) { + case Maxwell::VertexAttribute::Size::Size_8: + return vk::Format::eR8Uint; + case Maxwell::VertexAttribute::Size::Size_8_8: + return vk::Format::eR8G8Uint; + case Maxwell::VertexAttribute::Size::Size_8_8_8: + return vk::Format::eR8G8B8Uint; + case Maxwell::VertexAttribute::Size::Size_8_8_8_8: + return vk::Format::eR8G8B8A8Uint; case Maxwell::VertexAttribute::Size::Size_32: return vk::Format::eR32Uint; default: break; } case Maxwell::VertexAttribute::Type::UnsignedScaled: + switch (size) { + case Maxwell::VertexAttribute::Size::Size_8_8: + return vk::Format::eR8G8Uscaled; + default: + break; + } + break; case Maxwell::VertexAttribute::Type::SignedScaled: break; case Maxwell::VertexAttribute::Type::Float: switch (size) { - case Maxwell::VertexAttribute::Size::Size_32_32_32_32: - return vk::Format::eR32G32B32A32Sfloat; - case Maxwell::VertexAttribute::Size::Size_32_32_32: - return vk::Format::eR32G32B32Sfloat; - case Maxwell::VertexAttribute::Size::Size_32_32: - return vk::Format::eR32G32Sfloat; case Maxwell::VertexAttribute::Size::Size_32: return vk::Format::eR32Sfloat; + case Maxwell::VertexAttribute::Size::Size_32_32: + return vk::Format::eR32G32Sfloat; + case 
Maxwell::VertexAttribute::Size::Size_32_32_32: + return vk::Format::eR32G32B32Sfloat; + case Maxwell::VertexAttribute::Size::Size_32_32_32_32: + return vk::Format::eR32G32B32A32Sfloat; + case Maxwell::VertexAttribute::Size::Size_16: + return vk::Format::eR16Sfloat; + case Maxwell::VertexAttribute::Size::Size_16_16: + return vk::Format::eR16G16Sfloat; + case Maxwell::VertexAttribute::Size::Size_16_16_16: + return vk::Format::eR16G16B16Sfloat; + case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + return vk::Format::eR16G16B16A16Sfloat; default: break; } @@ -308,11 +430,14 @@ vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison) { return {}; } -vk::IndexType IndexFormat(Maxwell::IndexFormat index_format) { +vk::IndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format) { switch (index_format) { case Maxwell::IndexFormat::UnsignedByte: - UNIMPLEMENTED_MSG("Vulkan does not support native u8 index format"); - return vk::IndexType::eUint16; + if (!device.IsExtIndexTypeUint8Supported()) { + UNIMPLEMENTED_MSG("Native uint8 indices are not supported on this device"); + return vk::IndexType::eUint16; + } + return vk::IndexType::eUint8EXT; case Maxwell::IndexFormat::UnsignedShort: return vk::IndexType::eUint16; case Maxwell::IndexFormat::UnsignedInt: diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h index 4cadc0721..1534b738b 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.h +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h @@ -4,7 +4,6 @@ #pragma once -#include <utility> #include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_vulkan/declarations.h" @@ -16,7 +15,6 @@ namespace Vulkan::MaxwellToVK { using Maxwell = Tegra::Engines::Maxwell3D::Regs; using PixelFormat = VideoCore::Surface::PixelFormat; -using ComponentType = VideoCore::Surface::ComponentType; namespace Sampler { @@ -24,24 +22,31 @@ vk::Filter 
Filter(Tegra::Texture::TextureFilter filter); vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter); -vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode); +vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode, + Tegra::Texture::TextureFilter filter); vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func); } // namespace Sampler -std::pair<vk::Format, bool> SurfaceFormat(const VKDevice& device, FormatType format_type, - PixelFormat pixel_format, ComponentType component_type); +struct FormatInfo { + vk::Format format; + bool attachable; + bool storage; +}; -vk::ShaderStageFlagBits ShaderStage(Maxwell::ShaderStage stage); +FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFormat pixel_format); -vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology); +vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage); + +vk::PrimitiveTopology PrimitiveTopology(const VKDevice& device, + Maxwell::PrimitiveTopology topology); vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size); vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison); -vk::IndexType IndexFormat(Maxwell::IndexFormat index_format); +vk::IndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format); vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op); diff --git a/src/video_core/renderer_vulkan/shaders/blit.frag b/src/video_core/renderer_vulkan/shaders/blit.frag new file mode 100644 index 000000000..a06ecd24a --- /dev/null +++ b/src/video_core/renderer_vulkan/shaders/blit.frag @@ -0,0 +1,24 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +/* + * Build instructions: + * $ glslangValidator -V $THIS_FILE -o output.spv + * $ spirv-opt -O --strip-debug output.spv -o optimized.spv + * $ xxd -i optimized.spv + * + * Then copy that bytecode to the C++ file + */ + +#version 460 core + +layout (location = 0) in vec2 frag_tex_coord; + +layout (location = 0) out vec4 color; + +layout (binding = 1) uniform sampler2D color_texture; + +void main() { + color = texture(color_texture, frag_tex_coord); +} diff --git a/src/video_core/renderer_vulkan/shaders/blit.vert b/src/video_core/renderer_vulkan/shaders/blit.vert new file mode 100644 index 000000000..c64d9235a --- /dev/null +++ b/src/video_core/renderer_vulkan/shaders/blit.vert @@ -0,0 +1,28 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +/* + * Build instructions: + * $ glslangValidator -V $THIS_FILE -o output.spv + * $ spirv-opt -O --strip-debug output.spv -o optimized.spv + * $ xxd -i optimized.spv + * + * Then copy that bytecode to the C++ file + */ + +#version 460 core + +layout (location = 0) in vec2 vert_position; +layout (location = 1) in vec2 vert_tex_coord; + +layout (location = 0) out vec2 frag_tex_coord; + +layout (set = 0, binding = 0) uniform MatrixBlock { + mat4 modelview_matrix; +}; + +void main() { + gl_Position = modelview_matrix * vec4(vert_position, 0.0, 1.0); + frag_tex_coord = vert_tex_coord; +} diff --git a/src/video_core/renderer_vulkan/shaders/quad_array.comp b/src/video_core/renderer_vulkan/shaders/quad_array.comp new file mode 100644 index 000000000..5a5703308 --- /dev/null +++ b/src/video_core/renderer_vulkan/shaders/quad_array.comp @@ -0,0 +1,37 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +/* + * Build instructions: + * $ glslangValidator -V $THIS_FILE -o output.spv + * $ spirv-opt -O --strip-debug output.spv -o optimized.spv + * $ xxd -i optimized.spv + * + * Then copy that bytecode to the C++ file + */ + +#version 460 core + +layout (local_size_x = 1024) in; + +layout (std430, set = 0, binding = 0) buffer OutputBuffer { + uint output_indexes[]; +}; + +layout (push_constant) uniform PushConstants { + uint first; +}; + +void main() { + uint primitive = gl_GlobalInvocationID.x; + if (primitive * 6 >= output_indexes.length()) { + return; + } + + const uint quad_map[6] = uint[](0, 1, 2, 0, 2, 3); + for (uint vertex = 0; vertex < 6; ++vertex) { + uint index = first + primitive * 4 + quad_map[vertex]; + output_indexes[primitive * 6 + vertex] = index; + } +} diff --git a/src/video_core/renderer_vulkan/shaders/uint8.comp b/src/video_core/renderer_vulkan/shaders/uint8.comp new file mode 100644 index 000000000..a320f3ae0 --- /dev/null +++ b/src/video_core/renderer_vulkan/shaders/uint8.comp @@ -0,0 +1,33 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +/* + * Build instructions: + * $ glslangValidator -V $THIS_FILE -o output.spv + * $ spirv-opt -O --strip-debug output.spv -o optimized.spv + * $ xxd -i optimized.spv + * + * Then copy that bytecode to the C++ file + */ + +#version 460 core +#extension GL_EXT_shader_16bit_storage : require +#extension GL_EXT_shader_8bit_storage : require + +layout (local_size_x = 1024) in; + +layout (std430, set = 0, binding = 0) readonly buffer InputBuffer { + uint8_t input_indexes[]; +}; + +layout (std430, set = 0, binding = 1) writeonly buffer OutputBuffer { + uint16_t output_indexes[]; +}; + +void main() { + uint id = gl_GlobalInvocationID.x; + if (id < input_indexes.length()) { + output_indexes[id] = uint16_t(input_indexes[id]); + } +} diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index d2e9f4031..46da81aaa 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp @@ -24,9 +24,11 @@ CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offse alignment{alignment} {} VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, + Memory::Memory& cpu_memory_, VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, VKMemoryManager& memory_manager, VKScheduler& scheduler, u64 size) - : RasterizerCache{rasterizer}, tegra_memory_manager{tegra_memory_manager} { + : RasterizerCache{rasterizer}, tegra_memory_manager{tegra_memory_manager}, cpu_memory{ + cpu_memory_} { const auto usage = vk::BufferUsageFlagBits::eVertexBuffer | vk::BufferUsageFlagBits::eIndexBuffer | vk::BufferUsageFlagBits::eUniformBuffer; @@ -48,9 +50,9 @@ u64 VKBufferCache::UploadMemory(GPUVAddr gpu_addr, std::size_t size, u64 alignme // TODO: Figure out which size is the best for given games. 
cache &= size >= 2048; - const auto& host_ptr{Memory::GetPointer(*cpu_addr)}; + u8* const host_ptr{cpu_memory.GetPointer(*cpu_addr)}; if (cache) { - auto entry = TryGet(host_ptr); + const auto entry = TryGet(host_ptr); if (entry) { if (entry->GetSize() >= size && entry->GetAlignment() == alignment) { return entry->GetOffset(); @@ -62,7 +64,7 @@ u64 VKBufferCache::UploadMemory(GPUVAddr gpu_addr, std::size_t size, u64 alignme AlignBuffer(alignment); const u64 uploaded_offset = buffer_offset; - if (!host_ptr) { + if (host_ptr == nullptr) { return uploaded_offset; } diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h index 49f13bcdc..daa8ccf66 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.h +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h @@ -13,6 +13,10 @@ #include "video_core/renderer_vulkan/declarations.h" #include "video_core/renderer_vulkan/vk_scheduler.h" +namespace Memory { +class Memory; +} + namespace Tegra { class MemoryManager; } @@ -58,7 +62,7 @@ private: class VKBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> { public: - explicit VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, + explicit VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, Memory::Memory& cpu_memory_, VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, VKMemoryManager& memory_manager, VKScheduler& scheduler, u64 size); ~VKBufferCache(); @@ -92,6 +96,7 @@ private: void AlignBuffer(std::size_t alignment); Tegra::MemoryManager& tegra_memory_manager; + Memory::Memory& cpu_memory; std::unique_ptr<VKStreamBuffer> stream_buffer; vk::Buffer buffer_handle; diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp index 897cbb4e8..939eebe83 100644 --- a/src/video_core/renderer_vulkan/vk_device.cpp +++ b/src/video_core/renderer_vulkan/vk_device.cpp @@ -3,11 +3,15 @@ // Refer to the license.txt file included. 
#include <bitset> +#include <chrono> +#include <cstdlib> #include <optional> #include <set> #include <string_view> +#include <thread> #include <vector> #include "common/assert.h" +#include "core/settings.h" #include "video_core/renderer_vulkan/declarations.h" #include "video_core/renderer_vulkan/vk_device.h" @@ -15,6 +19,15 @@ namespace Vulkan { namespace { +namespace Alternatives { + +constexpr std::array Depth24UnormS8Uint = {vk::Format::eD32SfloatS8Uint, + vk::Format::eD16UnormS8Uint, vk::Format{}}; +constexpr std::array Depth16UnormS8Uint = {vk::Format::eD24UnormS8Uint, + vk::Format::eD32SfloatS8Uint, vk::Format{}}; + +} // namespace Alternatives + template <typename T> void SetNext(void**& next, T& data) { *next = &data; @@ -22,7 +35,7 @@ void SetNext(void**& next, T& data) { } template <typename T> -T GetFeatures(vk::PhysicalDevice physical, vk::DispatchLoaderDynamic dldi) { +T GetFeatures(vk::PhysicalDevice physical, const vk::DispatchLoaderDynamic& dldi) { vk::PhysicalDeviceFeatures2 features; T extension_features; features.pNext = &extension_features; @@ -30,17 +43,14 @@ T GetFeatures(vk::PhysicalDevice physical, vk::DispatchLoaderDynamic dldi) { return extension_features; } -} // Anonymous namespace - -namespace Alternatives { - -constexpr std::array Depth24UnormS8Uint = {vk::Format::eD32SfloatS8Uint, - vk::Format::eD16UnormS8Uint, vk::Format{}}; -constexpr std::array Depth16UnormS8Uint = {vk::Format::eD24UnormS8Uint, - vk::Format::eD32SfloatS8Uint, vk::Format{}}; -constexpr std::array Astc = {vk::Format::eA8B8G8R8UnormPack32, vk::Format{}}; - -} // namespace Alternatives +template <typename T> +T GetProperties(vk::PhysicalDevice physical, const vk::DispatchLoaderDynamic& dldi) { + vk::PhysicalDeviceProperties2 properties; + T extension_properties; + properties.pNext = &extension_properties; + physical.getProperties2(&properties, dldi); + return extension_properties; +} constexpr const vk::Format* GetFormatAlternatives(vk::Format format) { switch (format) 
{ @@ -53,8 +63,7 @@ constexpr const vk::Format* GetFormatAlternatives(vk::Format format) { } } -constexpr vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, - FormatType format_type) { +vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, FormatType format_type) { switch (format_type) { case FormatType::Linear: return properties.linearTilingFeatures; @@ -67,11 +76,13 @@ constexpr vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properti } } +} // Anonymous namespace + VKDevice::VKDevice(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, vk::SurfaceKHR surface) - : physical{physical}, format_properties{GetFormatProperties(dldi, physical)} { + : physical{physical}, properties{physical.getProperties(dldi)}, + format_properties{GetFormatProperties(dldi, physical)} { SetupFamilies(dldi, surface); - SetupProperties(dldi); SetupFeatures(dldi); } @@ -89,12 +100,22 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan features.depthClamp = true; features.samplerAnisotropy = true; features.largePoints = true; + features.multiViewport = true; + features.depthBiasClamp = true; + features.geometryShader = true; + features.tessellationShader = true; + features.fragmentStoresAndAtomics = true; + features.shaderImageGatherExtended = true; + features.shaderStorageImageWriteWithoutFormat = true; features.textureCompressionASTC_LDR = is_optimal_astc_supported; - vk::PhysicalDeviceVertexAttributeDivisorFeaturesEXT vertex_divisor; - vertex_divisor.vertexAttributeInstanceRateDivisor = true; - vertex_divisor.vertexAttributeInstanceRateZeroDivisor = true; - SetNext(next, vertex_divisor); + vk::PhysicalDevice16BitStorageFeaturesKHR bit16_storage; + bit16_storage.uniformAndStorageBuffer16BitAccess = true; + SetNext(next, bit16_storage); + + vk::PhysicalDevice8BitStorageFeaturesKHR bit8_storage; + bit8_storage.uniformAndStorageBuffer8BitAccess = true; + SetNext(next, bit8_storage); 
vk::PhysicalDeviceFloat16Int8FeaturesKHR float16_int8; if (is_float16_supported) { @@ -120,6 +141,10 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan LOG_INFO(Render_Vulkan, "Device doesn't support uint8 indexes"); } + if (!ext_depth_range_unrestricted) { + LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted"); + } + vk::DeviceCreateInfo device_ci({}, static_cast<u32>(queue_cis.size()), queue_cis.data(), 0, nullptr, static_cast<u32>(extensions.size()), extensions.data(), nullptr); @@ -135,16 +160,7 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan logical = UniqueDevice( dummy_logical, vk::ObjectDestroy<vk::NoParent, vk::DispatchLoaderDynamic>(nullptr, dld)); - if (khr_driver_properties) { - vk::PhysicalDeviceDriverPropertiesKHR driver; - vk::PhysicalDeviceProperties2 properties; - properties.pNext = &driver; - physical.getProperties2(&properties, dld); - driver_id = driver.driverID; - LOG_INFO(Render_Vulkan, "Driver: {} {}", driver.driverName, driver.driverInfo); - } else { - LOG_INFO(Render_Vulkan, "Driver: Unknown"); - } + CollectTelemetryParameters(); graphics_queue = logical->getQueue(graphics_family, 0, dld); present_queue = logical->getQueue(present_family, 0, dld); @@ -188,8 +204,36 @@ vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format, return wanted_format; } +void VKDevice::ReportLoss() const { + LOG_CRITICAL(Render_Vulkan, "Device loss occured!"); + + // Wait some time to let the log flush + std::this_thread::sleep_for(std::chrono::seconds{1}); + + if (!nv_device_diagnostic_checkpoints) { + return; + } + + [[maybe_unused]] const std::vector data = graphics_queue.getCheckpointDataNV(dld); + // Catch here in debug builds (or with optimizations disabled) the last graphics pipeline to be + // executed. 
It can be done on a debugger by evaluating the expression: + // *(VKGraphicsPipeline*)data[0] +} + bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features, const vk::DispatchLoaderDynamic& dldi) const { + // Disable for now to avoid converting ASTC twice. + return false; + static constexpr std::array astc_formats = { + vk::Format::eAstc4x4SrgbBlock, vk::Format::eAstc8x8SrgbBlock, + vk::Format::eAstc8x5SrgbBlock, vk::Format::eAstc5x4SrgbBlock, + vk::Format::eAstc5x5UnormBlock, vk::Format::eAstc5x5SrgbBlock, + vk::Format::eAstc10x8UnormBlock, vk::Format::eAstc10x8SrgbBlock, + vk::Format::eAstc6x6UnormBlock, vk::Format::eAstc6x6SrgbBlock, + vk::Format::eAstc10x10UnormBlock, vk::Format::eAstc10x10SrgbBlock, + vk::Format::eAstc12x12UnormBlock, vk::Format::eAstc12x12SrgbBlock, + vk::Format::eAstc8x6UnormBlock, vk::Format::eAstc8x6SrgbBlock, + vk::Format::eAstc6x5UnormBlock, vk::Format::eAstc6x5SrgbBlock}; if (!features.textureCompressionASTC_LDR) { return false; } @@ -197,12 +241,6 @@ bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eBlitSrc | vk::FormatFeatureFlagBits::eBlitDst | vk::FormatFeatureFlagBits::eTransferSrc | vk::FormatFeatureFlagBits::eTransferDst}; - constexpr std::array astc_formats = { - vk::Format::eAstc4x4UnormBlock, vk::Format::eAstc4x4SrgbBlock, - vk::Format::eAstc8x8SrgbBlock, vk::Format::eAstc8x6SrgbBlock, - vk::Format::eAstc5x4SrgbBlock, vk::Format::eAstc5x5UnormBlock, - vk::Format::eAstc5x5SrgbBlock, vk::Format::eAstc10x8UnormBlock, - vk::Format::eAstc10x8SrgbBlock}; for (const auto format : astc_formats) { const auto format_properties{physical.getFormatProperties(format, dldi)}; if (!(format_properties.optimalTilingFeatures & format_feature_usage)) { @@ -225,11 +263,17 @@ bool VKDevice::IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlag bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, 
vk::PhysicalDevice physical, vk::SurfaceKHR surface) { - LOG_INFO(Render_Vulkan, "{}", physical.getProperties(dldi).deviceName); bool is_suitable = true; - constexpr std::array required_extensions = {VK_KHR_SWAPCHAIN_EXTENSION_NAME, - VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME}; + constexpr std::array required_extensions = { + VK_KHR_SWAPCHAIN_EXTENSION_NAME, + VK_KHR_16BIT_STORAGE_EXTENSION_NAME, + VK_KHR_8BIT_STORAGE_EXTENSION_NAME, + VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME, + VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME, + VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME, + VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME, + }; std::bitset<required_extensions.size()> available_extensions{}; for (const auto& prop : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) { @@ -246,7 +290,7 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev if (available_extensions[i]) { continue; } - LOG_INFO(Render_Vulkan, "Missing required extension: {}", required_extensions[i]); + LOG_ERROR(Render_Vulkan, "Missing required extension: {}", required_extensions[i]); is_suitable = false; } } @@ -263,7 +307,7 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev has_present |= physical.getSurfaceSupportKHR(i, surface, dldi) != 0; } if (!has_graphics || !has_present) { - LOG_INFO(Render_Vulkan, "Device lacks a graphics and present queue"); + LOG_ERROR(Render_Vulkan, "Device lacks a graphics and present queue"); is_suitable = false; } @@ -273,8 +317,15 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev constexpr u32 required_ubo_size = 65536; if (limits.maxUniformBufferRange < required_ubo_size) { - LOG_INFO(Render_Vulkan, "Device UBO size {} is too small, {} is required)", - limits.maxUniformBufferRange, required_ubo_size); + LOG_ERROR(Render_Vulkan, "Device UBO size {} is too small, {} is required", + limits.maxUniformBufferRange, required_ubo_size); + is_suitable = false; + } + + 
constexpr u32 required_num_viewports = 16; + if (limits.maxViewports < required_num_viewports) { + LOG_INFO(Render_Vulkan, "Device number of viewports {} is too small, {} is required", + limits.maxViewports, required_num_viewports); is_suitable = false; } @@ -285,24 +336,32 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev std::make_pair(features.depthClamp, "depthClamp"), std::make_pair(features.samplerAnisotropy, "samplerAnisotropy"), std::make_pair(features.largePoints, "largePoints"), + std::make_pair(features.multiViewport, "multiViewport"), + std::make_pair(features.depthBiasClamp, "depthBiasClamp"), + std::make_pair(features.geometryShader, "geometryShader"), + std::make_pair(features.tessellationShader, "tessellationShader"), + std::make_pair(features.fragmentStoresAndAtomics, "fragmentStoresAndAtomics"), + std::make_pair(features.shaderImageGatherExtended, "shaderImageGatherExtended"), + std::make_pair(features.shaderStorageImageWriteWithoutFormat, + "shaderStorageImageWriteWithoutFormat"), }; for (const auto& [supported, name] : feature_report) { if (supported) { continue; } - LOG_INFO(Render_Vulkan, "Missing required feature: {}", name); + LOG_ERROR(Render_Vulkan, "Missing required feature: {}", name); is_suitable = false; } + if (!is_suitable) { + LOG_ERROR(Render_Vulkan, "{} is not suitable", properties.deviceName); + } + return is_suitable; } std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynamic& dldi) { std::vector<const char*> extensions; - extensions.reserve(7); - extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); - extensions.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); - const auto Test = [&](const vk::ExtensionProperties& extension, std::optional<std::reference_wrapper<bool>> status, const char* name, bool push) { @@ -317,13 +376,32 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami } }; + extensions.reserve(13); + 
extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); + extensions.push_back(VK_KHR_16BIT_STORAGE_EXTENSION_NAME); + extensions.push_back(VK_KHR_8BIT_STORAGE_EXTENSION_NAME); + extensions.push_back(VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME); + extensions.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); + extensions.push_back(VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME); + extensions.push_back(VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME); + + [[maybe_unused]] const bool nsight = + std::getenv("NVTX_INJECTION64_PATH") || std::getenv("NSIGHT_LAUNCHED"); bool khr_shader_float16_int8{}; + bool ext_subgroup_size_control{}; for (const auto& extension : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) { Test(extension, khr_uniform_buffer_standard_layout, VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true); - Test(extension, ext_index_type_uint8, VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME, true); - Test(extension, khr_driver_properties, VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME, true); Test(extension, khr_shader_float16_int8, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME, false); + Test(extension, ext_depth_range_unrestricted, + VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME, true); + Test(extension, ext_index_type_uint8, VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME, true); + Test(extension, ext_shader_viewport_index_layer, + VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME, true); + Test(extension, ext_subgroup_size_control, VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME, + false); + Test(extension, nv_device_diagnostic_checkpoints, + VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME, true); } if (khr_shader_float16_int8) { @@ -332,6 +410,23 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami extensions.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME); } + if (ext_subgroup_size_control) { + const auto features = + GetFeatures<vk::PhysicalDeviceSubgroupSizeControlFeaturesEXT>(physical, dldi); + const auto properties = + 
GetProperties<vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT>(physical, dldi); + + is_warp_potentially_bigger = properties.maxSubgroupSize > GuestWarpSize; + + if (features.subgroupSizeControl && properties.minSubgroupSize <= GuestWarpSize && + properties.maxSubgroupSize >= GuestWarpSize) { + extensions.push_back(VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME); + guest_warp_stages = properties.requiredSubgroupSizeStages; + } + } else { + is_warp_potentially_bigger = true; + } + return extensions; } @@ -358,19 +453,23 @@ void VKDevice::SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceK present_family = *present_family_; } -void VKDevice::SetupProperties(const vk::DispatchLoaderDynamic& dldi) { - const auto props = physical.getProperties(dldi); - device_type = props.deviceType; - uniform_buffer_alignment = static_cast<u64>(props.limits.minUniformBufferOffsetAlignment); - storage_buffer_alignment = static_cast<u64>(props.limits.minStorageBufferOffsetAlignment); - max_storage_buffer_range = static_cast<u64>(props.limits.maxStorageBufferRange); -} - void VKDevice::SetupFeatures(const vk::DispatchLoaderDynamic& dldi) { const auto supported_features{physical.getFeatures(dldi)}; is_optimal_astc_supported = IsOptimalAstcSupported(supported_features, dldi); } +void VKDevice::CollectTelemetryParameters() { + const auto driver = GetProperties<vk::PhysicalDeviceDriverPropertiesKHR>(physical, dld); + driver_id = driver.driverID; + vendor_name = driver.driverName; + + const auto extensions = physical.enumerateDeviceExtensionProperties(nullptr, dld); + reported_extensions.reserve(std::size(extensions)); + for (const auto& extension : extensions) { + reported_extensions.push_back(extension.extensionName); + } +} + std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const { static const float QUEUE_PRIORITY = 1.0f; @@ -385,50 +484,71 @@ std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() con 
std::unordered_map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties( const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical) { - constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32, - vk::Format::eA8B8G8R8SnormPack32, - vk::Format::eA8B8G8R8SrgbPack32, - vk::Format::eB5G6R5UnormPack16, - vk::Format::eA2B10G10R10UnormPack32, - vk::Format::eR32G32B32A32Sfloat, - vk::Format::eR16G16B16A16Uint, - vk::Format::eR16G16Unorm, - vk::Format::eR16G16Snorm, - vk::Format::eR16G16Sfloat, - vk::Format::eR16Unorm, - vk::Format::eR8G8B8A8Srgb, - vk::Format::eR8G8Unorm, - vk::Format::eR8G8Snorm, - vk::Format::eR8Unorm, - vk::Format::eB10G11R11UfloatPack32, - vk::Format::eR32Sfloat, - vk::Format::eR16Sfloat, - vk::Format::eR16G16B16A16Sfloat, - vk::Format::eB8G8R8A8Unorm, - vk::Format::eD32Sfloat, - vk::Format::eD16Unorm, - vk::Format::eD16UnormS8Uint, - vk::Format::eD24UnormS8Uint, - vk::Format::eD32SfloatS8Uint, - vk::Format::eBc1RgbaUnormBlock, - vk::Format::eBc2UnormBlock, - vk::Format::eBc3UnormBlock, - vk::Format::eBc4UnormBlock, - vk::Format::eBc5UnormBlock, - vk::Format::eBc5SnormBlock, - vk::Format::eBc7UnormBlock, - vk::Format::eBc1RgbaSrgbBlock, - vk::Format::eBc3SrgbBlock, - vk::Format::eBc7SrgbBlock, - vk::Format::eAstc4x4UnormBlock, - vk::Format::eAstc4x4SrgbBlock, - vk::Format::eAstc8x8SrgbBlock, - vk::Format::eAstc8x6SrgbBlock, - vk::Format::eAstc5x4SrgbBlock, - vk::Format::eAstc5x5UnormBlock, - vk::Format::eAstc5x5SrgbBlock, - vk::Format::eAstc10x8UnormBlock, - vk::Format::eAstc10x8SrgbBlock}; + static constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32, + vk::Format::eA8B8G8R8UintPack32, + vk::Format::eA8B8G8R8SnormPack32, + vk::Format::eA8B8G8R8SrgbPack32, + vk::Format::eB5G6R5UnormPack16, + vk::Format::eA2B10G10R10UnormPack32, + vk::Format::eA1R5G5B5UnormPack16, + vk::Format::eR32G32B32A32Sfloat, + vk::Format::eR32G32B32A32Uint, + vk::Format::eR32G32Sfloat, + vk::Format::eR32G32Uint, + vk::Format::eR16G16B16A16Uint, + 
vk::Format::eR16G16B16A16Unorm, + vk::Format::eR16G16Unorm, + vk::Format::eR16G16Snorm, + vk::Format::eR16G16Sfloat, + vk::Format::eR16Unorm, + vk::Format::eR8G8B8A8Srgb, + vk::Format::eR8G8Unorm, + vk::Format::eR8G8Snorm, + vk::Format::eR8Unorm, + vk::Format::eR8Uint, + vk::Format::eB10G11R11UfloatPack32, + vk::Format::eR32Sfloat, + vk::Format::eR32Uint, + vk::Format::eR16Sfloat, + vk::Format::eR16G16B16A16Sfloat, + vk::Format::eB8G8R8A8Unorm, + vk::Format::eR4G4B4A4UnormPack16, + vk::Format::eD32Sfloat, + vk::Format::eD16Unorm, + vk::Format::eD16UnormS8Uint, + vk::Format::eD24UnormS8Uint, + vk::Format::eD32SfloatS8Uint, + vk::Format::eBc1RgbaUnormBlock, + vk::Format::eBc2UnormBlock, + vk::Format::eBc3UnormBlock, + vk::Format::eBc4UnormBlock, + vk::Format::eBc5UnormBlock, + vk::Format::eBc5SnormBlock, + vk::Format::eBc7UnormBlock, + vk::Format::eBc6HUfloatBlock, + vk::Format::eBc6HSfloatBlock, + vk::Format::eBc1RgbaSrgbBlock, + vk::Format::eBc3SrgbBlock, + vk::Format::eBc7SrgbBlock, + vk::Format::eAstc4x4SrgbBlock, + vk::Format::eAstc8x8SrgbBlock, + vk::Format::eAstc8x5SrgbBlock, + vk::Format::eAstc5x4SrgbBlock, + vk::Format::eAstc5x5UnormBlock, + vk::Format::eAstc5x5SrgbBlock, + vk::Format::eAstc10x8UnormBlock, + vk::Format::eAstc10x8SrgbBlock, + vk::Format::eAstc6x6UnormBlock, + vk::Format::eAstc6x6SrgbBlock, + vk::Format::eAstc10x10UnormBlock, + vk::Format::eAstc10x10SrgbBlock, + vk::Format::eAstc12x12UnormBlock, + vk::Format::eAstc12x12SrgbBlock, + vk::Format::eAstc8x6UnormBlock, + vk::Format::eAstc8x6SrgbBlock, + vk::Format::eAstc6x5UnormBlock, + vk::Format::eAstc6x5SrgbBlock, + vk::Format::eE5B9G9R9UfloatPack32}; std::unordered_map<vk::Format, vk::FormatProperties> format_properties; for (const auto format : formats) { format_properties.emplace(format, physical.getFormatProperties(format, dldi)); diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h index 010d4c3d6..72603f9f6 100644 --- 
a/src/video_core/renderer_vulkan/vk_device.h +++ b/src/video_core/renderer_vulkan/vk_device.h @@ -4,6 +4,8 @@ #pragma once +#include <string> +#include <string_view> #include <unordered_map> #include <vector> #include "common/common_types.h" @@ -14,6 +16,9 @@ namespace Vulkan { /// Format usage descriptor. enum class FormatType { Linear, Optimal, Buffer }; +/// Subgroup size of the guest emulated hardware (Nvidia has 32 threads per subgroup). +const u32 GuestWarpSize = 32; + /// Handles data specific to a physical device. class VKDevice final { public: @@ -34,6 +39,9 @@ public: vk::Format GetSupportedFormat(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage, FormatType format_type) const; + /// Reports a device loss. + void ReportLoss() const; + /// Returns the dispatch loader with direct function pointers of the device. const vk::DispatchLoaderDynamic& GetDispatchLoader() const { return dld; @@ -71,7 +79,22 @@ public: /// Returns true if the device is integrated with the host CPU. bool IsIntegrated() const { - return device_type == vk::PhysicalDeviceType::eIntegratedGpu; + return properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu; + } + + /// Returns the current Vulkan API version provided in Vulkan-formatted version numbers. + u32 GetApiVersion() const { + return properties.apiVersion; + } + + /// Returns the current driver version provided in Vulkan-formatted version numbers. + u32 GetDriverVersion() const { + return properties.driverVersion; + } + + /// Returns the device name. + std::string_view GetModelName() const { + return properties.deviceName; } /// Returns the driver ID. @@ -80,18 +103,23 @@ public: } /// Returns uniform buffer alignment requeriment. - u64 GetUniformBufferAlignment() const { - return uniform_buffer_alignment; + vk::DeviceSize GetUniformBufferAlignment() const { + return properties.limits.minUniformBufferOffsetAlignment; } /// Returns storage alignment requeriment. 
- u64 GetStorageBufferAlignment() const { - return storage_buffer_alignment; + vk::DeviceSize GetStorageBufferAlignment() const { + return properties.limits.minStorageBufferOffsetAlignment; } /// Returns the maximum range for storage buffers. - u64 GetMaxStorageBufferRange() const { - return max_storage_buffer_range; + vk::DeviceSize GetMaxStorageBufferRange() const { + return properties.limits.maxStorageBufferRange; + } + + /// Returns the maximum size for push constants. + vk::DeviceSize GetMaxPushConstantsSize() const { + return properties.limits.maxPushConstantsSize; } /// Returns true if ASTC is natively supported. @@ -104,6 +132,16 @@ public: return is_float16_supported; } + /// Returns true if the device warp size can potentially be bigger than guest's warp size. + bool IsWarpSizePotentiallyBiggerThanGuest() const { + return is_warp_potentially_bigger; + } + + /// Returns true if the device can be forced to use the guest warp size. + bool IsGuestWarpSizeSupported(vk::ShaderStageFlagBits stage) const { + return (guest_warp_stages & stage) != vk::ShaderStageFlags{}; + } + /// Returns true if the device supports VK_EXT_scalar_block_layout. bool IsKhrUniformBufferStandardLayoutSupported() const { return khr_uniform_buffer_standard_layout; @@ -114,6 +152,31 @@ public: return ext_index_type_uint8; } + /// Returns true if the device supports VK_EXT_depth_range_unrestricted. + bool IsExtDepthRangeUnrestrictedSupported() const { + return ext_depth_range_unrestricted; + } + + /// Returns true if the device supports VK_EXT_shader_viewport_index_layer. + bool IsExtShaderViewportIndexLayerSupported() const { + return ext_shader_viewport_index_layer; + } + + /// Returns true if the device supports VK_NV_device_diagnostic_checkpoints. + bool IsNvDeviceDiagnosticCheckpoints() const { + return nv_device_diagnostic_checkpoints; + } + + /// Returns the vendor name reported from Vulkan. 
+ std::string_view GetVendorName() const { + return vendor_name; + } + + /// Returns the list of available extensions. + const std::vector<std::string>& GetAvailableExtensions() const { + return reported_extensions; + } + /// Checks if the physical device is suitable. static bool IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, vk::SurfaceKHR surface); @@ -125,12 +188,12 @@ private: /// Sets up queue families. void SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceKHR surface); - /// Sets up device properties. - void SetupProperties(const vk::DispatchLoaderDynamic& dldi); - /// Sets up device features. void SetupFeatures(const vk::DispatchLoaderDynamic& dldi); + /// Collects telemetry information from the device. + void CollectTelemetryParameters(); + /// Returns a list of queue initialization descriptors. std::vector<vk::DeviceQueueCreateInfo> GetDeviceQueueCreateInfos() const; @@ -148,23 +211,29 @@ private: const vk::PhysicalDevice physical; ///< Physical device. vk::DispatchLoaderDynamic dld; ///< Device function pointers. + vk::PhysicalDeviceProperties properties; ///< Device properties. UniqueDevice logical; ///< Logical device. vk::Queue graphics_queue; ///< Main graphics queue. vk::Queue present_queue; ///< Main present queue. u32 graphics_family{}; ///< Main graphics queue family index. u32 present_family{}; ///< Main present queue family index. - vk::PhysicalDeviceType device_type; ///< Physical device type. vk::DriverIdKHR driver_id{}; ///< Driver ID. - u64 uniform_buffer_alignment{}; ///< Uniform buffer alignment requeriment. - u64 storage_buffer_alignment{}; ///< Storage buffer alignment requeriment. - u64 max_storage_buffer_range{}; ///< Max storage buffer size. + vk::ShaderStageFlags guest_warp_stages{}; ///< Stages where the guest warp size can be forced. bool is_optimal_astc_supported{}; ///< Support for native ASTC. bool is_float16_supported{}; ///< Support for float16 arithmetics. 
+ bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest. bool khr_uniform_buffer_standard_layout{}; ///< Support for std430 on UBOs. bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8. - bool khr_driver_properties{}; ///< Support for VK_KHR_driver_properties. - std::unordered_map<vk::Format, vk::FormatProperties> - format_properties; ///< Format properties dictionary. + bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted. + bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer. + bool nv_device_diagnostic_checkpoints{}; ///< Support for VK_NV_device_diagnostic_checkpoints. + + // Telemetry parameters + std::string vendor_name; ///< Device's driver name. + std::vector<std::string> reported_extensions; ///< Reported Vulkan extensions. + + /// Format properties dictionary. + std::unordered_map<vk::Format, vk::FormatProperties> format_properties; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_image.cpp b/src/video_core/renderer_vulkan/vk_image.cpp new file mode 100644 index 000000000..4bcbef959 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_image.cpp @@ -0,0 +1,106 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <memory> +#include <vector> + +#include "common/assert.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_image.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" + +namespace Vulkan { + +VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler, + const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask) + : device{device}, scheduler{scheduler}, format{image_ci.format}, aspect_mask{aspect_mask}, + image_num_layers{image_ci.arrayLayers}, image_num_levels{image_ci.mipLevels} { + UNIMPLEMENTED_IF_MSG(image_ci.queueFamilyIndexCount != 0, + "Queue family tracking is not implemented"); + + const auto dev = device.GetLogical(); + image = dev.createImageUnique(image_ci, nullptr, device.GetDispatchLoader()); + + const u32 num_ranges = image_num_layers * image_num_levels; + barriers.resize(num_ranges); + subrange_states.resize(num_ranges, {{}, image_ci.initialLayout}); +} + +VKImage::~VKImage() = default; + +void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, + vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access, + vk::ImageLayout new_layout) { + if (!HasChanged(base_layer, num_layers, base_level, num_levels, new_access, new_layout)) { + return; + } + + std::size_t cursor = 0; + for (u32 layer_it = 0; layer_it < num_layers; ++layer_it) { + for (u32 level_it = 0; level_it < num_levels; ++level_it, ++cursor) { + const u32 layer = base_layer + layer_it; + const u32 level = base_level + level_it; + auto& state = GetSubrangeState(layer, level); + barriers[cursor] = vk::ImageMemoryBarrier( + state.access, new_access, state.layout, new_layout, VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, *image, {aspect_mask, level, 1, layer, 1}); + state.access = new_access; + state.layout = new_layout; + } + } + + scheduler.RequestOutsideRenderPassOperationContext(); + + scheduler.Record([barriers = 
barriers, cursor](auto cmdbuf, auto& dld) { + // TODO(Rodrigo): Implement a way to use the latest stage across subresources. + constexpr auto stage_stub = vk::PipelineStageFlagBits::eAllCommands; + cmdbuf.pipelineBarrier(stage_stub, stage_stub, {}, 0, nullptr, 0, nullptr, + static_cast<u32>(cursor), barriers.data(), dld); + }); +} + +bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, + vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept { + const bool is_full_range = base_layer == 0 && num_layers == image_num_layers && + base_level == 0 && num_levels == image_num_levels; + if (!is_full_range) { + state_diverged = true; + } + + if (!state_diverged) { + auto& state = GetSubrangeState(0, 0); + if (state.access != new_access || state.layout != new_layout) { + return true; + } + } + + for (u32 layer_it = 0; layer_it < num_layers; ++layer_it) { + for (u32 level_it = 0; level_it < num_levels; ++level_it) { + const u32 layer = base_layer + layer_it; + const u32 level = base_level + level_it; + auto& state = GetSubrangeState(layer, level); + if (state.access != new_access || state.layout != new_layout) { + return true; + } + } + } + return false; +} + +void VKImage::CreatePresentView() { + // Image type has to be 2D to be presented. + const vk::ImageViewCreateInfo image_view_ci({}, *image, vk::ImageViewType::e2D, format, {}, + {aspect_mask, 0, 1, 0, 1}); + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + present_view = dev.createImageViewUnique(image_view_ci, nullptr, dld); +} + +VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept { + return subrange_states[static_cast<std::size_t>(layer * image_num_levels) + + static_cast<std::size_t>(level)]; +} + +} // namespace Vulkan
\ No newline at end of file diff --git a/src/video_core/renderer_vulkan/vk_image.h b/src/video_core/renderer_vulkan/vk_image.h new file mode 100644 index 000000000..b78242512 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_image.h @@ -0,0 +1,84 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> +#include <vector> + +#include "common/common_types.h" +#include "video_core/renderer_vulkan/declarations.h" + +namespace Vulkan { + +class VKDevice; +class VKScheduler; + +class VKImage { +public: + explicit VKImage(const VKDevice& device, VKScheduler& scheduler, + const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask); + ~VKImage(); + + /// Records in the passed command buffer an image transition and updates the state of the image. + void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, + vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access, + vk::ImageLayout new_layout); + + /// Returns a view compatible with presentation, the image has to be 2D. + vk::ImageView GetPresentView() { + if (!present_view) { + CreatePresentView(); + } + return *present_view; + } + + /// Returns the Vulkan image handler. + vk::Image GetHandle() const { + return *image; + } + + /// Returns the Vulkan format for this image. + vk::Format GetFormat() const { + return format; + } + + /// Returns the Vulkan aspect mask. + vk::ImageAspectFlags GetAspectMask() const { + return aspect_mask; + } + +private: + struct SubrangeState final { + vk::AccessFlags access{}; ///< Current access bits. + vk::ImageLayout layout = vk::ImageLayout::eUndefined; ///< Current image layout. + }; + + bool HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, + vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept; + + /// Creates a presentation view. 
+ void CreatePresentView(); + + /// Returns the subrange state for a layer and layer. + SubrangeState& GetSubrangeState(u32 layer, u32 level) noexcept; + + const VKDevice& device; ///< Device handler. + VKScheduler& scheduler; ///< Device scheduler. + + const vk::Format format; ///< Vulkan format. + const vk::ImageAspectFlags aspect_mask; ///< Vulkan aspect mask. + const u32 image_num_layers; ///< Number of layers. + const u32 image_num_levels; ///< Number of mipmap levels. + + UniqueImage image; ///< Image handle. + UniqueImageView present_view; ///< Image view compatible with presentation. + + std::vector<vk::ImageMemoryBarrier> barriers; ///< Pool of barriers. + std::vector<SubrangeState> subrange_states; ///< Current subrange state. + + bool state_diverged = false; ///< True when subresources mismatch in layout. +}; + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp index 13c46e5b8..525b4bb46 100644 --- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp @@ -72,12 +72,22 @@ VKFence::VKFence(const VKDevice& device, UniqueFence handle) VKFence::~VKFence() = default; void VKFence::Wait() { + static constexpr u64 timeout = std::numeric_limits<u64>::max(); const auto dev = device.GetLogical(); const auto& dld = device.GetDispatchLoader(); - dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld); + switch (const auto result = dev.waitForFences(1, &*handle, true, timeout, dld)) { + case vk::Result::eSuccess: + return; + case vk::Result::eErrorDeviceLost: + device.ReportLoss(); + [[fallthrough]]; + default: + vk::throwResultException(result, "vk::waitForFences"); + } } void VKFence::Release() { + ASSERT(is_owned); is_owned = false; } @@ -133,8 +143,32 @@ void VKFence::Unprotect(VKResource* resource) { protected_resources.erase(it); } +void VKFence::RedirectProtection(VKResource* 
old_resource, VKResource* new_resource) noexcept { + std::replace(std::begin(protected_resources), std::end(protected_resources), old_resource, + new_resource); +} + VKFenceWatch::VKFenceWatch() = default; +VKFenceWatch::VKFenceWatch(VKFence& initial_fence) { + Watch(initial_fence); +} + +VKFenceWatch::VKFenceWatch(VKFenceWatch&& rhs) noexcept { + fence = std::exchange(rhs.fence, nullptr); + if (fence) { + fence->RedirectProtection(&rhs, this); + } +} + +VKFenceWatch& VKFenceWatch::operator=(VKFenceWatch&& rhs) noexcept { + fence = std::exchange(rhs.fence, nullptr); + if (fence) { + fence->RedirectProtection(&rhs, this); + } + return *this; +} + VKFenceWatch::~VKFenceWatch() { if (fence) { fence->Unprotect(this); diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h index 08ee86fa6..d4cbc95a5 100644 --- a/src/video_core/renderer_vulkan/vk_resource_manager.h +++ b/src/video_core/renderer_vulkan/vk_resource_manager.h @@ -65,6 +65,9 @@ public: /// Removes protection for a resource. void Unprotect(VKResource* resource); + /// Redirects one protected resource to a new address. + void RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept; + /// Retreives the fence. operator vk::Fence() const { return *handle; @@ -97,8 +100,13 @@ private: class VKFenceWatch final : public VKResource { public: explicit VKFenceWatch(); + VKFenceWatch(VKFence& initial_fence); + VKFenceWatch(VKFenceWatch&&) noexcept; + VKFenceWatch(const VKFenceWatch&) = delete; ~VKFenceWatch() override; + VKFenceWatch& operator=(VKFenceWatch&&) noexcept; + /// Waits for the fence to be released. void Wait(); @@ -116,6 +124,14 @@ public: void OnFenceRemoval(VKFence* signaling_fence) override; + /** + * Do not use it paired with Watch. Use TryWatch instead. + * Returns true when the watch is free. + */ + bool IsUsed() const { + return fence != nullptr; + } + private: VKFence* fence{}; ///< Fence watching this resource. 
nullptr when the watch is free. }; diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp index 801826d3d..1ce583f75 100644 --- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp @@ -46,9 +46,10 @@ UniqueSampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) {}, MaxwellToVK::Sampler::Filter(tsc.mag_filter), MaxwellToVK::Sampler::Filter(tsc.min_filter), MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter), - MaxwellToVK::Sampler::WrapMode(tsc.wrap_u), MaxwellToVK::Sampler::WrapMode(tsc.wrap_v), - MaxwellToVK::Sampler::WrapMode(tsc.wrap_p), tsc.GetLodBias(), has_anisotropy, - max_anisotropy, tsc.depth_compare_enabled, + MaxwellToVK::Sampler::WrapMode(tsc.wrap_u, tsc.mag_filter), + MaxwellToVK::Sampler::WrapMode(tsc.wrap_v, tsc.mag_filter), + MaxwellToVK::Sampler::WrapMode(tsc.wrap_p, tsc.mag_filter), tsc.GetLodBias(), + has_anisotropy, max_anisotropy, tsc.depth_compare_enabled, MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func), tsc.GetMinLod(), tsc.GetMaxLod(), vk_border_color.value_or(vk::BorderColor::eFloatTransparentBlack), unnormalized_coords); diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp index 0f8116458..d66133ad1 100644 --- a/src/video_core/renderer_vulkan/vk_scheduler.cpp +++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp @@ -3,7 +3,7 @@ // Refer to the license.txt file included. 
#include "common/assert.h" -#include "common/logging/log.h" +#include "common/microprofile.h" #include "video_core/renderer_vulkan/declarations.h" #include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_resource_manager.h" @@ -11,46 +11,172 @@ namespace Vulkan { +MICROPROFILE_DECLARE(Vulkan_WaitForWorker); + +void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf, + const vk::DispatchLoaderDynamic& dld) { + auto command = first; + while (command != nullptr) { + auto next = command->GetNext(); + command->Execute(cmdbuf, dld); + command->~Command(); + command = next; + } + + command_offset = 0; + first = nullptr; + last = nullptr; +} + VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_manager) - : device{device}, resource_manager{resource_manager} { - next_fence = &resource_manager.CommitFence(); + : device{device}, resource_manager{resource_manager}, next_fence{ + &resource_manager.CommitFence()} { + AcquireNewChunk(); AllocateNewContext(); + worker_thread = std::thread(&VKScheduler::WorkerThread, this); } -VKScheduler::~VKScheduler() = default; +VKScheduler::~VKScheduler() { + quit = true; + cv.notify_all(); + worker_thread.join(); +} void VKScheduler::Flush(bool release_fence, vk::Semaphore semaphore) { SubmitExecution(semaphore); - if (release_fence) + if (release_fence) { current_fence->Release(); + } AllocateNewContext(); } void VKScheduler::Finish(bool release_fence, vk::Semaphore semaphore) { SubmitExecution(semaphore); current_fence->Wait(); - if (release_fence) + if (release_fence) { current_fence->Release(); + } AllocateNewContext(); } +void VKScheduler::WaitWorker() { + MICROPROFILE_SCOPE(Vulkan_WaitForWorker); + DispatchWork(); + + bool finished = false; + do { + cv.notify_all(); + std::unique_lock lock{mutex}; + finished = chunk_queue.Empty(); + } while (!finished); +} + +void VKScheduler::DispatchWork() { + if (chunk->Empty()) { + return; + } + chunk_queue.Push(std::move(chunk)); 
+ cv.notify_all(); + AcquireNewChunk(); +} + +void VKScheduler::RequestRenderpass(const vk::RenderPassBeginInfo& renderpass_bi) { + if (state.renderpass && renderpass_bi == *state.renderpass) { + return; + } + const bool end_renderpass = state.renderpass.has_value(); + state.renderpass = renderpass_bi; + Record([renderpass_bi, end_renderpass](auto cmdbuf, auto& dld) { + if (end_renderpass) { + cmdbuf.endRenderPass(dld); + } + cmdbuf.beginRenderPass(renderpass_bi, vk::SubpassContents::eInline, dld); + }); +} + +void VKScheduler::RequestOutsideRenderPassOperationContext() { + EndRenderPass(); +} + +void VKScheduler::BindGraphicsPipeline(vk::Pipeline pipeline) { + if (state.graphics_pipeline == pipeline) { + return; + } + state.graphics_pipeline = pipeline; + Record([pipeline](auto cmdbuf, auto& dld) { + cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline, dld); + }); +} + +void VKScheduler::WorkerThread() { + std::unique_lock lock{mutex}; + do { + cv.wait(lock, [this] { return !chunk_queue.Empty() || quit; }); + if (quit) { + continue; + } + auto extracted_chunk = std::move(chunk_queue.Front()); + chunk_queue.Pop(); + extracted_chunk->ExecuteAll(current_cmdbuf, device.GetDispatchLoader()); + chunk_reserve.Push(std::move(extracted_chunk)); + } while (!quit); +} + void VKScheduler::SubmitExecution(vk::Semaphore semaphore) { + EndPendingOperations(); + InvalidateState(); + WaitWorker(); + + std::unique_lock lock{mutex}; + + const auto queue = device.GetGraphicsQueue(); const auto& dld = device.GetDispatchLoader(); current_cmdbuf.end(dld); - const auto queue = device.GetGraphicsQueue(); - const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, ¤t_cmdbuf, semaphore ? 1u : 0u, + const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, ¤t_cmdbuf, semaphore ? 
1U : 0U, &semaphore); - queue.submit({submit_info}, *current_fence, dld); + queue.submit({submit_info}, static_cast<vk::Fence>(*current_fence), dld); } void VKScheduler::AllocateNewContext() { + std::unique_lock lock{mutex}; current_fence = next_fence; - current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence); next_fence = &resource_manager.CommitFence(); - const auto& dld = device.GetDispatchLoader(); - current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit}, dld); + current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence); + current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit}, + device.GetDispatchLoader()); +} + +void VKScheduler::InvalidateState() { + state.graphics_pipeline = nullptr; + state.viewports = false; + state.scissors = false; + state.depth_bias = false; + state.blend_constants = false; + state.depth_bounds = false; + state.stencil_values = false; +} + +void VKScheduler::EndPendingOperations() { + EndRenderPass(); +} + +void VKScheduler::EndRenderPass() { + if (!state.renderpass) { + return; + } + state.renderpass = std::nullopt; + Record([](auto cmdbuf, auto& dld) { cmdbuf.endRenderPass(dld); }); +} + +void VKScheduler::AcquireNewChunk() { + if (chunk_reserve.Empty()) { + chunk = std::make_unique<CommandChunk>(); + return; + } + chunk = std::move(chunk_reserve.Front()); + chunk_reserve.Pop(); } } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h index 0e5b49c7f..bcdffbba0 100644 --- a/src/video_core/renderer_vulkan/vk_scheduler.h +++ b/src/video_core/renderer_vulkan/vk_scheduler.h @@ -4,7 +4,14 @@ #pragma once +#include <condition_variable> +#include <memory> +#include <optional> +#include <stack> +#include <thread> +#include <utility> #include "common/common_types.h" +#include "common/threadsafe_queue.h" #include "video_core/renderer_vulkan/declarations.h" namespace Vulkan { @@ -30,56 +37,197 @@ private: VKFence* 
const& fence; }; -class VKCommandBufferView { +/// The scheduler abstracts command buffer and fence management with an interface that's able to do +/// OpenGL-like operations on Vulkan command buffers. +class VKScheduler { public: - VKCommandBufferView() = default; - VKCommandBufferView(const vk::CommandBuffer& cmdbuf) : cmdbuf{cmdbuf} {} + explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager); + ~VKScheduler(); + + /// Sends the current execution context to the GPU. + void Flush(bool release_fence = true, vk::Semaphore semaphore = nullptr); + + /// Sends the current execution context to the GPU and waits for it to complete. + void Finish(bool release_fence = true, vk::Semaphore semaphore = nullptr); + + /// Waits for the worker thread to finish executing everything. After this function returns it's + /// safe to touch worker resources. + void WaitWorker(); + + /// Sends currently recorded work to the worker thread. + void DispatchWork(); + + /// Requests to begin a renderpass. + void RequestRenderpass(const vk::RenderPassBeginInfo& renderpass_bi); + + /// Requests the current executino context to be able to execute operations only allowed outside + /// of a renderpass. + void RequestOutsideRenderPassOperationContext(); + + /// Binds a pipeline to the current execution context. + void BindGraphicsPipeline(vk::Pipeline pipeline); - const vk::CommandBuffer* operator->() const noexcept { - return &cmdbuf; + /// Returns true when viewports have been set in the current command buffer. + bool TouchViewports() { + return std::exchange(state.viewports, true); } - operator vk::CommandBuffer() const noexcept { - return cmdbuf; + /// Returns true when scissors have been set in the current command buffer. + bool TouchScissors() { + return std::exchange(state.scissors, true); } -private: - const vk::CommandBuffer& cmdbuf; -}; + /// Returns true when depth bias have been set in the current command buffer. 
+ bool TouchDepthBias() { + return std::exchange(state.depth_bias, true); + } -/// The scheduler abstracts command buffer and fence management with an interface that's able to do -/// OpenGL-like operations on Vulkan command buffers. -class VKScheduler { -public: - explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager); - ~VKScheduler(); + /// Returns true when blend constants have been set in the current command buffer. + bool TouchBlendConstants() { + return std::exchange(state.blend_constants, true); + } + + /// Returns true when depth bounds have been set in the current command buffer. + bool TouchDepthBounds() { + return std::exchange(state.depth_bounds, true); + } + + /// Returns true when stencil values have been set in the current command buffer. + bool TouchStencilValues() { + return std::exchange(state.stencil_values, true); + } + + /// Send work to a separate thread. + template <typename T> + void Record(T&& command) { + if (chunk->Record(command)) { + return; + } + DispatchWork(); + (void)chunk->Record(command); + } /// Gets a reference to the current fence. VKFenceView GetFence() const { return current_fence; } - /// Gets a reference to the current command buffer. - VKCommandBufferView GetCommandBuffer() const { - return current_cmdbuf; - } +private: + class Command { + public: + virtual ~Command() = default; - /// Sends the current execution context to the GPU. - void Flush(bool release_fence = true, vk::Semaphore semaphore = nullptr); + virtual void Execute(vk::CommandBuffer cmdbuf, + const vk::DispatchLoaderDynamic& dld) const = 0; - /// Sends the current execution context to the GPU and waits for it to complete. 
- void Finish(bool release_fence = true, vk::Semaphore semaphore = nullptr); + Command* GetNext() const { + return next; + } + + void SetNext(Command* next_) { + next = next_; + } + + private: + Command* next = nullptr; + }; + + template <typename T> + class TypedCommand final : public Command { + public: + explicit TypedCommand(T&& command) : command{std::move(command)} {} + ~TypedCommand() override = default; + + TypedCommand(TypedCommand&&) = delete; + TypedCommand& operator=(TypedCommand&&) = delete; + + void Execute(vk::CommandBuffer cmdbuf, + const vk::DispatchLoaderDynamic& dld) const override { + command(cmdbuf, dld); + } + + private: + T command; + }; + + class CommandChunk final { + public: + void ExecuteAll(vk::CommandBuffer cmdbuf, const vk::DispatchLoaderDynamic& dld); + + template <typename T> + bool Record(T& command) { + using FuncType = TypedCommand<T>; + static_assert(sizeof(FuncType) < sizeof(data), "Lambda is too large"); + + if (command_offset > sizeof(data) - sizeof(FuncType)) { + return false; + } + + Command* current_last = last; + + last = new (data.data() + command_offset) FuncType(std::move(command)); + + if (current_last) { + current_last->SetNext(last); + } else { + first = last; + } + + command_offset += sizeof(FuncType); + return true; + } + + bool Empty() const { + return command_offset == 0; + } + + private: + Command* first = nullptr; + Command* last = nullptr; + + std::size_t command_offset = 0; + std::array<u8, 0x8000> data{}; + }; + + void WorkerThread(); -private: void SubmitExecution(vk::Semaphore semaphore); void AllocateNewContext(); + void InvalidateState(); + + void EndPendingOperations(); + + void EndRenderPass(); + + void AcquireNewChunk(); + const VKDevice& device; VKResourceManager& resource_manager; vk::CommandBuffer current_cmdbuf; VKFence* current_fence = nullptr; VKFence* next_fence = nullptr; + + struct State { + std::optional<vk::RenderPassBeginInfo> renderpass; + vk::Pipeline graphics_pipeline; + bool viewports 
= false; + bool scissors = false; + bool depth_bias = false; + bool blend_constants = false; + bool depth_bounds = false; + bool stencil_values = false; + } state; + + std::unique_ptr<CommandChunk> chunk; + std::thread worker_thread; + + Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_queue; + Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve; + std::mutex mutex; + std::condition_variable cv; + bool quit = false; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 42cf068b6..8fe852ce8 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -3,8 +3,10 @@ // Refer to the license.txt file included. #include <functional> +#include <limits> #include <map> -#include <set> +#include <type_traits> +#include <utility> #include <fmt/format.h> @@ -17,39 +19,80 @@ #include "video_core/engines/maxwell_3d.h" #include "video_core/engines/shader_bytecode.h" #include "video_core/engines/shader_header.h" +#include "video_core/engines/shader_type.h" #include "video_core/renderer_vulkan/vk_device.h" #include "video_core/renderer_vulkan/vk_shader_decompiler.h" #include "video_core/shader/node.h" #include "video_core/shader/shader_ir.h" -namespace Vulkan::VKShader { +namespace Vulkan { + +namespace { using Sirit::Id; +using Tegra::Engines::ShaderType; using Tegra::Shader::Attribute; using Tegra::Shader::AttributeUse; using Tegra::Shader::Register; using namespace VideoCommon::Shader; using Maxwell = Tegra::Engines::Maxwell3D::Regs; -using ShaderStage = Tegra::Engines::Maxwell3D::Regs::ShaderStage; using Operation = const OperationNode&; +class ASTDecompiler; +class ExprDecompiler; + // TODO(Rodrigo): Use rasterizer's value -constexpr u32 MAX_CONSTBUFFER_FLOATS = 0x4000; -constexpr u32 MAX_CONSTBUFFER_ELEMENTS = MAX_CONSTBUFFER_FLOATS / 4; -constexpr u32 STAGE_BINDING_STRIDE = 0x100; 
+constexpr u32 MaxConstBufferFloats = 0x4000; +constexpr u32 MaxConstBufferElements = MaxConstBufferFloats / 4; -enum class Type { Bool, Bool2, Float, Int, Uint, HalfFloat }; +constexpr u32 NumInputPatches = 32; // This value seems to be the standard + +enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat }; + +class Expression final { +public: + Expression(Id id, Type type) : id{id}, type{type} { + ASSERT(type != Type::Void); + } + Expression() : type{Type::Void} {} -struct SamplerImage { - Id image_type; - Id sampled_image_type; - Id sampler; + Id id{}; + Type type{}; }; +static_assert(std::is_standard_layout_v<Expression>); -namespace { +struct TexelBuffer { + Id image_type{}; + Id image{}; +}; + +struct SampledImage { + Id image_type{}; + Id sampled_image_type{}; + Id sampler{}; +}; + +struct StorageImage { + Id image_type{}; + Id image{}; +}; + +struct AttributeType { + Type type; + Id scalar; + Id vector; +}; + +struct VertexIndices { + std::optional<u32> position; + std::optional<u32> viewport; + std::optional<u32> point_size; + std::optional<u32> clip_distances; +}; spv::Dim GetSamplerDim(const Sampler& sampler) { + ASSERT(!sampler.IsBuffer()); switch (sampler.GetType()) { case Tegra::Shader::TextureType::Texture1D: return spv::Dim::Dim1D; @@ -65,6 +108,138 @@ spv::Dim GetSamplerDim(const Sampler& sampler) { } } +std::pair<spv::Dim, bool> GetImageDim(const Image& image) { + switch (image.GetType()) { + case Tegra::Shader::ImageType::Texture1D: + return {spv::Dim::Dim1D, false}; + case Tegra::Shader::ImageType::TextureBuffer: + return {spv::Dim::Buffer, false}; + case Tegra::Shader::ImageType::Texture1DArray: + return {spv::Dim::Dim1D, true}; + case Tegra::Shader::ImageType::Texture2D: + return {spv::Dim::Dim2D, false}; + case Tegra::Shader::ImageType::Texture2DArray: + return {spv::Dim::Dim2D, true}; + case Tegra::Shader::ImageType::Texture3D: + return {spv::Dim::Dim3D, false}; + default: + UNIMPLEMENTED_MSG("Unimplemented image type={}", 
static_cast<u32>(image.GetType())); + return {spv::Dim::Dim2D, false}; + } +} + +/// Returns the number of vertices present in a primitive topology. +u32 GetNumPrimitiveTopologyVertices(Maxwell::PrimitiveTopology primitive_topology) { + switch (primitive_topology) { + case Maxwell::PrimitiveTopology::Points: + return 1; + case Maxwell::PrimitiveTopology::Lines: + case Maxwell::PrimitiveTopology::LineLoop: + case Maxwell::PrimitiveTopology::LineStrip: + return 2; + case Maxwell::PrimitiveTopology::Triangles: + case Maxwell::PrimitiveTopology::TriangleStrip: + case Maxwell::PrimitiveTopology::TriangleFan: + return 3; + case Maxwell::PrimitiveTopology::LinesAdjacency: + case Maxwell::PrimitiveTopology::LineStripAdjacency: + return 4; + case Maxwell::PrimitiveTopology::TrianglesAdjacency: + case Maxwell::PrimitiveTopology::TriangleStripAdjacency: + return 6; + case Maxwell::PrimitiveTopology::Quads: + UNIMPLEMENTED_MSG("Quads"); + return 3; + case Maxwell::PrimitiveTopology::QuadStrip: + UNIMPLEMENTED_MSG("QuadStrip"); + return 3; + case Maxwell::PrimitiveTopology::Polygon: + UNIMPLEMENTED_MSG("Polygon"); + return 3; + case Maxwell::PrimitiveTopology::Patches: + UNIMPLEMENTED_MSG("Patches"); + return 3; + default: + UNREACHABLE(); + return 3; + } +} + +spv::ExecutionMode GetExecutionMode(Maxwell::TessellationPrimitive primitive) { + switch (primitive) { + case Maxwell::TessellationPrimitive::Isolines: + return spv::ExecutionMode::Isolines; + case Maxwell::TessellationPrimitive::Triangles: + return spv::ExecutionMode::Triangles; + case Maxwell::TessellationPrimitive::Quads: + return spv::ExecutionMode::Quads; + } + UNREACHABLE(); + return spv::ExecutionMode::Triangles; +} + +spv::ExecutionMode GetExecutionMode(Maxwell::TessellationSpacing spacing) { + switch (spacing) { + case Maxwell::TessellationSpacing::Equal: + return spv::ExecutionMode::SpacingEqual; + case Maxwell::TessellationSpacing::FractionalOdd: + return spv::ExecutionMode::SpacingFractionalOdd; + case 
Maxwell::TessellationSpacing::FractionalEven: + return spv::ExecutionMode::SpacingFractionalEven; + } + UNREACHABLE(); + return spv::ExecutionMode::SpacingEqual; +} + +spv::ExecutionMode GetExecutionMode(Maxwell::PrimitiveTopology input_topology) { + switch (input_topology) { + case Maxwell::PrimitiveTopology::Points: + return spv::ExecutionMode::InputPoints; + case Maxwell::PrimitiveTopology::Lines: + case Maxwell::PrimitiveTopology::LineLoop: + case Maxwell::PrimitiveTopology::LineStrip: + return spv::ExecutionMode::InputLines; + case Maxwell::PrimitiveTopology::Triangles: + case Maxwell::PrimitiveTopology::TriangleStrip: + case Maxwell::PrimitiveTopology::TriangleFan: + return spv::ExecutionMode::Triangles; + case Maxwell::PrimitiveTopology::LinesAdjacency: + case Maxwell::PrimitiveTopology::LineStripAdjacency: + return spv::ExecutionMode::InputLinesAdjacency; + case Maxwell::PrimitiveTopology::TrianglesAdjacency: + case Maxwell::PrimitiveTopology::TriangleStripAdjacency: + return spv::ExecutionMode::InputTrianglesAdjacency; + case Maxwell::PrimitiveTopology::Quads: + UNIMPLEMENTED_MSG("Quads"); + return spv::ExecutionMode::Triangles; + case Maxwell::PrimitiveTopology::QuadStrip: + UNIMPLEMENTED_MSG("QuadStrip"); + return spv::ExecutionMode::Triangles; + case Maxwell::PrimitiveTopology::Polygon: + UNIMPLEMENTED_MSG("Polygon"); + return spv::ExecutionMode::Triangles; + case Maxwell::PrimitiveTopology::Patches: + UNIMPLEMENTED_MSG("Patches"); + return spv::ExecutionMode::Triangles; + } + UNREACHABLE(); + return spv::ExecutionMode::Triangles; +} + +spv::ExecutionMode GetExecutionMode(Tegra::Shader::OutputTopology output_topology) { + switch (output_topology) { + case Tegra::Shader::OutputTopology::PointList: + return spv::ExecutionMode::OutputPoints; + case Tegra::Shader::OutputTopology::LineStrip: + return spv::ExecutionMode::OutputLineStrip; + case Tegra::Shader::OutputTopology::TriangleStrip: + return spv::ExecutionMode::OutputTriangleStrip; + default: + 
UNREACHABLE(); + return spv::ExecutionMode::OutputPoints; + } +} + /// Returns true if an attribute index is one of the 32 generic attributes constexpr bool IsGenericAttribute(Attribute::Index attribute) { return attribute >= Attribute::Index::Attribute_0 && @@ -72,7 +247,7 @@ constexpr bool IsGenericAttribute(Attribute::Index attribute) { } /// Returns the location of a generic attribute -constexpr u32 GetGenericAttributeLocation(Attribute::Index attribute) { +u32 GetGenericAttributeLocation(Attribute::Index attribute) { ASSERT(IsGenericAttribute(attribute)); return static_cast<u32>(attribute) - static_cast<u32>(Attribute::Index::Attribute_0); } @@ -86,20 +261,146 @@ bool IsPrecise(Operation operand) { return false; } -} // namespace - -class ASTDecompiler; -class ExprDecompiler; - -class SPIRVDecompiler : public Sirit::Module { +class SPIRVDecompiler final : public Sirit::Module { public: - explicit SPIRVDecompiler(const VKDevice& device, const ShaderIR& ir, ShaderStage stage) - : Module(0x00010300), device{device}, ir{ir}, stage{stage}, header{ir.GetHeader()} { + explicit SPIRVDecompiler(const VKDevice& device, const ShaderIR& ir, ShaderType stage, + const Specialization& specialization) + : Module(0x00010300), device{device}, ir{ir}, stage{stage}, header{ir.GetHeader()}, + specialization{specialization} { AddCapability(spv::Capability::Shader); + AddCapability(spv::Capability::UniformAndStorageBuffer16BitAccess); + AddCapability(spv::Capability::ImageQuery); + AddCapability(spv::Capability::Image1D); + AddCapability(spv::Capability::ImageBuffer); + AddCapability(spv::Capability::ImageGatherExtended); + AddCapability(spv::Capability::SampledBuffer); + AddCapability(spv::Capability::StorageImageWriteWithoutFormat); + AddCapability(spv::Capability::SubgroupBallotKHR); + AddCapability(spv::Capability::SubgroupVoteKHR); + AddExtension("SPV_KHR_shader_ballot"); + AddExtension("SPV_KHR_subgroup_vote"); AddExtension("SPV_KHR_storage_buffer_storage_class"); 
AddExtension("SPV_KHR_variable_pointers"); + + if (ir.UsesViewportIndex()) { + AddCapability(spv::Capability::MultiViewport); + if (device.IsExtShaderViewportIndexLayerSupported()) { + AddExtension("SPV_EXT_shader_viewport_index_layer"); + AddCapability(spv::Capability::ShaderViewportIndexLayerEXT); + } + } + + if (device.IsFloat16Supported()) { + AddCapability(spv::Capability::Float16); + } + t_scalar_half = Name(TypeFloat(device.IsFloat16Supported() ? 16 : 32), "scalar_half"); + t_half = Name(TypeVector(t_scalar_half, 2), "half"); + + const Id main = Decompile(); + + switch (stage) { + case ShaderType::Vertex: + AddEntryPoint(spv::ExecutionModel::Vertex, main, "main", interfaces); + break; + case ShaderType::TesselationControl: + AddCapability(spv::Capability::Tessellation); + AddEntryPoint(spv::ExecutionModel::TessellationControl, main, "main", interfaces); + AddExecutionMode(main, spv::ExecutionMode::OutputVertices, + header.common2.threads_per_input_primitive); + break; + case ShaderType::TesselationEval: + AddCapability(spv::Capability::Tessellation); + AddEntryPoint(spv::ExecutionModel::TessellationEvaluation, main, "main", interfaces); + AddExecutionMode(main, GetExecutionMode(specialization.tessellation.primitive)); + AddExecutionMode(main, GetExecutionMode(specialization.tessellation.spacing)); + AddExecutionMode(main, specialization.tessellation.clockwise + ? spv::ExecutionMode::VertexOrderCw + : spv::ExecutionMode::VertexOrderCcw); + break; + case ShaderType::Geometry: + AddCapability(spv::Capability::Geometry); + AddEntryPoint(spv::ExecutionModel::Geometry, main, "main", interfaces); + AddExecutionMode(main, GetExecutionMode(specialization.primitive_topology)); + AddExecutionMode(main, GetExecutionMode(header.common3.output_topology)); + AddExecutionMode(main, spv::ExecutionMode::OutputVertices, + header.common4.max_output_vertices); + // TODO(Rodrigo): Where can we get this info from? 
+ AddExecutionMode(main, spv::ExecutionMode::Invocations, 1U); + break; + case ShaderType::Fragment: + AddEntryPoint(spv::ExecutionModel::Fragment, main, "main", interfaces); + AddExecutionMode(main, spv::ExecutionMode::OriginUpperLeft); + if (header.ps.omap.depth) { + AddExecutionMode(main, spv::ExecutionMode::DepthReplacing); + } + break; + case ShaderType::Compute: + const auto workgroup_size = specialization.workgroup_size; + AddExecutionMode(main, spv::ExecutionMode::LocalSize, workgroup_size[0], + workgroup_size[1], workgroup_size[2]); + AddEntryPoint(spv::ExecutionModel::GLCompute, main, "main", interfaces); + break; + } } +private: + Id Decompile() { + DeclareCommon(); + DeclareVertex(); + DeclareTessControl(); + DeclareTessEval(); + DeclareGeometry(); + DeclareFragment(); + DeclareCompute(); + DeclareRegisters(); + DeclarePredicates(); + DeclareLocalMemory(); + DeclareSharedMemory(); + DeclareInternalFlags(); + DeclareInputAttributes(); + DeclareOutputAttributes(); + + u32 binding = specialization.base_binding; + binding = DeclareConstantBuffers(binding); + binding = DeclareGlobalBuffers(binding); + binding = DeclareTexelBuffers(binding); + binding = DeclareSamplers(binding); + binding = DeclareImages(binding); + + const Id main = OpFunction(t_void, {}, TypeFunction(t_void)); + AddLabel(); + + if (ir.IsDecompiled()) { + DeclareFlowVariables(); + DecompileAST(); + } else { + AllocateLabels(); + DecompileBranchMode(); + } + + OpReturn(); + OpFunctionEnd(); + + return main; + } + + void DefinePrologue() { + if (stage == ShaderType::Vertex) { + // Clear Position to avoid reading trash on the Z conversion. 
+ const auto position_index = out_indices.position.value(); + const Id position = AccessElement(t_out_float4, out_vertex, position_index); + OpStore(position, v_varying_default); + + if (specialization.point_size) { + const u32 point_size_index = out_indices.point_size.value(); + const Id out_point_size = AccessElement(t_out_float, out_vertex, point_size_index); + OpStore(out_point_size, Constant(t_float, *specialization.point_size)); + } + } + } + + void DecompileAST(); + void DecompileBranchMode() { const u32 first_address = ir.GetBasicBlocks().begin()->first; const Id loop_label = OpLabel("loop"); @@ -110,14 +411,15 @@ public: std::vector<Sirit::Literal> literals; std::vector<Id> branch_labels; - for (const auto& pair : labels) { - const auto [literal, label] = pair; + for (const auto& [literal, label] : labels) { literals.push_back(literal); branch_labels.push_back(label); } - jmp_to = Emit(OpVariable(TypePointer(spv::StorageClass::Function, t_uint), - spv::StorageClass::Function, Constant(t_uint, first_address))); + jmp_to = OpVariable(TypePointer(spv::StorageClass::Function, t_uint), + spv::StorageClass::Function, Constant(t_uint, first_address)); + AddLocalVariable(jmp_to); + std::tie(ssy_flow_stack, ssy_flow_stack_top) = CreateFlowStack(); std::tie(pbk_flow_stack, pbk_flow_stack_top) = CreateFlowStack(); @@ -127,154 +429,121 @@ public: Name(pbk_flow_stack, "pbk_flow_stack"); Name(pbk_flow_stack_top, "pbk_flow_stack_top"); - Emit(OpBranch(loop_label)); - Emit(loop_label); - Emit(OpLoopMerge(merge_label, continue_label, spv::LoopControlMask::Unroll)); - Emit(OpBranch(dummy_label)); + DefinePrologue(); - Emit(dummy_label); + OpBranch(loop_label); + AddLabel(loop_label); + OpLoopMerge(merge_label, continue_label, spv::LoopControlMask::MaskNone); + OpBranch(dummy_label); + + AddLabel(dummy_label); const Id default_branch = OpLabel(); - const Id jmp_to_load = Emit(OpLoad(t_uint, jmp_to)); - Emit(OpSelectionMerge(jump_label, spv::SelectionControlMask::MaskNone)); 
- Emit(OpSwitch(jmp_to_load, default_branch, literals, branch_labels)); + const Id jmp_to_load = OpLoad(t_uint, jmp_to); + OpSelectionMerge(jump_label, spv::SelectionControlMask::MaskNone); + OpSwitch(jmp_to_load, default_branch, literals, branch_labels); - Emit(default_branch); - Emit(OpReturn()); + AddLabel(default_branch); + OpReturn(); - for (const auto& pair : ir.GetBasicBlocks()) { - const auto& [address, bb] = pair; - Emit(labels.at(address)); + for (const auto& [address, bb] : ir.GetBasicBlocks()) { + AddLabel(labels.at(address)); VisitBasicBlock(bb); const auto next_it = labels.lower_bound(address + 1); const Id next_label = next_it != labels.end() ? next_it->second : default_branch; - Emit(OpBranch(next_label)); + OpBranch(next_label); } - Emit(jump_label); - Emit(OpBranch(continue_label)); - Emit(continue_label); - Emit(OpBranch(loop_label)); - Emit(merge_label); + AddLabel(jump_label); + OpBranch(continue_label); + AddLabel(continue_label); + OpBranch(loop_label); + AddLabel(merge_label); } - void DecompileAST(); +private: + friend class ASTDecompiler; + friend class ExprDecompiler; - void Decompile() { - const bool is_fully_decompiled = ir.IsDecompiled(); - AllocateBindings(); - if (!is_fully_decompiled) { - AllocateLabels(); - } + static constexpr auto INTERNAL_FLAGS_COUNT = static_cast<std::size_t>(InternalFlag::Amount); - DeclareVertex(); - DeclareGeometry(); - DeclareFragment(); - DeclareRegisters(); - DeclarePredicates(); - if (is_fully_decompiled) { - DeclareFlowVariables(); + void AllocateLabels() { + for (const auto& pair : ir.GetBasicBlocks()) { + const u32 address = pair.first; + labels.emplace(address, OpLabel(fmt::format("label_0x{:x}", address))); } - DeclareLocalMemory(); - DeclareInternalFlags(); - DeclareInputAttributes(); - DeclareOutputAttributes(); - DeclareConstantBuffers(); - DeclareGlobalBuffers(); - DeclareSamplers(); + } - execute_function = - Emit(OpFunction(t_void, spv::FunctionControlMask::Inline, TypeFunction(t_void))); - 
Emit(OpLabel()); + void DeclareCommon() { + thread_id = + DeclareInputBuiltIn(spv::BuiltIn::SubgroupLocalInvocationId, t_in_uint, "thread_id"); + } - if (is_fully_decompiled) { - DecompileAST(); - } else { - DecompileBranchMode(); + void DeclareVertex() { + if (stage != ShaderType::Vertex) { + return; } + Id out_vertex_struct; + std::tie(out_vertex_struct, out_indices) = DeclareVertexStruct(); + const Id vertex_ptr = TypePointer(spv::StorageClass::Output, out_vertex_struct); + out_vertex = OpVariable(vertex_ptr, spv::StorageClass::Output); + interfaces.push_back(AddGlobalVariable(Name(out_vertex, "out_vertex"))); - Emit(OpReturn()); - Emit(OpFunctionEnd()); + // Declare input attributes + vertex_index = DeclareInputBuiltIn(spv::BuiltIn::VertexIndex, t_in_uint, "vertex_index"); + instance_index = + DeclareInputBuiltIn(spv::BuiltIn::InstanceIndex, t_in_uint, "instance_index"); } - ShaderEntries GetShaderEntries() const { - ShaderEntries entries; - entries.const_buffers_base_binding = const_buffers_base_binding; - entries.global_buffers_base_binding = global_buffers_base_binding; - entries.samplers_base_binding = samplers_base_binding; - for (const auto& cbuf : ir.GetConstantBuffers()) { - entries.const_buffers.emplace_back(cbuf.second, cbuf.first); - } - for (const auto& gmem_pair : ir.GetGlobalMemory()) { - const auto& [base, usage] = gmem_pair; - entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset); - } - for (const auto& sampler : ir.GetSamplers()) { - entries.samplers.emplace_back(sampler); - } - for (const auto& attribute : ir.GetInputAttributes()) { - if (IsGenericAttribute(attribute)) { - entries.attributes.insert(GetGenericAttributeLocation(attribute)); - } + void DeclareTessControl() { + if (stage != ShaderType::TesselationControl) { + return; } - entries.clip_distances = ir.GetClipDistances(); - entries.shader_length = ir.GetLength(); - entries.entry_function = execute_function; - entries.interfaces = interfaces; - return entries; - } - 
-private: - friend class ASTDecompiler; - friend class ExprDecompiler; + DeclareInputVertexArray(NumInputPatches); + DeclareOutputVertexArray(header.common2.threads_per_input_primitive); - static constexpr auto INTERNAL_FLAGS_COUNT = static_cast<std::size_t>(InternalFlag::Amount); + tess_level_outer = DeclareBuiltIn( + spv::BuiltIn::TessLevelOuter, spv::StorageClass::Output, + TypePointer(spv::StorageClass::Output, TypeArray(t_float, Constant(t_uint, 4U))), + "tess_level_outer"); + Decorate(tess_level_outer, spv::Decoration::Patch); - void AllocateBindings() { - const u32 binding_base = static_cast<u32>(stage) * STAGE_BINDING_STRIDE; - u32 binding_iterator = binding_base; + tess_level_inner = DeclareBuiltIn( + spv::BuiltIn::TessLevelInner, spv::StorageClass::Output, + TypePointer(spv::StorageClass::Output, TypeArray(t_float, Constant(t_uint, 2U))), + "tess_level_inner"); + Decorate(tess_level_inner, spv::Decoration::Patch); - const auto Allocate = [&binding_iterator](std::size_t count) { - const u32 current_binding = binding_iterator; - binding_iterator += static_cast<u32>(count); - return current_binding; - }; - const_buffers_base_binding = Allocate(ir.GetConstantBuffers().size()); - global_buffers_base_binding = Allocate(ir.GetGlobalMemory().size()); - samplers_base_binding = Allocate(ir.GetSamplers().size()); - - ASSERT_MSG(binding_iterator - binding_base < STAGE_BINDING_STRIDE, - "Stage binding stride is too small"); + invocation_id = DeclareInputBuiltIn(spv::BuiltIn::InvocationId, t_in_int, "invocation_id"); } - void AllocateLabels() { - for (const auto& pair : ir.GetBasicBlocks()) { - const u32 address = pair.first; - labels.emplace(address, OpLabel(fmt::format("label_0x{:x}", address))); - } - } - - void DeclareVertex() { - if (stage != ShaderStage::Vertex) + void DeclareTessEval() { + if (stage != ShaderType::TesselationEval) { return; + } + DeclareInputVertexArray(NumInputPatches); + DeclareOutputVertex(); - DeclareVertexRedeclarations(); + tess_coord = 
DeclareInputBuiltIn(spv::BuiltIn::TessCoord, t_in_float3, "tess_coord"); } void DeclareGeometry() { - if (stage != ShaderStage::Geometry) + if (stage != ShaderType::Geometry) { return; - - UNIMPLEMENTED(); + } + const u32 num_input = GetNumPrimitiveTopologyVertices(specialization.primitive_topology); + DeclareInputVertexArray(num_input); + DeclareOutputVertex(); } void DeclareFragment() { - if (stage != ShaderStage::Fragment) + if (stage != ShaderType::Fragment) { return; + } for (u32 rt = 0; rt < static_cast<u32>(frag_colors.size()); ++rt) { - if (!IsRenderTargetUsed(rt)) { + if (!specialization.enabled_rendertargets[rt]) { continue; } @@ -295,10 +564,19 @@ private: interfaces.push_back(frag_depth); } - frag_coord = DeclareBuiltIn(spv::BuiltIn::FragCoord, spv::StorageClass::Input, t_in_float4, - "frag_coord"); - front_facing = DeclareBuiltIn(spv::BuiltIn::FrontFacing, spv::StorageClass::Input, - t_in_bool, "front_facing"); + frag_coord = DeclareInputBuiltIn(spv::BuiltIn::FragCoord, t_in_float4, "frag_coord"); + front_facing = DeclareInputBuiltIn(spv::BuiltIn::FrontFacing, t_in_bool, "front_facing"); + point_coord = DeclareInputBuiltIn(spv::BuiltIn::PointCoord, t_in_float2, "point_coord"); + } + + void DeclareCompute() { + if (stage != ShaderType::Compute) { + return; + } + + workgroup_id = DeclareInputBuiltIn(spv::BuiltIn::WorkgroupId, t_in_uint3, "workgroup_id"); + local_invocation_id = + DeclareInputBuiltIn(spv::BuiltIn::LocalInvocationId, t_in_uint3, "local_invocation_id"); } void DeclareRegisters() { @@ -326,21 +604,44 @@ private: } void DeclareLocalMemory() { - if (const u64 local_memory_size = header.GetLocalMemorySize(); local_memory_size > 0) { - const auto element_count = static_cast<u32>(Common::AlignUp(local_memory_size, 4) / 4); - const Id type_array = TypeArray(t_float, Constant(t_uint, element_count)); - const Id type_pointer = TypePointer(spv::StorageClass::Private, type_array); - Name(type_pointer, "LocalMemory"); + // TODO(Rodrigo): Unstub kernel 
local memory size and pass it from a register at + // specialization time. + const u64 lmem_size = stage == ShaderType::Compute ? 0x400 : header.GetLocalMemorySize(); + if (lmem_size == 0) { + return; + } + const auto element_count = static_cast<u32>(Common::AlignUp(lmem_size, 4) / 4); + const Id type_array = TypeArray(t_float, Constant(t_uint, element_count)); + const Id type_pointer = TypePointer(spv::StorageClass::Private, type_array); + Name(type_pointer, "LocalMemory"); + + local_memory = + OpVariable(type_pointer, spv::StorageClass::Private, ConstantNull(type_array)); + AddGlobalVariable(Name(local_memory, "local_memory")); + } - local_memory = - OpVariable(type_pointer, spv::StorageClass::Private, ConstantNull(type_array)); - AddGlobalVariable(Name(local_memory, "local_memory")); + void DeclareSharedMemory() { + if (stage != ShaderType::Compute) { + return; + } + t_smem_uint = TypePointer(spv::StorageClass::Workgroup, t_uint); + + const u32 smem_size = specialization.shared_memory_size; + if (smem_size == 0) { + // Avoid declaring an empty array. 
+ return; } + const auto element_count = static_cast<u32>(Common::AlignUp(smem_size, 4) / 4); + const Id type_array = TypeArray(t_uint, Constant(t_uint, element_count)); + const Id type_pointer = TypePointer(spv::StorageClass::Workgroup, type_array); + Name(type_pointer, "SharedMemory"); + + shared_memory = OpVariable(type_pointer, spv::StorageClass::Workgroup); + AddGlobalVariable(Name(shared_memory, "shared_memory")); } void DeclareInternalFlags() { - constexpr std::array<const char*, INTERNAL_FLAGS_COUNT> names = {"zero", "sign", "carry", - "overflow"}; + constexpr std::array names = {"zero", "sign", "carry", "overflow"}; for (std::size_t flag = 0; flag < INTERNAL_FLAGS_COUNT; ++flag) { const auto flag_code = static_cast<InternalFlag>(flag); const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false); @@ -348,23 +649,59 @@ private: } } + void DeclareInputVertexArray(u32 length) { + constexpr auto storage = spv::StorageClass::Input; + std::tie(in_indices, in_vertex) = DeclareVertexArray(storage, "in_indices", length); + } + + void DeclareOutputVertexArray(u32 length) { + constexpr auto storage = spv::StorageClass::Output; + std::tie(out_indices, out_vertex) = DeclareVertexArray(storage, "out_indices", length); + } + + std::tuple<VertexIndices, Id> DeclareVertexArray(spv::StorageClass storage_class, + std::string name, u32 length) { + const auto [struct_id, indices] = DeclareVertexStruct(); + const Id vertex_array = TypeArray(struct_id, Constant(t_uint, length)); + const Id vertex_ptr = TypePointer(storage_class, vertex_array); + const Id vertex = OpVariable(vertex_ptr, storage_class); + AddGlobalVariable(Name(vertex, std::move(name))); + interfaces.push_back(vertex); + return {indices, vertex}; + } + + void DeclareOutputVertex() { + Id out_vertex_struct; + std::tie(out_vertex_struct, out_indices) = DeclareVertexStruct(); + const Id out_vertex_ptr = TypePointer(spv::StorageClass::Output, out_vertex_struct); + out_vertex = OpVariable(out_vertex_ptr, 
spv::StorageClass::Output); + interfaces.push_back(AddGlobalVariable(Name(out_vertex, "out_vertex"))); + } + void DeclareInputAttributes() { for (const auto index : ir.GetInputAttributes()) { if (!IsGenericAttribute(index)) { continue; } - UNIMPLEMENTED_IF(stage == ShaderStage::Geometry); - const u32 location = GetGenericAttributeLocation(index); - const Id id = OpVariable(t_in_float4, spv::StorageClass::Input); - Name(AddGlobalVariable(id), fmt::format("in_attr{}", location)); + const auto type_descriptor = GetAttributeType(location); + Id type; + if (IsInputAttributeArray()) { + type = GetTypeVectorDefinitionLut(type_descriptor.type).at(3); + type = TypeArray(type, Constant(t_uint, GetNumInputVertices())); + type = TypePointer(spv::StorageClass::Input, type); + } else { + type = type_descriptor.vector; + } + const Id id = OpVariable(type, spv::StorageClass::Input); + AddGlobalVariable(Name(id, fmt::format("in_attr{}", location))); input_attributes.emplace(index, id); interfaces.push_back(id); Decorate(id, spv::Decoration::Location, location); - if (stage != ShaderStage::Fragment) { + if (stage != ShaderType::Fragment) { continue; } switch (header.ps.GetAttributeUse(location)) { @@ -388,8 +725,21 @@ private: if (!IsGenericAttribute(index)) { continue; } - const auto location = GetGenericAttributeLocation(index); - const Id id = OpVariable(t_out_float4, spv::StorageClass::Output); + const u32 location = GetGenericAttributeLocation(index); + Id type = t_float4; + Id varying_default = v_varying_default; + if (IsOutputAttributeArray()) { + const u32 num = GetNumOutputVertices(); + type = TypeArray(type, Constant(t_uint, num)); + if (device.GetDriverID() != vk::DriverIdKHR::eIntelProprietaryWindows) { + // Intel's proprietary driver fails to setup defaults for arrayed output + // attributes. 
+ varying_default = ConstantComposite(type, std::vector(num, varying_default)); + } + } + type = TypePointer(spv::StorageClass::Output, type); + + const Id id = OpVariable(type, spv::StorageClass::Output, varying_default); Name(AddGlobalVariable(id), fmt::format("out_attr{}", location)); output_attributes.emplace(index, id); interfaces.push_back(id); @@ -398,10 +748,8 @@ private: } } - void DeclareConstantBuffers() { - u32 binding = const_buffers_base_binding; - for (const auto& entry : ir.GetConstantBuffers()) { - const auto [index, size] = entry; + u32 DeclareConstantBuffers(u32 binding) { + for (const auto& [index, size] : ir.GetConstantBuffers()) { const Id type = device.IsKhrUniformBufferStandardLayoutSupported() ? t_cbuf_scalar_ubo : t_cbuf_std140_ubo; const Id id = OpVariable(type, spv::StorageClass::Uniform); @@ -411,12 +759,11 @@ private: Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); constant_buffers.emplace(index, id); } + return binding; } - void DeclareGlobalBuffers() { - u32 binding = global_buffers_base_binding; - for (const auto& entry : ir.GetGlobalMemory()) { - const auto [base, usage] = entry; + u32 DeclareGlobalBuffers(u32 binding) { + for (const auto& [base, usage] : ir.GetGlobalMemory()) { const Id id = OpVariable(t_gmem_ssbo, spv::StorageClass::StorageBuffer); AddGlobalVariable( Name(id, fmt::format("gmem_{}_{}", base.cbuf_index, base.cbuf_offset))); @@ -425,108 +772,213 @@ private: Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); global_buffers.emplace(base, id); } + return binding; } - void DeclareSamplers() { - u32 binding = samplers_base_binding; + u32 DeclareTexelBuffers(u32 binding) { for (const auto& sampler : ir.GetSamplers()) { + if (!sampler.IsBuffer()) { + continue; + } + ASSERT(!sampler.IsArray()); + ASSERT(!sampler.IsShadow()); + + constexpr auto dim = spv::Dim::Buffer; + constexpr int depth = 0; + constexpr int arrayed = 0; + constexpr bool ms = false; + constexpr int sampled = 1; + constexpr auto 
format = spv::ImageFormat::Unknown; + const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format); + const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type); + const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); + AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex()))); + Decorate(id, spv::Decoration::Binding, binding++); + Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); + + texel_buffers.emplace(sampler.GetIndex(), TexelBuffer{image_type, id}); + } + return binding; + } + + u32 DeclareSamplers(u32 binding) { + for (const auto& sampler : ir.GetSamplers()) { + if (sampler.IsBuffer()) { + continue; + } const auto dim = GetSamplerDim(sampler); const int depth = sampler.IsShadow() ? 1 : 0; const int arrayed = sampler.IsArray() ? 1 : 0; - // TODO(Rodrigo): Sampled 1 indicates that the image will be used with a sampler. When - // SULD and SUST instructions are implemented, replace this value. 
- const int sampled = 1; - const Id image_type = - TypeImage(t_float, dim, depth, arrayed, false, sampled, spv::ImageFormat::Unknown); + constexpr bool ms = false; + constexpr int sampled = 1; + constexpr auto format = spv::ImageFormat::Unknown; + const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format); const Id sampled_image_type = TypeSampledImage(image_type); const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, sampled_image_type); const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex()))); + Decorate(id, spv::Decoration::Binding, binding++); + Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); - sampler_images.insert( - {static_cast<u32>(sampler.GetIndex()), {image_type, sampled_image_type, id}}); + sampled_images.emplace(sampler.GetIndex(), + SampledImage{image_type, sampled_image_type, id}); + } + return binding; + } + + u32 DeclareImages(u32 binding) { + for (const auto& image : ir.GetImages()) { + const auto [dim, arrayed] = GetImageDim(image); + constexpr int depth = 0; + constexpr bool ms = false; + constexpr int sampled = 2; // This won't be accessed with a sampler + constexpr auto format = spv::ImageFormat::Unknown; + const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {}); + const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type); + const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); + AddGlobalVariable(Name(id, fmt::format("image_{}", image.GetIndex()))); Decorate(id, spv::Decoration::Binding, binding++); Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); + if (image.IsRead() && !image.IsWritten()) { + Decorate(id, spv::Decoration::NonWritable); + } else if (image.IsWritten() && !image.IsRead()) { + Decorate(id, spv::Decoration::NonReadable); + } + + images.emplace(static_cast<u32>(image.GetIndex()), 
StorageImage{image_type, id}); } + return binding; } - void DeclareVertexRedeclarations() { - vertex_index = DeclareBuiltIn(spv::BuiltIn::VertexIndex, spv::StorageClass::Input, - t_in_uint, "vertex_index"); - instance_index = DeclareBuiltIn(spv::BuiltIn::InstanceIndex, spv::StorageClass::Input, - t_in_uint, "instance_index"); + bool IsInputAttributeArray() const { + return stage == ShaderType::TesselationControl || stage == ShaderType::TesselationEval || + stage == ShaderType::Geometry; + } - bool is_clip_distances_declared = false; - for (const auto index : ir.GetOutputAttributes()) { - if (index == Attribute::Index::ClipDistances0123 || - index == Attribute::Index::ClipDistances4567) { - is_clip_distances_declared = true; - } + bool IsOutputAttributeArray() const { + return stage == ShaderType::TesselationControl; + } + + u32 GetNumInputVertices() const { + switch (stage) { + case ShaderType::Geometry: + return GetNumPrimitiveTopologyVertices(specialization.primitive_topology); + case ShaderType::TesselationControl: + case ShaderType::TesselationEval: + return NumInputPatches; + default: + UNREACHABLE(); + return 1; } + } - std::vector<Id> members; - members.push_back(t_float4); - if (ir.UsesPointSize()) { - members.push_back(t_float); - } - if (is_clip_distances_declared) { - members.push_back(TypeArray(t_float, Constant(t_uint, 8))); - } - - const Id gl_per_vertex_struct = Name(TypeStruct(members), "PerVertex"); - Decorate(gl_per_vertex_struct, spv::Decoration::Block); - - u32 declaration_index = 0; - const auto MemberDecorateBuiltIn = [&](spv::BuiltIn builtin, std::string name, - bool condition) { - if (!condition) - return u32{}; - MemberName(gl_per_vertex_struct, declaration_index, name); - MemberDecorate(gl_per_vertex_struct, declaration_index, spv::Decoration::BuiltIn, - static_cast<u32>(builtin)); - return declaration_index++; + u32 GetNumOutputVertices() const { + switch (stage) { + case ShaderType::TesselationControl: + return 
header.common2.threads_per_input_primitive; + default: + UNREACHABLE(); + return 1; + } + } + + std::tuple<Id, VertexIndices> DeclareVertexStruct() { + struct BuiltIn { + Id type; + spv::BuiltIn builtin; + const char* name; + }; + std::vector<BuiltIn> members; + members.reserve(4); + + const auto AddBuiltIn = [&](Id type, spv::BuiltIn builtin, const char* name) { + const auto index = static_cast<u32>(members.size()); + members.push_back(BuiltIn{type, builtin, name}); + return index; }; - position_index = MemberDecorateBuiltIn(spv::BuiltIn::Position, "position", true); - point_size_index = - MemberDecorateBuiltIn(spv::BuiltIn::PointSize, "point_size", ir.UsesPointSize()); - clip_distances_index = MemberDecorateBuiltIn(spv::BuiltIn::ClipDistance, "clip_distances", - is_clip_distances_declared); + VertexIndices indices; + indices.position = AddBuiltIn(t_float4, spv::BuiltIn::Position, "position"); - const Id type_pointer = TypePointer(spv::StorageClass::Output, gl_per_vertex_struct); - per_vertex = OpVariable(type_pointer, spv::StorageClass::Output); - AddGlobalVariable(Name(per_vertex, "per_vertex")); - interfaces.push_back(per_vertex); + if (ir.UsesViewportIndex()) { + if (stage != ShaderType::Vertex || device.IsExtShaderViewportIndexLayerSupported()) { + indices.viewport = AddBuiltIn(t_int, spv::BuiltIn::ViewportIndex, "viewport_index"); + } else { + LOG_ERROR(Render_Vulkan, + "Shader requires ViewportIndex but it's not supported on this " + "stage with this device."); + } + } + + if (ir.UsesPointSize() || specialization.point_size) { + indices.point_size = AddBuiltIn(t_float, spv::BuiltIn::PointSize, "point_size"); + } + + const auto& output_attributes = ir.GetOutputAttributes(); + const bool declare_clip_distances = + std::any_of(output_attributes.begin(), output_attributes.end(), [](const auto& index) { + return index == Attribute::Index::ClipDistances0123 || + index == Attribute::Index::ClipDistances4567; + }); + if (declare_clip_distances) { + 
indices.clip_distances = AddBuiltIn(TypeArray(t_float, Constant(t_uint, 8)), + spv::BuiltIn::ClipDistance, "clip_distances"); + } + + std::vector<Id> member_types; + member_types.reserve(members.size()); + for (std::size_t i = 0; i < members.size(); ++i) { + member_types.push_back(members[i].type); + } + const Id per_vertex_struct = Name(TypeStruct(member_types), "PerVertex"); + Decorate(per_vertex_struct, spv::Decoration::Block); + + for (std::size_t index = 0; index < members.size(); ++index) { + const auto& member = members[index]; + MemberName(per_vertex_struct, static_cast<u32>(index), member.name); + MemberDecorate(per_vertex_struct, static_cast<u32>(index), spv::Decoration::BuiltIn, + static_cast<u32>(member.builtin)); + } + + return {per_vertex_struct, indices}; } void VisitBasicBlock(const NodeBlock& bb) { for (const auto& node : bb) { - static_cast<void>(Visit(node)); + [[maybe_unused]] const Type type = Visit(node).type; + ASSERT(type == Type::Void); } } - Id Visit(const Node& node) { + Expression Visit(const Node& node) { if (const auto operation = std::get_if<OperationNode>(&*node)) { + if (const auto amend_index = operation->GetAmendIndex()) { + [[maybe_unused]] const Type type = Visit(ir.GetAmendNode(*amend_index)).type; + ASSERT(type == Type::Void); + } const auto operation_index = static_cast<std::size_t>(operation->GetCode()); const auto decompiler = operation_decompilers[operation_index]; if (decompiler == nullptr) { UNREACHABLE_MSG("Operation decompiler {} not defined", operation_index); } return (this->*decompiler)(*operation); + } - } else if (const auto gpr = std::get_if<GprNode>(&*node)) { + if (const auto gpr = std::get_if<GprNode>(&*node)) { const u32 index = gpr->GetIndex(); if (index == Register::ZeroIndex) { - return Constant(t_float, 0.0f); + return {v_float_zero, Type::Float}; } - return Emit(OpLoad(t_float, registers.at(index))); + return {OpLoad(t_float, registers.at(index)), Type::Float}; + } - } else if (const auto immediate = 
std::get_if<ImmediateNode>(&*node)) { - return BitcastTo<Type::Float>(Constant(t_uint, immediate->GetValue())); + if (const auto immediate = std::get_if<ImmediateNode>(&*node)) { + return {Constant(t_uint, immediate->GetValue()), Type::Uint}; + } - } else if (const auto predicate = std::get_if<PredicateNode>(&*node)) { + if (const auto predicate = std::get_if<PredicateNode>(&*node)) { const auto value = [&]() -> Id { switch (const auto index = predicate->GetIndex(); index) { case Tegra::Shader::Pred::UnusedIndex: @@ -534,74 +986,108 @@ private: case Tegra::Shader::Pred::NeverExecute: return v_false; default: - return Emit(OpLoad(t_bool, predicates.at(index))); + return OpLoad(t_bool, predicates.at(index)); } }(); if (predicate->IsNegated()) { - return Emit(OpLogicalNot(t_bool, value)); + return {OpLogicalNot(t_bool, value), Type::Bool}; } - return value; + return {value, Type::Bool}; + } - } else if (const auto abuf = std::get_if<AbufNode>(&*node)) { + if (const auto abuf = std::get_if<AbufNode>(&*node)) { const auto attribute = abuf->GetIndex(); - const auto element = abuf->GetElement(); + const u32 element = abuf->GetElement(); + const auto& buffer = abuf->GetBuffer(); + + const auto ArrayPass = [&](Id pointer_type, Id composite, std::vector<u32> indices) { + std::vector<Id> members; + members.reserve(std::size(indices) + 1); + + if (buffer && IsInputAttributeArray()) { + members.push_back(AsUint(Visit(buffer))); + } + for (const u32 index : indices) { + members.push_back(Constant(t_uint, index)); + } + return OpAccessChain(pointer_type, composite, members); + }; switch (attribute) { - case Attribute::Index::Position: - if (stage != ShaderStage::Fragment) { - UNIMPLEMENTED(); - break; - } else { + case Attribute::Index::Position: { + if (stage == ShaderType::Fragment) { if (element == 3) { - return Constant(t_float, 1.0f); + return {Constant(t_float, 1.0f), Type::Float}; } - return Emit(OpLoad(t_float, AccessElement(t_in_float, frag_coord, element))); + return 
{OpLoad(t_float, AccessElement(t_in_float, frag_coord, element)), + Type::Float}; } + const std::vector elements = {in_indices.position.value(), element}; + return {OpLoad(t_float, ArrayPass(t_in_float, in_vertex, elements)), Type::Float}; + } + case Attribute::Index::PointCoord: { + switch (element) { + case 0: + case 1: + return {OpCompositeExtract(t_float, OpLoad(t_float2, point_coord), element), + Type::Float}; + } + UNIMPLEMENTED_MSG("Unimplemented point coord element={}", element); + return {v_float_zero, Type::Float}; + } case Attribute::Index::TessCoordInstanceIDVertexID: // TODO(Subv): Find out what the values are for the first two elements when inside a // vertex shader, and what's the value of the fourth element when inside a Tess Eval // shader. - ASSERT(stage == ShaderStage::Vertex); switch (element) { + case 0: + case 1: + return {OpLoad(t_float, AccessElement(t_in_float, tess_coord, element)), + Type::Float}; case 2: - return BitcastFrom<Type::Uint>(Emit(OpLoad(t_uint, instance_index))); + return {OpLoad(t_uint, instance_index), Type::Uint}; case 3: - return BitcastFrom<Type::Uint>(Emit(OpLoad(t_uint, vertex_index))); + return {OpLoad(t_uint, vertex_index), Type::Uint}; } UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element); - return Constant(t_float, 0); + return {Constant(t_uint, 0U), Type::Uint}; case Attribute::Index::FrontFacing: // TODO(Subv): Find out what the values are for the other elements. 
- ASSERT(stage == ShaderStage::Fragment); + ASSERT(stage == ShaderType::Fragment); if (element == 3) { - const Id is_front_facing = Emit(OpLoad(t_bool, front_facing)); - const Id true_value = - BitcastTo<Type::Float>(Constant(t_int, static_cast<s32>(-1))); - const Id false_value = BitcastTo<Type::Float>(Constant(t_int, 0)); - return Emit(OpSelect(t_float, is_front_facing, true_value, false_value)); + const Id is_front_facing = OpLoad(t_bool, front_facing); + const Id true_value = Constant(t_int, static_cast<s32>(-1)); + const Id false_value = Constant(t_int, 0); + return {OpSelect(t_int, is_front_facing, true_value, false_value), Type::Int}; } UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element); - return Constant(t_float, 0.0f); + return {v_float_zero, Type::Float}; default: if (IsGenericAttribute(attribute)) { - const Id pointer = - AccessElement(t_in_float, input_attributes.at(attribute), element); - return Emit(OpLoad(t_float, pointer)); + const u32 location = GetGenericAttributeLocation(attribute); + const auto type_descriptor = GetAttributeType(location); + const Type type = type_descriptor.type; + const Id attribute_id = input_attributes.at(attribute); + const std::vector elements = {element}; + const Id pointer = ArrayPass(type_descriptor.scalar, attribute_id, elements); + return {OpLoad(GetTypeDefinition(type), pointer), type}; } break; } UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute)); + return {v_float_zero, Type::Float}; + } - } else if (const auto cbuf = std::get_if<CbufNode>(&*node)) { + if (const auto cbuf = std::get_if<CbufNode>(&*node)) { const Node& offset = cbuf->GetOffset(); const Id buffer_id = constant_buffers.at(cbuf->GetIndex()); Id pointer{}; if (device.IsKhrUniformBufferStandardLayoutSupported()) { - const Id buffer_offset = Emit(OpShiftRightLogical( - t_uint, BitcastTo<Type::Uint>(Visit(offset)), Constant(t_uint, 2u))); - pointer = Emit( - OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 
0u), buffer_offset)); + const Id buffer_offset = + OpShiftRightLogical(t_uint, AsUint(Visit(offset)), Constant(t_uint, 2U)); + pointer = + OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0U), buffer_offset); } else { Id buffer_index{}; Id buffer_element{}; @@ -613,53 +1099,80 @@ private: buffer_element = Constant(t_uint, (offset_imm / 4) % 4); } else if (std::holds_alternative<OperationNode>(*offset)) { // Indirect access - const Id offset_id = BitcastTo<Type::Uint>(Visit(offset)); - const Id unsafe_offset = Emit(OpUDiv(t_uint, offset_id, Constant(t_uint, 4))); - const Id final_offset = Emit(OpUMod( - t_uint, unsafe_offset, Constant(t_uint, MAX_CONSTBUFFER_ELEMENTS - 1))); - buffer_index = Emit(OpUDiv(t_uint, final_offset, Constant(t_uint, 4))); - buffer_element = Emit(OpUMod(t_uint, final_offset, Constant(t_uint, 4))); + const Id offset_id = AsUint(Visit(offset)); + const Id unsafe_offset = OpUDiv(t_uint, offset_id, Constant(t_uint, 4)); + const Id final_offset = + OpUMod(t_uint, unsafe_offset, Constant(t_uint, MaxConstBufferElements - 1)); + buffer_index = OpUDiv(t_uint, final_offset, Constant(t_uint, 4)); + buffer_element = OpUMod(t_uint, final_offset, Constant(t_uint, 4)); } else { UNREACHABLE_MSG("Unmanaged offset node type"); } - pointer = Emit(OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0), - buffer_index, buffer_element)); + pointer = OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0), buffer_index, + buffer_element); } - return Emit(OpLoad(t_float, pointer)); + return {OpLoad(t_float, pointer), Type::Float}; + } - } else if (const auto gmem = std::get_if<GmemNode>(&*node)) { + if (const auto gmem = std::get_if<GmemNode>(&*node)) { const Id gmem_buffer = global_buffers.at(gmem->GetDescriptor()); - const Id real = BitcastTo<Type::Uint>(Visit(gmem->GetRealAddress())); - const Id base = BitcastTo<Type::Uint>(Visit(gmem->GetBaseAddress())); + const Id real = AsUint(Visit(gmem->GetRealAddress())); + const Id base = 
AsUint(Visit(gmem->GetBaseAddress())); + + Id offset = OpISub(t_uint, real, base); + offset = OpUDiv(t_uint, offset, Constant(t_uint, 4U)); + return {OpLoad(t_float, + OpAccessChain(t_gmem_float, gmem_buffer, Constant(t_uint, 0U), offset)), + Type::Float}; + } + + if (const auto lmem = std::get_if<LmemNode>(&*node)) { + Id address = AsUint(Visit(lmem->GetAddress())); + address = OpShiftRightLogical(t_uint, address, Constant(t_uint, 2U)); + const Id pointer = OpAccessChain(t_prv_float, local_memory, address); + return {OpLoad(t_float, pointer), Type::Float}; + } + + if (const auto smem = std::get_if<SmemNode>(&*node)) { + Id address = AsUint(Visit(smem->GetAddress())); + address = OpShiftRightLogical(t_uint, address, Constant(t_uint, 2U)); + const Id pointer = OpAccessChain(t_smem_uint, shared_memory, address); + return {OpLoad(t_uint, pointer), Type::Uint}; + } - Id offset = Emit(OpISub(t_uint, real, base)); - offset = Emit(OpUDiv(t_uint, offset, Constant(t_uint, 4u))); - return Emit(OpLoad(t_float, Emit(OpAccessChain(t_gmem_float, gmem_buffer, - Constant(t_uint, 0u), offset)))); + if (const auto internal_flag = std::get_if<InternalFlagNode>(&*node)) { + const Id flag = internal_flags.at(static_cast<std::size_t>(internal_flag->GetFlag())); + return {OpLoad(t_bool, flag), Type::Bool}; + } - } else if (const auto conditional = std::get_if<ConditionalNode>(&*node)) { + if (const auto conditional = std::get_if<ConditionalNode>(&*node)) { + if (const auto amend_index = conditional->GetAmendIndex()) { + [[maybe_unused]] const Type type = Visit(ir.GetAmendNode(*amend_index)).type; + ASSERT(type == Type::Void); + } // It's invalid to call conditional on nested nodes, use an operation instead const Id true_label = OpLabel(); const Id skip_label = OpLabel(); - const Id condition = Visit(conditional->GetCondition()); - Emit(OpSelectionMerge(skip_label, spv::SelectionControlMask::MaskNone)); - Emit(OpBranchConditional(condition, true_label, skip_label)); - Emit(true_label); + 
const Id condition = AsBool(Visit(conditional->GetCondition())); + OpSelectionMerge(skip_label, spv::SelectionControlMask::MaskNone); + OpBranchConditional(condition, true_label, skip_label); + AddLabel(true_label); - ++conditional_nest_count; + conditional_branch_set = true; + inside_branch = false; VisitBasicBlock(conditional->GetCode()); - --conditional_nest_count; - - if (inside_branch == 0) { - Emit(OpBranch(skip_label)); + conditional_branch_set = false; + if (!inside_branch) { + OpBranch(skip_label); } else { - inside_branch--; + inside_branch = false; } - Emit(skip_label); + AddLabel(skip_label); return {}; + } - } else if (const auto comment = std::get_if<CommentNode>(&*node)) { - Name(Emit(OpUndef(t_void)), comment->GetText()); + if (const auto comment = std::get_if<CommentNode>(&*node)) { + Name(OpUndef(t_void), comment->GetText()); return {}; } @@ -668,94 +1181,126 @@ private: } template <Id (Module::*func)(Id, Id), Type result_type, Type type_a = result_type> - Id Unary(Operation operation) { + Expression Unary(Operation operation) { const Id type_def = GetTypeDefinition(result_type); - const Id op_a = VisitOperand<type_a>(operation, 0); + const Id op_a = As(Visit(operation[0]), type_a); - const Id value = BitcastFrom<result_type>(Emit((this->*func)(type_def, op_a))); + const Id value = (this->*func)(type_def, op_a); if (IsPrecise(operation)) { Decorate(value, spv::Decoration::NoContraction); } - return value; + return {value, result_type}; } template <Id (Module::*func)(Id, Id, Id), Type result_type, Type type_a = result_type, Type type_b = type_a> - Id Binary(Operation operation) { + Expression Binary(Operation operation) { const Id type_def = GetTypeDefinition(result_type); - const Id op_a = VisitOperand<type_a>(operation, 0); - const Id op_b = VisitOperand<type_b>(operation, 1); + const Id op_a = As(Visit(operation[0]), type_a); + const Id op_b = As(Visit(operation[1]), type_b); - const Id value = 
BitcastFrom<result_type>(Emit((this->*func)(type_def, op_a, op_b))); + const Id value = (this->*func)(type_def, op_a, op_b); if (IsPrecise(operation)) { Decorate(value, spv::Decoration::NoContraction); } - return value; + return {value, result_type}; } template <Id (Module::*func)(Id, Id, Id, Id), Type result_type, Type type_a = result_type, Type type_b = type_a, Type type_c = type_b> - Id Ternary(Operation operation) { + Expression Ternary(Operation operation) { const Id type_def = GetTypeDefinition(result_type); - const Id op_a = VisitOperand<type_a>(operation, 0); - const Id op_b = VisitOperand<type_b>(operation, 1); - const Id op_c = VisitOperand<type_c>(operation, 2); + const Id op_a = As(Visit(operation[0]), type_a); + const Id op_b = As(Visit(operation[1]), type_b); + const Id op_c = As(Visit(operation[2]), type_c); - const Id value = BitcastFrom<result_type>(Emit((this->*func)(type_def, op_a, op_b, op_c))); + const Id value = (this->*func)(type_def, op_a, op_b, op_c); if (IsPrecise(operation)) { Decorate(value, spv::Decoration::NoContraction); } - return value; + return {value, result_type}; } template <Id (Module::*func)(Id, Id, Id, Id, Id), Type result_type, Type type_a = result_type, Type type_b = type_a, Type type_c = type_b, Type type_d = type_c> - Id Quaternary(Operation operation) { + Expression Quaternary(Operation operation) { const Id type_def = GetTypeDefinition(result_type); - const Id op_a = VisitOperand<type_a>(operation, 0); - const Id op_b = VisitOperand<type_b>(operation, 1); - const Id op_c = VisitOperand<type_c>(operation, 2); - const Id op_d = VisitOperand<type_d>(operation, 3); + const Id op_a = As(Visit(operation[0]), type_a); + const Id op_b = As(Visit(operation[1]), type_b); + const Id op_c = As(Visit(operation[2]), type_c); + const Id op_d = As(Visit(operation[3]), type_d); - const Id value = - BitcastFrom<result_type>(Emit((this->*func)(type_def, op_a, op_b, op_c, op_d))); + const Id value = (this->*func)(type_def, op_a, op_b, 
op_c, op_d); if (IsPrecise(operation)) { Decorate(value, spv::Decoration::NoContraction); } - return value; + return {value, result_type}; } - Id Assign(Operation operation) { + Expression Assign(Operation operation) { const Node& dest = operation[0]; const Node& src = operation[1]; - Id target{}; + Expression target{}; if (const auto gpr = std::get_if<GprNode>(&*dest)) { if (gpr->GetIndex() == Register::ZeroIndex) { // Writing to Register::ZeroIndex is a no op return {}; } - target = registers.at(gpr->GetIndex()); + target = {registers.at(gpr->GetIndex()), Type::Float}; } else if (const auto abuf = std::get_if<AbufNode>(&*dest)) { - target = [&]() -> Id { + const auto& buffer = abuf->GetBuffer(); + const auto ArrayPass = [&](Id pointer_type, Id composite, std::vector<u32> indices) { + std::vector<Id> members; + members.reserve(std::size(indices) + 1); + + if (buffer && IsOutputAttributeArray()) { + members.push_back(AsUint(Visit(buffer))); + } + for (const u32 index : indices) { + members.push_back(Constant(t_uint, index)); + } + return OpAccessChain(pointer_type, composite, members); + }; + + target = [&]() -> Expression { + const u32 element = abuf->GetElement(); switch (const auto attribute = abuf->GetIndex(); attribute) { - case Attribute::Index::Position: - return AccessElement(t_out_float, per_vertex, position_index, - abuf->GetElement()); + case Attribute::Index::Position: { + const u32 index = out_indices.position.value(); + return {ArrayPass(t_out_float, out_vertex, {index, element}), Type::Float}; + } case Attribute::Index::LayerViewportPointSize: - UNIMPLEMENTED_IF(abuf->GetElement() != 3); - return AccessElement(t_out_float, per_vertex, point_size_index); - case Attribute::Index::ClipDistances0123: - return AccessElement(t_out_float, per_vertex, clip_distances_index, - abuf->GetElement()); - case Attribute::Index::ClipDistances4567: - return AccessElement(t_out_float, per_vertex, clip_distances_index, - abuf->GetElement() + 4); + switch (element) { + 
case 2: { + if (!out_indices.viewport) { + return {}; + } + const u32 index = out_indices.viewport.value(); + return {AccessElement(t_out_int, out_vertex, index), Type::Int}; + } + case 3: { + const auto index = out_indices.point_size.value(); + return {AccessElement(t_out_float, out_vertex, index), Type::Float}; + } + default: + UNIMPLEMENTED_MSG("LayerViewportPoint element={}", abuf->GetElement()); + return {}; + } + case Attribute::Index::ClipDistances0123: { + const u32 index = out_indices.clip_distances.value(); + return {AccessElement(t_out_float, out_vertex, index, element), Type::Float}; + } + case Attribute::Index::ClipDistances4567: { + const u32 index = out_indices.clip_distances.value(); + return {AccessElement(t_out_float, out_vertex, index, element + 4), + Type::Float}; + } default: if (IsGenericAttribute(attribute)) { - return AccessElement(t_out_float, output_attributes.at(attribute), - abuf->GetElement()); + const Id composite = output_attributes.at(attribute); + return {ArrayPass(t_out_float, composite, {element}), Type::Float}; } UNIMPLEMENTED_MSG("Unhandled output attribute: {}", static_cast<u32>(attribute)); @@ -763,67 +1308,154 @@ private: } }(); + } else if (const auto patch = std::get_if<PatchNode>(&*dest)) { + target = [&]() -> Expression { + const u32 offset = patch->GetOffset(); + switch (offset) { + case 0: + case 1: + case 2: + case 3: + return {AccessElement(t_out_float, tess_level_outer, offset % 4), Type::Float}; + case 4: + case 5: + return {AccessElement(t_out_float, tess_level_inner, offset % 4), Type::Float}; + } + UNIMPLEMENTED_MSG("Unhandled patch output offset: {}", offset); + return {}; + }(); + } else if (const auto lmem = std::get_if<LmemNode>(&*dest)) { - Id address = BitcastTo<Type::Uint>(Visit(lmem->GetAddress())); - address = Emit(OpUDiv(t_uint, address, Constant(t_uint, 4))); - target = Emit(OpAccessChain(t_prv_float, local_memory, {address})); + Id address = AsUint(Visit(lmem->GetAddress())); + address = 
OpUDiv(t_uint, address, Constant(t_uint, 4)); + target = {OpAccessChain(t_prv_float, local_memory, address), Type::Float}; + + } else if (const auto smem = std::get_if<SmemNode>(&*dest)) { + ASSERT(stage == ShaderType::Compute); + Id address = AsUint(Visit(smem->GetAddress())); + address = OpShiftRightLogical(t_uint, address, Constant(t_uint, 2U)); + target = {OpAccessChain(t_smem_uint, shared_memory, address), Type::Uint}; + + } else if (const auto gmem = std::get_if<GmemNode>(&*dest)) { + const Id real = AsUint(Visit(gmem->GetRealAddress())); + const Id base = AsUint(Visit(gmem->GetBaseAddress())); + const Id diff = OpISub(t_uint, real, base); + const Id offset = OpShiftRightLogical(t_uint, diff, Constant(t_uint, 2)); + + const Id gmem_buffer = global_buffers.at(gmem->GetDescriptor()); + target = {OpAccessChain(t_gmem_float, gmem_buffer, Constant(t_uint, 0), offset), + Type::Float}; + + } else { + UNIMPLEMENTED(); } - Emit(OpStore(target, Visit(src))); + OpStore(target.id, As(Visit(src), target.type)); return {}; } - Id FCastHalf0(Operation operation) { - UNIMPLEMENTED(); - return {}; + template <u32 offset> + Expression FCastHalf(Operation operation) { + const Id value = AsHalfFloat(Visit(operation[0])); + return {GetFloatFromHalfScalar(OpCompositeExtract(t_scalar_half, value, offset)), + Type::Float}; } - Id FCastHalf1(Operation operation) { - UNIMPLEMENTED(); - return {}; - } + Expression FSwizzleAdd(Operation operation) { + const Id minus = Constant(t_float, -1.0f); + const Id plus = v_float_one; + const Id zero = v_float_zero; + const Id lut_a = ConstantComposite(t_float4, minus, plus, minus, zero); + const Id lut_b = ConstantComposite(t_float4, minus, minus, plus, minus); - Id HNegate(Operation operation) { - UNIMPLEMENTED(); - return {}; + Id mask = OpLoad(t_uint, thread_id); + mask = OpBitwiseAnd(t_uint, mask, Constant(t_uint, 3)); + mask = OpShiftLeftLogical(t_uint, mask, Constant(t_uint, 1)); + mask = OpShiftRightLogical(t_uint, 
AsUint(Visit(operation[2])), mask); + mask = OpBitwiseAnd(t_uint, mask, Constant(t_uint, 3)); + + const Id modifier_a = OpVectorExtractDynamic(t_float, lut_a, mask); + const Id modifier_b = OpVectorExtractDynamic(t_float, lut_b, mask); + + const Id op_a = OpFMul(t_float, AsFloat(Visit(operation[0])), modifier_a); + const Id op_b = OpFMul(t_float, AsFloat(Visit(operation[1])), modifier_b); + return {OpFAdd(t_float, op_a, op_b), Type::Float}; } - Id HClamp(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HNegate(Operation operation) { + const bool is_f16 = device.IsFloat16Supported(); + const Id minus_one = Constant(t_scalar_half, is_f16 ? 0xbc00 : 0xbf800000); + const Id one = Constant(t_scalar_half, is_f16 ? 0x3c00 : 0x3f800000); + const auto GetNegate = [&](std::size_t index) { + return OpSelect(t_scalar_half, AsBool(Visit(operation[index])), minus_one, one); + }; + const Id negation = OpCompositeConstruct(t_half, GetNegate(1), GetNegate(2)); + return {OpFMul(t_half, AsHalfFloat(Visit(operation[0])), negation), Type::HalfFloat}; } - Id HCastFloat(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HClamp(Operation operation) { + const auto Pack = [&](std::size_t index) { + const Id scalar = GetHalfScalarFromFloat(AsFloat(Visit(operation[index]))); + return OpCompositeConstruct(t_half, scalar, scalar); + }; + const Id value = AsHalfFloat(Visit(operation[0])); + const Id min = Pack(1); + const Id max = Pack(2); + + const Id clamped = OpFClamp(t_half, value, min, max); + if (IsPrecise(operation)) { + Decorate(clamped, spv::Decoration::NoContraction); + } + return {clamped, Type::HalfFloat}; } - Id HUnpack(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HCastFloat(Operation operation) { + const Id value = GetHalfScalarFromFloat(AsFloat(Visit(operation[0]))); + return {OpCompositeConstruct(t_half, value, Constant(t_scalar_half, 0)), Type::HalfFloat}; } - Id HMergeF32(Operation operation) { - UNIMPLEMENTED(); - 
return {}; + Expression HUnpack(Operation operation) { + Expression operand = Visit(operation[0]); + const auto type = std::get<Tegra::Shader::HalfType>(operation.GetMeta()); + if (type == Tegra::Shader::HalfType::H0_H1) { + return operand; + } + const auto value = [&] { + switch (std::get<Tegra::Shader::HalfType>(operation.GetMeta())) { + case Tegra::Shader::HalfType::F32: + return GetHalfScalarFromFloat(AsFloat(operand)); + case Tegra::Shader::HalfType::H0_H0: + return OpCompositeExtract(t_scalar_half, AsHalfFloat(operand), 0); + case Tegra::Shader::HalfType::H1_H1: + return OpCompositeExtract(t_scalar_half, AsHalfFloat(operand), 1); + default: + UNREACHABLE(); + return ConstantNull(t_half); + } + }(); + return {OpCompositeConstruct(t_half, value, value), Type::HalfFloat}; } - Id HMergeH0(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HMergeF32(Operation operation) { + const Id value = AsHalfFloat(Visit(operation[0])); + return {GetFloatFromHalfScalar(OpCompositeExtract(t_scalar_half, value, 0)), Type::Float}; } - Id HMergeH1(Operation operation) { - UNIMPLEMENTED(); - return {}; + template <u32 offset> + Expression HMergeHN(Operation operation) { + const Id target = AsHalfFloat(Visit(operation[0])); + const Id source = AsHalfFloat(Visit(operation[1])); + const Id object = OpCompositeExtract(t_scalar_half, source, offset); + return {OpCompositeInsert(t_half, object, target, offset), Type::HalfFloat}; } - Id HPack2(Operation operation) { - UNIMPLEMENTED(); - return {}; + Expression HPack2(Operation operation) { + const Id low = GetHalfScalarFromFloat(AsFloat(Visit(operation[0]))); + const Id high = GetHalfScalarFromFloat(AsFloat(Visit(operation[1]))); + return {OpCompositeConstruct(t_half, low, high), Type::HalfFloat}; } - Id LogicalAssign(Operation operation) { + Expression LogicalAssign(Operation operation) { const Node& dest = operation[0]; const Node& src = operation[1]; @@ -844,106 +1476,198 @@ private: target = 
internal_flags.at(static_cast<u32>(flag->GetFlag())); } - Emit(OpStore(target, Visit(src))); + OpStore(target, AsBool(Visit(src))); return {}; } - Id LogicalPick2(Operation operation) { - UNIMPLEMENTED(); - return {}; + Id GetTextureSampler(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + ASSERT(!meta.sampler.IsBuffer()); + + const auto& entry = sampled_images.at(meta.sampler.GetIndex()); + return OpLoad(entry.sampled_image_type, entry.sampler); } - Id LogicalAnd2(Operation operation) { - UNIMPLEMENTED(); - return {}; + Id GetTextureImage(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + const u32 index = meta.sampler.GetIndex(); + if (meta.sampler.IsBuffer()) { + const auto& entry = texel_buffers.at(index); + return OpLoad(entry.image_type, entry.image); + } else { + const auto& entry = sampled_images.at(index); + return OpImage(entry.image_type, GetTextureSampler(operation)); + } } - Id GetTextureSampler(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - const auto entry = sampler_images.at(static_cast<u32>(meta->sampler.GetIndex())); - return Emit(OpLoad(entry.sampled_image_type, entry.sampler)); + Id GetImage(Operation operation) { + const auto& meta = std::get<MetaImage>(operation.GetMeta()); + const auto entry = images.at(meta.image.GetIndex()); + return OpLoad(entry.image_type, entry.image); } - Id GetTextureImage(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - const auto entry = sampler_images.at(static_cast<u32>(meta->sampler.GetIndex())); - return Emit(OpImage(entry.image_type, GetTextureSampler(operation))); + Id AssembleVector(const std::vector<Id>& coords, Type type) { + const Id coords_type = GetTypeVectorDefinitionLut(type).at(coords.size() - 1); + return coords.size() == 1 ? 
coords[0] : OpCompositeConstruct(coords_type, coords); } - Id GetTextureCoordinates(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); + Id GetCoordinates(Operation operation, Type type) { std::vector<Id> coords; for (std::size_t i = 0; i < operation.GetOperandsCount(); ++i) { - coords.push_back(Visit(operation[i])); + coords.push_back(As(Visit(operation[i]), type)); } - if (meta->sampler.IsArray()) { - const Id array_integer = BitcastTo<Type::Int>(Visit(meta->array)); - coords.push_back(Emit(OpConvertSToF(t_float, array_integer))); + if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) { + // Add array coordinate for textures + if (meta->sampler.IsArray()) { + Id array = AsInt(Visit(meta->array)); + if (type == Type::Float) { + array = OpConvertSToF(t_float, array); + } + coords.push_back(array); + } } - if (meta->sampler.IsShadow()) { - coords.push_back(Visit(meta->depth_compare)); + return AssembleVector(coords, type); + } + + Id GetOffsetCoordinates(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + std::vector<Id> coords; + coords.reserve(meta.aoffi.size()); + for (const auto& coord : meta.aoffi) { + coords.push_back(AsInt(Visit(coord))); } + return AssembleVector(coords, Type::Int); + } - const std::array<Id, 4> t_float_lut = {nullptr, t_float2, t_float3, t_float4}; - return coords.size() == 1 - ? 
coords[0] - : Emit(OpCompositeConstruct(t_float_lut.at(coords.size() - 1), coords)); + std::pair<Id, Id> GetDerivatives(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + const auto& derivatives = meta.derivates; + ASSERT(derivatives.size() % 2 == 0); + + const std::size_t components = derivatives.size() / 2; + std::vector<Id> dx, dy; + dx.reserve(components); + dy.reserve(components); + for (std::size_t index = 0; index < components; ++index) { + dx.push_back(AsFloat(Visit(derivatives.at(index * 2 + 0)))); + dy.push_back(AsFloat(Visit(derivatives.at(index * 2 + 1)))); + } + return {AssembleVector(dx, Type::Float), AssembleVector(dy, Type::Float)}; } - Id GetTextureElement(Operation operation, Id sample_value) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - ASSERT(meta); - return Emit(OpCompositeExtract(t_float, sample_value, meta->element)); + Expression GetTextureElement(Operation operation, Id sample_value, Type type) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + const auto type_def = GetTypeDefinition(type); + return {OpCompositeExtract(type_def, sample_value, meta.element), type}; } - Id Texture(Operation operation) { - const Id texture = Emit(OpImageSampleImplicitLod(t_float4, GetTextureSampler(operation), - GetTextureCoordinates(operation))); - return GetTextureElement(operation, texture); + Expression Texture(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + + const bool can_implicit = stage == ShaderType::Fragment; + const Id sampler = GetTextureSampler(operation); + const Id coords = GetCoordinates(operation, Type::Float); + + std::vector<Id> operands; + spv::ImageOperandsMask mask{}; + if (meta.bias) { + mask = mask | spv::ImageOperandsMask::Bias; + operands.push_back(AsFloat(Visit(meta.bias))); + } + + if (!can_implicit) { + mask = mask | spv::ImageOperandsMask::Lod; + operands.push_back(v_float_zero); + } + + if 
(!meta.aoffi.empty()) { + mask = mask | spv::ImageOperandsMask::Offset; + operands.push_back(GetOffsetCoordinates(operation)); + } + + if (meta.depth_compare) { + // Depth sampling + UNIMPLEMENTED_IF(meta.bias); + const Id dref = AsFloat(Visit(meta.depth_compare)); + if (can_implicit) { + return { + OpImageSampleDrefImplicitLod(t_float, sampler, coords, dref, mask, operands), + Type::Float}; + } else { + return { + OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands), + Type::Float}; + } + } + + Id texture; + if (can_implicit) { + texture = OpImageSampleImplicitLod(t_float4, sampler, coords, mask, operands); + } else { + texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands); + } + return GetTextureElement(operation, texture, Type::Float); } - Id TextureLod(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - const Id texture = Emit(OpImageSampleExplicitLod( - t_float4, GetTextureSampler(operation), GetTextureCoordinates(operation), - spv::ImageOperandsMask::Lod, Visit(meta->lod))); - return GetTextureElement(operation, texture); + Expression TextureLod(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + + const Id sampler = GetTextureSampler(operation); + const Id coords = GetCoordinates(operation, Type::Float); + const Id lod = AsFloat(Visit(meta.lod)); + + spv::ImageOperandsMask mask = spv::ImageOperandsMask::Lod; + std::vector<Id> operands{lod}; + + if (!meta.aoffi.empty()) { + mask = mask | spv::ImageOperandsMask::Offset; + operands.push_back(GetOffsetCoordinates(operation)); + } + + if (meta.sampler.IsShadow()) { + const Id dref = AsFloat(Visit(meta.depth_compare)); + return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands), + Type::Float}; + } + const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands); + return GetTextureElement(operation, texture, Type::Float); } - Id 
TextureGather(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - const auto coords = GetTextureCoordinates(operation); + Expression TextureGather(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(!meta.aoffi.empty()); - Id texture; - if (meta->sampler.IsShadow()) { - texture = Emit(OpImageDrefGather(t_float4, GetTextureSampler(operation), coords, - Visit(meta->component))); + const Id coords = GetCoordinates(operation, Type::Float); + Id texture{}; + if (meta.sampler.IsShadow()) { + texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords, + AsFloat(Visit(meta.depth_compare))); } else { u32 component_value = 0; - if (meta->component) { - const auto component = std::get_if<ImmediateNode>(&*meta->component); + if (meta.component) { + const auto component = std::get_if<ImmediateNode>(&*meta.component); ASSERT_MSG(component, "Component is not an immediate value"); component_value = component->GetValue(); } - texture = Emit(OpImageGather(t_float4, GetTextureSampler(operation), coords, - Constant(t_uint, component_value))); + texture = OpImageGather(t_float4, GetTextureSampler(operation), coords, + Constant(t_uint, component_value)); } - - return GetTextureElement(operation, texture); + return GetTextureElement(operation, texture, Type::Float); } - Id TextureQueryDimensions(Operation operation) { - const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); - const auto image_id = GetTextureImage(operation); - AddCapability(spv::Capability::ImageQuery); + Expression TextureQueryDimensions(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(!meta.aoffi.empty()); + UNIMPLEMENTED_IF(meta.depth_compare); - if (meta->element == 3) { - return BitcastTo<Type::Float>(Emit(OpImageQueryLevels(t_int, image_id))); + const auto image_id = GetTextureImage(operation); + if (meta.element == 3) { + return 
{OpImageQueryLevels(t_int, image_id), Type::Int}; } - const Id lod = VisitOperand<Type::Uint>(operation, 0); + const Id lod = AsUint(Visit(operation[0])); const std::size_t coords_count = [&]() { - switch (const auto type = meta->sampler.GetType(); type) { + switch (const auto type = meta.sampler.GetType(); type) { case Tegra::Shader::TextureType::Texture1D: return 1; case Tegra::Shader::TextureType::Texture2D: @@ -957,136 +1681,190 @@ private: } }(); - if (meta->element >= coords_count) { - return Constant(t_float, 0.0f); + if (meta.element >= coords_count) { + return {v_float_zero, Type::Float}; } const std::array<Id, 3> types = {t_int, t_int2, t_int3}; - const Id sizes = Emit(OpImageQuerySizeLod(types.at(coords_count - 1), image_id, lod)); - const Id size = Emit(OpCompositeExtract(t_int, sizes, meta->element)); - return BitcastTo<Type::Float>(size); + const Id sizes = OpImageQuerySizeLod(types.at(coords_count - 1), image_id, lod); + const Id size = OpCompositeExtract(t_int, sizes, meta.element); + return {size, Type::Int}; + } + + Expression TextureQueryLod(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(!meta.aoffi.empty()); + UNIMPLEMENTED_IF(meta.depth_compare); + + if (meta.element >= 2) { + UNREACHABLE_MSG("Invalid element"); + return {v_float_zero, Type::Float}; + } + const auto sampler_id = GetTextureSampler(operation); + + const Id multiplier = Constant(t_float, 256.0f); + const Id multipliers = ConstantComposite(t_float2, multiplier, multiplier); + + const Id coords = GetCoordinates(operation, Type::Float); + Id size = OpImageQueryLod(t_float2, sampler_id, coords); + size = OpFMul(t_float2, size, multipliers); + size = OpConvertFToS(t_int2, size); + return GetTextureElement(operation, size, Type::Int); + } + + Expression TexelFetch(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(meta.depth_compare); + + const Id image = 
GetTextureImage(operation); + const Id coords = GetCoordinates(operation, Type::Int); + Id fetch; + if (meta.lod && !meta.sampler.IsBuffer()) { + fetch = OpImageFetch(t_float4, image, coords, spv::ImageOperandsMask::Lod, + AsInt(Visit(meta.lod))); + } else { + fetch = OpImageFetch(t_float4, image, coords); + } + return GetTextureElement(operation, fetch, Type::Float); } - Id TextureQueryLod(Operation operation) { + Expression TextureGradient(Operation operation) { + const auto& meta = std::get<MetaTexture>(operation.GetMeta()); + UNIMPLEMENTED_IF(!meta.aoffi.empty()); + + const Id sampler = GetTextureSampler(operation); + const Id coords = GetCoordinates(operation, Type::Float); + const auto [dx, dy] = GetDerivatives(operation); + const std::vector grad = {dx, dy}; + + static constexpr auto mask = spv::ImageOperandsMask::Grad; + const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, grad); + return GetTextureElement(operation, texture, Type::Float); + } + + Expression ImageLoad(Operation operation) { UNIMPLEMENTED(); return {}; } - Id TexelFetch(Operation operation) { - UNIMPLEMENTED(); + Expression ImageStore(Operation operation) { + const auto meta{std::get<MetaImage>(operation.GetMeta())}; + std::vector<Id> colors; + for (const auto& value : meta.values) { + colors.push_back(AsUint(Visit(value))); + } + + const Id coords = GetCoordinates(operation, Type::Int); + const Id texel = OpCompositeConstruct(t_uint4, colors); + + OpImageWrite(GetImage(operation), coords, texel, {}); return {}; } - Id ImageLoad(Operation operation) { + Expression AtomicImageAdd(Operation operation) { UNIMPLEMENTED(); return {}; } - Id ImageStore(Operation operation) { + Expression AtomicImageMin(Operation operation) { UNIMPLEMENTED(); return {}; } - Id AtomicImageAdd(Operation operation) { + Expression AtomicImageMax(Operation operation) { UNIMPLEMENTED(); return {}; } - Id AtomicImageAnd(Operation operation) { + Expression AtomicImageAnd(Operation operation) { 
UNIMPLEMENTED(); return {}; } - Id AtomicImageOr(Operation operation) { + Expression AtomicImageOr(Operation operation) { UNIMPLEMENTED(); return {}; } - Id AtomicImageXor(Operation operation) { + Expression AtomicImageXor(Operation operation) { UNIMPLEMENTED(); return {}; } - Id AtomicImageExchange(Operation operation) { + Expression AtomicImageExchange(Operation operation) { UNIMPLEMENTED(); return {}; } - Id Branch(Operation operation) { - const auto target = std::get_if<ImmediateNode>(&*operation[0]); - UNIMPLEMENTED_IF(!target); - - Emit(OpStore(jmp_to, Constant(t_uint, target->GetValue()))); - Emit(OpBranch(continue_label)); - inside_branch = conditional_nest_count; - if (conditional_nest_count == 0) { - Emit(OpLabel()); + Expression Branch(Operation operation) { + const auto& target = std::get<ImmediateNode>(*operation[0]); + OpStore(jmp_to, Constant(t_uint, target.GetValue())); + OpBranch(continue_label); + inside_branch = true; + if (!conditional_branch_set) { + AddLabel(); } return {}; } - Id BranchIndirect(Operation operation) { - const Id op_a = VisitOperand<Type::Uint>(operation, 0); + Expression BranchIndirect(Operation operation) { + const Id op_a = AsUint(Visit(operation[0])); - Emit(OpStore(jmp_to, op_a)); - Emit(OpBranch(continue_label)); - inside_branch = conditional_nest_count; - if (conditional_nest_count == 0) { - Emit(OpLabel()); + OpStore(jmp_to, op_a); + OpBranch(continue_label); + inside_branch = true; + if (!conditional_branch_set) { + AddLabel(); } return {}; } - Id PushFlowStack(Operation operation) { - const auto target = std::get_if<ImmediateNode>(&*operation[0]); - ASSERT(target); - + Expression PushFlowStack(Operation operation) { + const auto& target = std::get<ImmediateNode>(*operation[0]); const auto [flow_stack, flow_stack_top] = GetFlowStack(operation); - const Id current = Emit(OpLoad(t_uint, flow_stack_top)); - const Id next = Emit(OpIAdd(t_uint, current, Constant(t_uint, 1))); - const Id access = 
Emit(OpAccessChain(t_func_uint, flow_stack, current)); + const Id current = OpLoad(t_uint, flow_stack_top); + const Id next = OpIAdd(t_uint, current, Constant(t_uint, 1)); + const Id access = OpAccessChain(t_func_uint, flow_stack, current); - Emit(OpStore(access, Constant(t_uint, target->GetValue()))); - Emit(OpStore(flow_stack_top, next)); + OpStore(access, Constant(t_uint, target.GetValue())); + OpStore(flow_stack_top, next); return {}; } - Id PopFlowStack(Operation operation) { + Expression PopFlowStack(Operation operation) { const auto [flow_stack, flow_stack_top] = GetFlowStack(operation); - const Id current = Emit(OpLoad(t_uint, flow_stack_top)); - const Id previous = Emit(OpISub(t_uint, current, Constant(t_uint, 1))); - const Id access = Emit(OpAccessChain(t_func_uint, flow_stack, previous)); - const Id target = Emit(OpLoad(t_uint, access)); - - Emit(OpStore(flow_stack_top, previous)); - Emit(OpStore(jmp_to, target)); - Emit(OpBranch(continue_label)); - inside_branch = conditional_nest_count; - if (conditional_nest_count == 0) { - Emit(OpLabel()); + const Id current = OpLoad(t_uint, flow_stack_top); + const Id previous = OpISub(t_uint, current, Constant(t_uint, 1)); + const Id access = OpAccessChain(t_func_uint, flow_stack, previous); + const Id target = OpLoad(t_uint, access); + + OpStore(flow_stack_top, previous); + OpStore(jmp_to, target); + OpBranch(continue_label); + inside_branch = true; + if (!conditional_branch_set) { + AddLabel(); } return {}; } - Id PreExit() { - switch (stage) { - case ShaderStage::Vertex: { - // TODO(Rodrigo): We should use VK_EXT_depth_range_unrestricted instead, but it doesn't - // seem to be working on Nvidia's drivers and Intel (mesa and blob) doesn't support it. 
- const Id z_pointer = AccessElement(t_out_float, per_vertex, position_index, 2u); - Id depth = Emit(OpLoad(t_float, z_pointer)); - depth = Emit(OpFAdd(t_float, depth, Constant(t_float, 1.0f))); - depth = Emit(OpFMul(t_float, depth, Constant(t_float, 0.5f))); - Emit(OpStore(z_pointer, depth)); - break; + void PreExit() { + if (stage == ShaderType::Vertex && specialization.ndc_minus_one_to_one) { + const u32 position_index = out_indices.position.value(); + const Id z_pointer = AccessElement(t_out_float, out_vertex, position_index, 2U); + const Id w_pointer = AccessElement(t_out_float, out_vertex, position_index, 3U); + Id depth = OpLoad(t_float, z_pointer); + depth = OpFAdd(t_float, depth, OpLoad(t_float, w_pointer)); + depth = OpFMul(t_float, depth, Constant(t_float, 0.5f)); + OpStore(z_pointer, depth); } - case ShaderStage::Fragment: { + if (stage == ShaderType::Fragment) { const auto SafeGetRegister = [&](u32 reg) { // TODO(Rodrigo): Replace with contains once C++20 releases if (const auto it = registers.find(reg); it != registers.end()) { - return Emit(OpLoad(t_float, it->second)); + return OpLoad(t_float, it->second); } - return Constant(t_float, 0.0f); + return v_float_zero; }; UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, @@ -1098,159 +1876,145 @@ private: // rendertargets/components are skipped in the register assignment. u32 current_reg = 0; for (u32 rt = 0; rt < Maxwell::NumRenderTargets; ++rt) { + if (!specialization.enabled_rendertargets[rt]) { + // Skip rendertargets that are not enabled + continue; + } // TODO(Subv): Figure out how dual-source blending is configured in the Switch. 
for (u32 component = 0; component < 4; ++component) { + const Id pointer = AccessElement(t_out_float, frag_colors.at(rt), component); if (header.ps.IsColorComponentOutputEnabled(rt, component)) { - Emit(OpStore(AccessElement(t_out_float, frag_colors.at(rt), component), - SafeGetRegister(current_reg))); + OpStore(pointer, SafeGetRegister(current_reg)); ++current_reg; + } else { + OpStore(pointer, component == 3 ? v_float_one : v_float_zero); } } } if (header.ps.omap.depth) { // The depth output is always 2 registers after the last color output, and // current_reg already contains one past the last color register. - Emit(OpStore(frag_depth, SafeGetRegister(current_reg + 1))); + OpStore(frag_depth, SafeGetRegister(current_reg + 1)); } - break; - } } - - return {}; } - Id Exit(Operation operation) { + Expression Exit(Operation operation) { PreExit(); - inside_branch = conditional_nest_count; - if (conditional_nest_count > 0) { - Emit(OpReturn()); + inside_branch = true; + if (conditional_branch_set) { + OpReturn(); } else { const Id dummy = OpLabel(); - Emit(OpBranch(dummy)); - Emit(dummy); - Emit(OpReturn()); - Emit(OpLabel()); + OpBranch(dummy); + AddLabel(dummy); + OpReturn(); + AddLabel(); } return {}; } - Id Discard(Operation operation) { - inside_branch = conditional_nest_count; - if (conditional_nest_count > 0) { - Emit(OpKill()); + Expression Discard(Operation operation) { + inside_branch = true; + if (conditional_branch_set) { + OpKill(); } else { const Id dummy = OpLabel(); - Emit(OpBranch(dummy)); - Emit(dummy); - Emit(OpKill()); - Emit(OpLabel()); + OpBranch(dummy); + AddLabel(dummy); + OpKill(); + AddLabel(); } return {}; } - Id EmitVertex(Operation operation) { - UNIMPLEMENTED(); + Expression EmitVertex(Operation) { + OpEmitVertex(); return {}; } - Id EndPrimitive(Operation operation) { - UNIMPLEMENTED(); + Expression EndPrimitive(Operation operation) { + OpEndPrimitive(); return {}; } - Id YNegate(Operation operation) { - UNIMPLEMENTED(); - return {}; + 
Expression InvocationId(Operation) { + return {OpLoad(t_int, invocation_id), Type::Int}; } - template <u32 element> - Id LocalInvocationId(Operation) { - UNIMPLEMENTED(); - return {}; + Expression YNegate(Operation) { + LOG_WARNING(Render_Vulkan, "(STUBBED)"); + return {Constant(t_float, 1.0f), Type::Float}; } template <u32 element> - Id WorkGroupId(Operation) { - UNIMPLEMENTED(); - return {}; - } - - Id BallotThread(Operation) { - UNIMPLEMENTED(); - return {}; + Expression LocalInvocationId(Operation) { + const Id id = OpLoad(t_uint3, local_invocation_id); + return {OpCompositeExtract(t_uint, id, element), Type::Uint}; } - Id VoteAll(Operation) { - UNIMPLEMENTED(); - return {}; - } - - Id VoteAny(Operation) { - UNIMPLEMENTED(); - return {}; + template <u32 element> + Expression WorkGroupId(Operation operation) { + const Id id = OpLoad(t_uint3, workgroup_id); + return {OpCompositeExtract(t_uint, id, element), Type::Uint}; } - Id VoteEqual(Operation) { - UNIMPLEMENTED(); - return {}; - } + Expression BallotThread(Operation operation) { + const Id predicate = AsBool(Visit(operation[0])); + const Id ballot = OpSubgroupBallotKHR(t_uint4, predicate); - Id ShuffleIndexed(Operation) { - UNIMPLEMENTED(); - return {}; - } + if (!device.IsWarpSizePotentiallyBiggerThanGuest()) { + // Guest-like devices can just return the first index. + return {OpCompositeExtract(t_uint, ballot, 0U), Type::Uint}; + } - Id ShuffleUp(Operation) { - UNIMPLEMENTED(); - return {}; + // The others will have to return what is local to the current thread. + // For instance a device with a warp size of 64 will return the upper uint when the current + // thread is 38. 
+ const Id tid = OpLoad(t_uint, thread_id); + const Id thread_index = OpShiftRightLogical(t_uint, tid, Constant(t_uint, 5)); + return {OpVectorExtractDynamic(t_uint, ballot, thread_index), Type::Uint}; } - Id ShuffleDown(Operation) { - UNIMPLEMENTED(); - return {}; + template <Id (Module::*func)(Id, Id)> + Expression Vote(Operation operation) { + // TODO(Rodrigo): Handle devices with different warp sizes + const Id predicate = AsBool(Visit(operation[0])); + return {(this->*func)(t_bool, predicate), Type::Bool}; } - Id ShuffleButterfly(Operation) { - UNIMPLEMENTED(); - return {}; + Expression ThreadId(Operation) { + return {OpLoad(t_uint, thread_id), Type::Uint}; } - Id InRangeShuffleIndexed(Operation) { - UNIMPLEMENTED(); - return {}; - } - - Id InRangeShuffleUp(Operation) { - UNIMPLEMENTED(); - return {}; + Expression ShuffleIndexed(Operation operation) { + const Id value = AsFloat(Visit(operation[0])); + const Id index = AsUint(Visit(operation[1])); + return {OpSubgroupReadInvocationKHR(t_float, value, index), Type::Float}; } - Id InRangeShuffleDown(Operation) { - UNIMPLEMENTED(); - return {}; - } + Expression MemoryBarrierGL(Operation) { + const auto scope = spv::Scope::Device; + const auto semantics = + spv::MemorySemanticsMask::AcquireRelease | spv::MemorySemanticsMask::UniformMemory | + spv::MemorySemanticsMask::WorkgroupMemory | + spv::MemorySemanticsMask::AtomicCounterMemory | spv::MemorySemanticsMask::ImageMemory; - Id InRangeShuffleButterfly(Operation) { - UNIMPLEMENTED(); + OpMemoryBarrier(Constant(t_uint, static_cast<u32>(scope)), + Constant(t_uint, static_cast<u32>(semantics))); return {}; } - Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, - const std::string& name) { + Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, std::string name) { const Id id = OpVariable(type, storage); Decorate(id, spv::Decoration::BuiltIn, static_cast<u32>(builtin)); - AddGlobalVariable(Name(id, name)); + 
AddGlobalVariable(Name(id, std::move(name))); interfaces.push_back(id); return id; } - bool IsRenderTargetUsed(u32 rt) const { - for (u32 component = 0; component < 4; ++component) { - if (header.ps.IsColorComponentOutputEnabled(rt, component)) { - return true; - } - } - return false; + Id DeclareInputBuiltIn(spv::BuiltIn builtin, Id type, std::string name) { + return DeclareBuiltIn(builtin, spv::StorageClass::Input, type, std::move(name)); } template <typename... Args> @@ -1261,66 +2025,148 @@ private: members.push_back(Constant(t_uint, element)); } - return Emit(OpAccessChain(pointer_type, composite, members)); + return OpAccessChain(pointer_type, composite, members); } - template <Type type> - Id VisitOperand(Operation operation, std::size_t operand_index) { - const Id value = Visit(operation[operand_index]); - - switch (type) { + Id As(Expression expr, Type wanted_type) { + switch (wanted_type) { case Type::Bool: + return AsBool(expr); case Type::Bool2: + return AsBool2(expr); case Type::Float: - return value; + return AsFloat(expr); case Type::Int: - return Emit(OpBitcast(t_int, value)); + return AsInt(expr); case Type::Uint: - return Emit(OpBitcast(t_uint, value)); + return AsUint(expr); case Type::HalfFloat: - UNIMPLEMENTED(); + return AsHalfFloat(expr); + default: + UNREACHABLE(); + return expr.id; } - UNREACHABLE(); - return value; } - template <Type type> - Id BitcastFrom(Id value) { - switch (type) { - case Type::Bool: - case Type::Bool2: + Id AsBool(Expression expr) { + ASSERT(expr.type == Type::Bool); + return expr.id; + } + + Id AsBool2(Expression expr) { + ASSERT(expr.type == Type::Bool2); + return expr.id; + } + + Id AsFloat(Expression expr) { + switch (expr.type) { case Type::Float: - return value; + return expr.id; case Type::Int: case Type::Uint: - return Emit(OpBitcast(t_float, value)); + return OpBitcast(t_float, expr.id); case Type::HalfFloat: - UNIMPLEMENTED(); + if (device.IsFloat16Supported()) { + return OpBitcast(t_float, expr.id); + } + 
return OpBitcast(t_float, OpPackHalf2x16(t_uint, expr.id)); + default: + UNREACHABLE(); + return expr.id; } - UNREACHABLE(); - return value; } - template <Type type> - Id BitcastTo(Id value) { - switch (type) { - case Type::Bool: - case Type::Bool2: + Id AsInt(Expression expr) { + switch (expr.type) { + case Type::Int: + return expr.id; + case Type::Float: + case Type::Uint: + return OpBitcast(t_int, expr.id); + case Type::HalfFloat: + if (device.IsFloat16Supported()) { + return OpBitcast(t_int, expr.id); + } + return OpPackHalf2x16(t_int, expr.id); + default: UNREACHABLE(); + return expr.id; + } + } + + Id AsUint(Expression expr) { + switch (expr.type) { + case Type::Uint: + return expr.id; case Type::Float: - return Emit(OpBitcast(t_float, value)); case Type::Int: - return Emit(OpBitcast(t_int, value)); - case Type::Uint: - return Emit(OpBitcast(t_uint, value)); + return OpBitcast(t_uint, expr.id); case Type::HalfFloat: - UNIMPLEMENTED(); + if (device.IsFloat16Supported()) { + return OpBitcast(t_uint, expr.id); + } + return OpPackHalf2x16(t_uint, expr.id); + default: + UNREACHABLE(); + return expr.id; + } + } + + Id AsHalfFloat(Expression expr) { + switch (expr.type) { + case Type::HalfFloat: + return expr.id; + case Type::Float: + case Type::Int: + case Type::Uint: + if (device.IsFloat16Supported()) { + return OpBitcast(t_half, expr.id); + } + return OpUnpackHalf2x16(t_half, AsUint(expr)); + default: + UNREACHABLE(); + return expr.id; + } + } + + Id GetHalfScalarFromFloat(Id value) { + if (device.IsFloat16Supported()) { + return OpFConvert(t_scalar_half, value); + } + return value; + } + + Id GetFloatFromHalfScalar(Id value) { + if (device.IsFloat16Supported()) { + return OpFConvert(t_float, value); } - UNREACHABLE(); return value; } - Id GetTypeDefinition(Type type) { + AttributeType GetAttributeType(u32 location) const { + if (stage != ShaderType::Vertex) { + return {Type::Float, t_in_float, t_in_float4}; + } + switch 
(specialization.attribute_types.at(location)) { + case Maxwell::VertexAttribute::Type::SignedNorm: + case Maxwell::VertexAttribute::Type::UnsignedNorm: + case Maxwell::VertexAttribute::Type::Float: + return {Type::Float, t_in_float, t_in_float4}; + case Maxwell::VertexAttribute::Type::SignedInt: + return {Type::Int, t_in_int, t_in_int4}; + case Maxwell::VertexAttribute::Type::UnsignedInt: + return {Type::Uint, t_in_uint, t_in_uint4}; + case Maxwell::VertexAttribute::Type::UnsignedScaled: + case Maxwell::VertexAttribute::Type::SignedScaled: + UNIMPLEMENTED(); + return {Type::Float, t_in_float, t_in_float4}; + default: + UNREACHABLE(); + return {Type::Float, t_in_float, t_in_float4}; + } + } + + Id GetTypeDefinition(Type type) const { switch (type) { case Type::Bool: return t_bool; @@ -1333,10 +2179,25 @@ private: case Type::Uint: return t_uint; case Type::HalfFloat: + return t_half; + default: + UNREACHABLE(); + return {}; + } + } + + std::array<Id, 4> GetTypeVectorDefinitionLut(Type type) const { + switch (type) { + case Type::Float: + return {nullptr, t_float2, t_float3, t_float4}; + case Type::Int: + return {nullptr, t_int2, t_int3, t_int4}; + case Type::Uint: + return {nullptr, t_uint2, t_uint3, t_uint4}; + default: UNIMPLEMENTED(); + return {}; } - UNREACHABLE(); - return {}; } std::tuple<Id, Id> CreateFlowStack() { @@ -1346,9 +2207,11 @@ private: constexpr auto storage_class = spv::StorageClass::Function; const Id flow_stack_type = TypeArray(t_uint, Constant(t_uint, FLOW_STACK_SIZE)); - const Id stack = Emit(OpVariable(TypePointer(storage_class, flow_stack_type), storage_class, - ConstantNull(flow_stack_type))); - const Id top = Emit(OpVariable(t_func_uint, storage_class, Constant(t_uint, 0))); + const Id stack = OpVariable(TypePointer(storage_class, flow_stack_type), storage_class, + ConstantNull(flow_stack_type)); + const Id top = OpVariable(t_func_uint, storage_class, Constant(t_uint, 0)); + AddLocalVariable(stack); + AddLocalVariable(top); return 
std::tie(stack, top); } @@ -1377,8 +2240,8 @@ private: &SPIRVDecompiler::Unary<&Module::OpFNegate, Type::Float>, &SPIRVDecompiler::Unary<&Module::OpFAbs, Type::Float>, &SPIRVDecompiler::Ternary<&Module::OpFClamp, Type::Float>, - &SPIRVDecompiler::FCastHalf0, - &SPIRVDecompiler::FCastHalf1, + &SPIRVDecompiler::FCastHalf<0>, + &SPIRVDecompiler::FCastHalf<1>, &SPIRVDecompiler::Binary<&Module::OpFMin, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpFMax, Type::Float>, &SPIRVDecompiler::Unary<&Module::OpCos, Type::Float>, @@ -1393,6 +2256,7 @@ private: &SPIRVDecompiler::Unary<&Module::OpTrunc, Type::Float>, &SPIRVDecompiler::Unary<&Module::OpConvertSToF, Type::Float, Type::Int>, &SPIRVDecompiler::Unary<&Module::OpConvertUToF, Type::Float, Type::Uint>, + &SPIRVDecompiler::FSwizzleAdd, &SPIRVDecompiler::Binary<&Module::OpIAdd, Type::Int>, &SPIRVDecompiler::Binary<&Module::OpIMul, Type::Int>, @@ -1414,6 +2278,7 @@ private: &SPIRVDecompiler::Quaternary<&Module::OpBitFieldInsert, Type::Int>, &SPIRVDecompiler::Ternary<&Module::OpBitFieldSExtract, Type::Int>, &SPIRVDecompiler::Unary<&Module::OpBitCount, Type::Int>, + &SPIRVDecompiler::Unary<&Module::OpFindSMsb, Type::Int>, &SPIRVDecompiler::Binary<&Module::OpIAdd, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpIMul, Type::Uint>, @@ -1424,7 +2289,7 @@ private: &SPIRVDecompiler::Unary<&Module::OpBitcast, Type::Uint, Type::Int>, &SPIRVDecompiler::Binary<&Module::OpShiftLeftLogical, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Uint>, - &SPIRVDecompiler::Binary<&Module::OpShiftRightArithmetic, Type::Uint>, + &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpBitwiseAnd, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpBitwiseOr, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpBitwiseXor, Type::Uint>, @@ -1432,6 +2297,7 @@ private: &SPIRVDecompiler::Quaternary<&Module::OpBitFieldInsert, Type::Uint>, 
&SPIRVDecompiler::Ternary<&Module::OpBitFieldUExtract, Type::Uint>, &SPIRVDecompiler::Unary<&Module::OpBitCount, Type::Uint>, + &SPIRVDecompiler::Unary<&Module::OpFindUMsb, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpFAdd, Type::HalfFloat>, &SPIRVDecompiler::Binary<&Module::OpFMul, Type::HalfFloat>, @@ -1442,8 +2308,8 @@ private: &SPIRVDecompiler::HCastFloat, &SPIRVDecompiler::HUnpack, &SPIRVDecompiler::HMergeF32, - &SPIRVDecompiler::HMergeH0, - &SPIRVDecompiler::HMergeH1, + &SPIRVDecompiler::HMergeHN<0>, + &SPIRVDecompiler::HMergeHN<1>, &SPIRVDecompiler::HPack2, &SPIRVDecompiler::LogicalAssign, @@ -1451,8 +2317,9 @@ private: &SPIRVDecompiler::Binary<&Module::OpLogicalOr, Type::Bool>, &SPIRVDecompiler::Binary<&Module::OpLogicalNotEqual, Type::Bool>, &SPIRVDecompiler::Unary<&Module::OpLogicalNot, Type::Bool>, - &SPIRVDecompiler::LogicalPick2, - &SPIRVDecompiler::LogicalAnd2, + &SPIRVDecompiler::Binary<&Module::OpVectorExtractDynamic, Type::Bool, Type::Bool2, + Type::Uint>, + &SPIRVDecompiler::Unary<&Module::OpAll, Type::Bool, Type::Bool2>, &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::Float>, @@ -1460,7 +2327,7 @@ private: &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool, Type::Float>, - &SPIRVDecompiler::Unary<&Module::OpIsNan, Type::Bool>, + &SPIRVDecompiler::Unary<&Module::OpIsNan, Type::Bool, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpSLessThan, Type::Bool, Type::Int>, &SPIRVDecompiler::Binary<&Module::OpIEqual, Type::Bool, Type::Int>, @@ -1476,19 +2343,19 @@ private: &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>, &SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>, - &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, 
Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool2, Type::HalfFloat>, // TODO(Rodrigo): Should these use the OpFUnord* variants? 
- &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool, Type::HalfFloat>, - &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool2, Type::HalfFloat>, + &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool2, Type::HalfFloat>, &SPIRVDecompiler::Texture, &SPIRVDecompiler::TextureLod, @@ -1496,6 +2363,7 @@ private: &SPIRVDecompiler::TextureQueryDimensions, &SPIRVDecompiler::TextureQueryLod, &SPIRVDecompiler::TexelFetch, + &SPIRVDecompiler::TextureGradient, &SPIRVDecompiler::ImageLoad, &SPIRVDecompiler::ImageStore, @@ -1515,6 +2383,7 @@ private: &SPIRVDecompiler::EmitVertex, &SPIRVDecompiler::EndPrimitive, + &SPIRVDecompiler::InvocationId, &SPIRVDecompiler::YNegate, &SPIRVDecompiler::LocalInvocationId<0>, &SPIRVDecompiler::LocalInvocationId<1>, @@ -1524,28 +2393,22 @@ private: &SPIRVDecompiler::WorkGroupId<2>, &SPIRVDecompiler::BallotThread, - &SPIRVDecompiler::VoteAll, - &SPIRVDecompiler::VoteAny, - &SPIRVDecompiler::VoteEqual, + &SPIRVDecompiler::Vote<&Module::OpSubgroupAllKHR>, + &SPIRVDecompiler::Vote<&Module::OpSubgroupAnyKHR>, + &SPIRVDecompiler::Vote<&Module::OpSubgroupAllEqualKHR>, + &SPIRVDecompiler::ThreadId, &SPIRVDecompiler::ShuffleIndexed, - &SPIRVDecompiler::ShuffleUp, - 
&SPIRVDecompiler::ShuffleDown, - &SPIRVDecompiler::ShuffleButterfly, - - &SPIRVDecompiler::InRangeShuffleIndexed, - &SPIRVDecompiler::InRangeShuffleUp, - &SPIRVDecompiler::InRangeShuffleDown, - &SPIRVDecompiler::InRangeShuffleButterfly, + + &SPIRVDecompiler::MemoryBarrierGL, }; static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); const VKDevice& device; const ShaderIR& ir; - const ShaderStage stage; + const ShaderType stage; const Tegra::Shader::Header header; - u64 conditional_nest_count{}; - u64 inside_branch{}; + const Specialization& specialization; const Id t_void = Name(TypeVoid(), "void"); @@ -1573,20 +2436,28 @@ private: const Id t_func_uint = Name(TypePointer(spv::StorageClass::Function, t_uint), "func_uint"); const Id t_in_bool = Name(TypePointer(spv::StorageClass::Input, t_bool), "in_bool"); + const Id t_in_int = Name(TypePointer(spv::StorageClass::Input, t_int), "in_int"); + const Id t_in_int4 = Name(TypePointer(spv::StorageClass::Input, t_int4), "in_int4"); const Id t_in_uint = Name(TypePointer(spv::StorageClass::Input, t_uint), "in_uint"); + const Id t_in_uint3 = Name(TypePointer(spv::StorageClass::Input, t_uint3), "in_uint3"); + const Id t_in_uint4 = Name(TypePointer(spv::StorageClass::Input, t_uint4), "in_uint4"); const Id t_in_float = Name(TypePointer(spv::StorageClass::Input, t_float), "in_float"); + const Id t_in_float2 = Name(TypePointer(spv::StorageClass::Input, t_float2), "in_float2"); + const Id t_in_float3 = Name(TypePointer(spv::StorageClass::Input, t_float3), "in_float3"); const Id t_in_float4 = Name(TypePointer(spv::StorageClass::Input, t_float4), "in_float4"); + const Id t_out_int = Name(TypePointer(spv::StorageClass::Output, t_int), "out_int"); + const Id t_out_float = Name(TypePointer(spv::StorageClass::Output, t_float), "out_float"); const Id t_out_float4 = Name(TypePointer(spv::StorageClass::Output, t_float4), "out_float4"); const Id t_cbuf_float = TypePointer(spv::StorageClass::Uniform, 
t_float); const Id t_cbuf_std140 = Decorate( - Name(TypeArray(t_float4, Constant(t_uint, MAX_CONSTBUFFER_ELEMENTS)), "CbufStd140Array"), - spv::Decoration::ArrayStride, 16u); + Name(TypeArray(t_float4, Constant(t_uint, MaxConstBufferElements)), "CbufStd140Array"), + spv::Decoration::ArrayStride, 16U); const Id t_cbuf_scalar = Decorate( - Name(TypeArray(t_float, Constant(t_uint, MAX_CONSTBUFFER_FLOATS)), "CbufScalarArray"), - spv::Decoration::ArrayStride, 4u); + Name(TypeArray(t_float, Constant(t_uint, MaxConstBufferFloats)), "CbufScalarArray"), + spv::Decoration::ArrayStride, 4U); const Id t_cbuf_std140_struct = MemberDecorate( Decorate(TypeStruct(t_cbuf_std140), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); const Id t_cbuf_scalar_struct = MemberDecorate( @@ -1594,28 +2465,43 @@ private: const Id t_cbuf_std140_ubo = TypePointer(spv::StorageClass::Uniform, t_cbuf_std140_struct); const Id t_cbuf_scalar_ubo = TypePointer(spv::StorageClass::Uniform, t_cbuf_scalar_struct); + Id t_smem_uint{}; + const Id t_gmem_float = TypePointer(spv::StorageClass::StorageBuffer, t_float); const Id t_gmem_array = - Name(Decorate(TypeRuntimeArray(t_float), spv::Decoration::ArrayStride, 4u), "GmemArray"); + Name(Decorate(TypeRuntimeArray(t_float), spv::Decoration::ArrayStride, 4U), "GmemArray"); const Id t_gmem_struct = MemberDecorate( Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct); const Id v_float_zero = Constant(t_float, 0.0f); + const Id v_float_one = Constant(t_float, 1.0f); + + // Nvidia uses these defaults for varyings (e.g. 
position and generic attributes) + const Id v_varying_default = + ConstantComposite(t_float4, v_float_zero, v_float_zero, v_float_zero, v_float_one); + const Id v_true = ConstantTrue(t_bool); const Id v_false = ConstantFalse(t_bool); - Id per_vertex{}; + Id t_scalar_half{}; + Id t_half{}; + + Id out_vertex{}; + Id in_vertex{}; std::map<u32, Id> registers; std::map<Tegra::Shader::Pred, Id> predicates; std::map<u32, Id> flow_variables; Id local_memory{}; + Id shared_memory{}; std::array<Id, INTERNAL_FLAGS_COUNT> internal_flags{}; std::map<Attribute::Index, Id> input_attributes; std::map<Attribute::Index, Id> output_attributes; std::map<u32, Id> constant_buffers; std::map<GlobalMemoryBase, Id> global_buffers; - std::map<u32, SamplerImage> sampler_images; + std::map<u32, TexelBuffer> texel_buffers; + std::map<u32, SampledImage> sampled_images; + std::map<u32, StorageImage> images; Id instance_index{}; Id vertex_index{}; @@ -1623,18 +2509,20 @@ private: Id frag_depth{}; Id frag_coord{}; Id front_facing{}; - - u32 position_index{}; - u32 point_size_index{}; - u32 clip_distances_index{}; + Id point_coord{}; + Id tess_level_outer{}; + Id tess_level_inner{}; + Id tess_coord{}; + Id invocation_id{}; + Id workgroup_id{}; + Id local_invocation_id{}; + Id thread_id{}; + + VertexIndices in_indices; + VertexIndices out_indices; std::vector<Id> interfaces; - u32 const_buffers_base_binding{}; - u32 global_buffers_base_binding{}; - u32 samplers_base_binding{}; - - Id execute_function{}; Id jmp_to{}; Id ssy_flow_stack_top{}; Id pbk_flow_stack_top{}; @@ -1642,6 +2530,9 @@ private: Id pbk_flow_stack{}; Id continue_label{}; std::map<u32, Id> labels; + + bool conditional_branch_set{}; + bool inside_branch{}; }; class ExprDecompiler { @@ -1652,52 +2543,33 @@ public: const Id type_def = decomp.GetTypeDefinition(Type::Bool); const Id op1 = Visit(expr.operand1); const Id op2 = Visit(expr.operand2); - return decomp.Emit(decomp.OpLogicalAnd(type_def, op1, op2)); + return 
decomp.OpLogicalAnd(type_def, op1, op2); } Id operator()(const ExprOr& expr) { const Id type_def = decomp.GetTypeDefinition(Type::Bool); const Id op1 = Visit(expr.operand1); const Id op2 = Visit(expr.operand2); - return decomp.Emit(decomp.OpLogicalOr(type_def, op1, op2)); + return decomp.OpLogicalOr(type_def, op1, op2); } Id operator()(const ExprNot& expr) { const Id type_def = decomp.GetTypeDefinition(Type::Bool); const Id op1 = Visit(expr.operand1); - return decomp.Emit(decomp.OpLogicalNot(type_def, op1)); + return decomp.OpLogicalNot(type_def, op1); } Id operator()(const ExprPredicate& expr) { const auto pred = static_cast<Tegra::Shader::Pred>(expr.predicate); - return decomp.Emit(decomp.OpLoad(decomp.t_bool, decomp.predicates.at(pred))); + return decomp.OpLoad(decomp.t_bool, decomp.predicates.at(pred)); } Id operator()(const ExprCondCode& expr) { - const Node cc = decomp.ir.GetConditionCode(expr.cc); - Id target; - - if (const auto pred = std::get_if<PredicateNode>(&*cc)) { - const auto index = pred->GetIndex(); - switch (index) { - case Tegra::Shader::Pred::NeverExecute: - target = decomp.v_false; - break; - case Tegra::Shader::Pred::UnusedIndex: - target = decomp.v_true; - break; - default: - target = decomp.predicates.at(index); - break; - } - } else if (const auto flag = std::get_if<InternalFlagNode>(&*cc)) { - target = decomp.internal_flags.at(static_cast<u32>(flag->GetFlag())); - } - return decomp.Emit(decomp.OpLoad(decomp.t_bool, target)); + return decomp.AsBool(decomp.Visit(decomp.ir.GetConditionCode(expr.cc))); } Id operator()(const ExprVar& expr) { - return decomp.Emit(decomp.OpLoad(decomp.t_bool, decomp.flow_variables.at(expr.var_index))); + return decomp.OpLoad(decomp.t_bool, decomp.flow_variables.at(expr.var_index)); } Id operator()(const ExprBoolean& expr) { @@ -1706,9 +2578,9 @@ public: Id operator()(const ExprGprEqual& expr) { const Id target = decomp.Constant(decomp.t_uint, expr.value); - const Id gpr = decomp.BitcastTo<Type::Uint>( - 
decomp.Emit(decomp.OpLoad(decomp.t_float, decomp.registers.at(expr.gpr)))); - return decomp.Emit(decomp.OpLogicalEqual(decomp.t_uint, gpr, target)); + Id gpr = decomp.OpLoad(decomp.t_float, decomp.registers.at(expr.gpr)); + gpr = decomp.OpBitcast(decomp.t_uint, gpr); + return decomp.OpIEqual(decomp.t_bool, gpr, target); } Id Visit(const Expr& node) { @@ -1736,16 +2608,16 @@ public: const Id condition = expr_parser.Visit(ast.condition); const Id then_label = decomp.OpLabel(); const Id endif_label = decomp.OpLabel(); - decomp.Emit(decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone)); - decomp.Emit(decomp.OpBranchConditional(condition, then_label, endif_label)); - decomp.Emit(then_label); + decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone); + decomp.OpBranchConditional(condition, then_label, endif_label); + decomp.AddLabel(then_label); ASTNode current = ast.nodes.GetFirst(); while (current) { Visit(current); current = current->GetNext(); } - decomp.Emit(decomp.OpBranch(endif_label)); - decomp.Emit(endif_label); + decomp.OpBranch(endif_label); + decomp.AddLabel(endif_label); } void operator()([[maybe_unused]] const ASTIfElse& ast) { @@ -1763,7 +2635,7 @@ public: void operator()(const ASTVarSet& ast) { ExprDecompiler expr_parser{decomp}; const Id condition = expr_parser.Visit(ast.condition); - decomp.Emit(decomp.OpStore(decomp.flow_variables.at(ast.index), condition)); + decomp.OpStore(decomp.flow_variables.at(ast.index), condition); } void operator()([[maybe_unused]] const ASTLabel& ast) { @@ -1778,23 +2650,24 @@ public: const Id loop_label = decomp.OpLabel(); const Id endloop_label = decomp.OpLabel(); const Id loop_start_block = decomp.OpLabel(); - const Id loop_end_block = decomp.OpLabel(); + const Id loop_continue_block = decomp.OpLabel(); current_loop_exit = endloop_label; - decomp.Emit(decomp.OpBranch(loop_label)); - decomp.Emit(loop_label); - decomp.Emit( - decomp.OpLoopMerge(endloop_label, loop_end_block, 
spv::LoopControlMask::MaskNone)); - decomp.Emit(decomp.OpBranch(loop_start_block)); - decomp.Emit(loop_start_block); + decomp.OpBranch(loop_label); + decomp.AddLabel(loop_label); + decomp.OpLoopMerge(endloop_label, loop_continue_block, spv::LoopControlMask::MaskNone); + decomp.OpBranch(loop_start_block); + decomp.AddLabel(loop_start_block); ASTNode current = ast.nodes.GetFirst(); while (current) { Visit(current); current = current->GetNext(); } + decomp.OpBranch(loop_continue_block); + decomp.AddLabel(loop_continue_block); ExprDecompiler expr_parser{decomp}; const Id condition = expr_parser.Visit(ast.condition); - decomp.Emit(decomp.OpBranchConditional(condition, loop_label, endloop_label)); - decomp.Emit(endloop_label); + decomp.OpBranchConditional(condition, loop_label, endloop_label); + decomp.AddLabel(endloop_label); } void operator()(const ASTReturn& ast) { @@ -1803,27 +2676,27 @@ public: const Id condition = expr_parser.Visit(ast.condition); const Id then_label = decomp.OpLabel(); const Id endif_label = decomp.OpLabel(); - decomp.Emit(decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone)); - decomp.Emit(decomp.OpBranchConditional(condition, then_label, endif_label)); - decomp.Emit(then_label); + decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone); + decomp.OpBranchConditional(condition, then_label, endif_label); + decomp.AddLabel(then_label); if (ast.kills) { - decomp.Emit(decomp.OpKill()); + decomp.OpKill(); } else { decomp.PreExit(); - decomp.Emit(decomp.OpReturn()); + decomp.OpReturn(); } - decomp.Emit(endif_label); + decomp.AddLabel(endif_label); } else { const Id next_block = decomp.OpLabel(); - decomp.Emit(decomp.OpBranch(next_block)); - decomp.Emit(next_block); + decomp.OpBranch(next_block); + decomp.AddLabel(next_block); if (ast.kills) { - decomp.Emit(decomp.OpKill()); + decomp.OpKill(); } else { decomp.PreExit(); - decomp.Emit(decomp.OpReturn()); + decomp.OpReturn(); } - decomp.Emit(decomp.OpLabel()); + 
decomp.AddLabel(decomp.OpLabel()); } } @@ -1833,17 +2706,17 @@ public: const Id condition = expr_parser.Visit(ast.condition); const Id then_label = decomp.OpLabel(); const Id endif_label = decomp.OpLabel(); - decomp.Emit(decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone)); - decomp.Emit(decomp.OpBranchConditional(condition, then_label, endif_label)); - decomp.Emit(then_label); - decomp.Emit(decomp.OpBranch(current_loop_exit)); - decomp.Emit(endif_label); + decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone); + decomp.OpBranchConditional(condition, then_label, endif_label); + decomp.AddLabel(then_label); + decomp.OpBranch(current_loop_exit); + decomp.AddLabel(endif_label); } else { const Id next_block = decomp.OpLabel(); - decomp.Emit(decomp.OpBranch(next_block)); - decomp.Emit(next_block); - decomp.Emit(decomp.OpBranch(current_loop_exit)); - decomp.Emit(decomp.OpLabel()); + decomp.OpBranch(next_block); + decomp.AddLabel(next_block); + decomp.OpBranch(current_loop_exit); + decomp.AddLabel(decomp.OpLabel()); } } @@ -1864,20 +2737,51 @@ void SPIRVDecompiler::DecompileAST() { flow_variables.emplace(i, AddGlobalVariable(id)); } + DefinePrologue(); + const ASTNode program = ir.GetASTProgram(); ASTDecompiler decompiler{*this}; decompiler.Visit(program); const Id next_block = OpLabel(); - Emit(OpBranch(next_block)); - Emit(next_block); + OpBranch(next_block); + AddLabel(next_block); +} + +} // Anonymous namespace + +ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) { + ShaderEntries entries; + for (const auto& cbuf : ir.GetConstantBuffers()) { + entries.const_buffers.emplace_back(cbuf.second, cbuf.first); + } + for (const auto& [base, usage] : ir.GetGlobalMemory()) { + entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset, usage.is_written); + } + for (const auto& sampler : ir.GetSamplers()) { + if (sampler.IsBuffer()) { + entries.texel_buffers.emplace_back(sampler); + } else { + 
entries.samplers.emplace_back(sampler); + } + } + for (const auto& image : ir.GetImages()) { + entries.images.emplace_back(image); + } + for (const auto& attribute : ir.GetInputAttributes()) { + if (IsGenericAttribute(attribute)) { + entries.attributes.insert(GetGenericAttributeLocation(attribute)); + } + } + entries.clip_distances = ir.GetClipDistances(); + entries.shader_length = ir.GetLength(); + entries.uses_warps = ir.UsesWarps(); + return entries; } -DecompilerResult Decompile(const VKDevice& device, const VideoCommon::Shader::ShaderIR& ir, - Maxwell::ShaderStage stage) { - auto decompiler = std::make_unique<SPIRVDecompiler>(device, ir, stage); - decompiler->Decompile(); - return {std::move(decompiler), decompiler->GetShaderEntries()}; +std::vector<u32> Decompile(const VKDevice& device, const VideoCommon::Shader::ShaderIR& ir, + ShaderType stage, const Specialization& specialization) { + return SPIRVDecompiler(device, ir, stage, specialization).Assemble(); } -} // namespace Vulkan::VKShader +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h index f90541cc1..10794be1c 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h @@ -5,29 +5,28 @@ #pragma once #include <array> +#include <bitset> #include <memory> #include <set> +#include <type_traits> #include <utility> #include <vector> -#include <sirit/sirit.h> - #include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_type.h" #include "video_core/shader/shader_ir.h" -namespace VideoCommon::Shader { -class ShaderIR; -} - namespace Vulkan { class VKDevice; } -namespace Vulkan::VKShader { +namespace Vulkan { using Maxwell = Tegra::Engines::Maxwell3D::Regs; +using TexelBufferEntry = VideoCommon::Shader::Sampler; using SamplerEntry = VideoCommon::Shader::Sampler; +using ImageEntry = 
VideoCommon::Shader::Image; constexpr u32 DESCRIPTOR_SET = 0; @@ -46,39 +45,78 @@ private: class GlobalBufferEntry { public: - explicit GlobalBufferEntry(u32 cbuf_index, u32 cbuf_offset) - : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset} {} + constexpr explicit GlobalBufferEntry(u32 cbuf_index, u32 cbuf_offset, bool is_written) + : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, is_written{is_written} {} - u32 GetCbufIndex() const { + constexpr u32 GetCbufIndex() const { return cbuf_index; } - u32 GetCbufOffset() const { + constexpr u32 GetCbufOffset() const { return cbuf_offset; } + constexpr bool IsWritten() const { + return is_written; + } + private: u32 cbuf_index{}; u32 cbuf_offset{}; + bool is_written{}; }; struct ShaderEntries { - u32 const_buffers_base_binding{}; - u32 global_buffers_base_binding{}; - u32 samplers_base_binding{}; + u32 NumBindings() const { + return static_cast<u32>(const_buffers.size() + global_buffers.size() + + texel_buffers.size() + samplers.size() + images.size()); + } + std::vector<ConstBufferEntry> const_buffers; std::vector<GlobalBufferEntry> global_buffers; + std::vector<TexelBufferEntry> texel_buffers; std::vector<SamplerEntry> samplers; + std::vector<ImageEntry> images; std::set<u32> attributes; std::array<bool, Maxwell::NumClipDistances> clip_distances{}; std::size_t shader_length{}; - Sirit::Id entry_function{}; - std::vector<Sirit::Id> interfaces; + bool uses_warps{}; +}; + +struct Specialization final { + u32 base_binding{}; + + // Compute specific + std::array<u32, 3> workgroup_size{}; + u32 shared_memory_size{}; + + // Graphics specific + Maxwell::PrimitiveTopology primitive_topology{}; + std::optional<float> point_size{}; + std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{}; + bool ndc_minus_one_to_one{}; + + // Tessellation specific + struct { + Maxwell::TessellationPrimitive primitive{}; + Maxwell::TessellationSpacing spacing{}; + bool clockwise{}; + } tessellation; + + // 
Fragment specific + std::bitset<8> enabled_rendertargets; +}; +// Old gcc versions don't consider this trivially copyable. +// static_assert(std::is_trivially_copyable_v<Specialization>); + +struct SPIRVShader { + std::vector<u32> code; + ShaderEntries entries; }; -using DecompilerResult = std::pair<std::unique_ptr<Sirit::Module>, ShaderEntries>; +ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir); -DecompilerResult Decompile(const VKDevice& device, const VideoCommon::Shader::ShaderIR& ir, - Maxwell::ShaderStage stage); +std::vector<u32> Decompile(const VKDevice& device, const VideoCommon::Shader::ShaderIR& ir, + Tegra::Engines::ShaderType stage, const Specialization& specialization); -} // namespace Vulkan::VKShader +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp new file mode 100644 index 000000000..171d78afc --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp @@ -0,0 +1,127 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <algorithm> +#include <unordered_map> +#include <utility> +#include <vector> + +#include "common/bit_util.h" +#include "common/common_types.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_resource_manager.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" +#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" + +namespace Vulkan { + +VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence, + u64 last_epoch) + : buffer{std::move(buffer)}, watch{fence}, last_epoch{last_epoch} {} + +VKStagingBufferPool::StagingBuffer::StagingBuffer(StagingBuffer&& rhs) noexcept { + buffer = std::move(rhs.buffer); + watch = std::move(rhs.watch); + last_epoch = rhs.last_epoch; +} + +VKStagingBufferPool::StagingBuffer::~StagingBuffer() = default; + +VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator=( + StagingBuffer&& rhs) noexcept { + buffer = std::move(rhs.buffer); + watch = std::move(rhs.watch); + last_epoch = rhs.last_epoch; + return *this; +} + +VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager, + VKScheduler& scheduler) + : device{device}, memory_manager{memory_manager}, scheduler{scheduler}, + is_device_integrated{device.IsIntegrated()} {} + +VKStagingBufferPool::~VKStagingBufferPool() = default; + +VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visible) { + if (const auto buffer = TryGetReservedBuffer(size, host_visible)) { + return *buffer; + } + return CreateStagingBuffer(size, host_visible); +} + +void VKStagingBufferPool::TickFrame() { + ++epoch; + current_delete_level = (current_delete_level + 1) % NumLevels; + + ReleaseCache(true); + if (!is_device_integrated) { + ReleaseCache(false); + } +} + +VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) { + for (auto& entry : 
GetCache(host_visible)[Common::Log2Ceil64(size)].entries) { + if (entry.watch.TryWatch(scheduler.GetFence())) { + entry.last_epoch = epoch; + return &*entry.buffer; + } + } + return nullptr; +} + +VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) { + const auto usage = + vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst | + vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndexBuffer; + const u32 log2 = Common::Log2Ceil64(size); + const vk::BufferCreateInfo buffer_ci({}, 1ULL << log2, usage, vk::SharingMode::eExclusive, 0, + nullptr); + const auto dev = device.GetLogical(); + auto buffer = std::make_unique<VKBuffer>(); + buffer->handle = dev.createBufferUnique(buffer_ci, nullptr, device.GetDispatchLoader()); + buffer->commit = memory_manager.Commit(*buffer->handle, host_visible); + + auto& entries = GetCache(host_visible)[log2].entries; + return *entries.emplace_back(std::move(buffer), scheduler.GetFence(), epoch).buffer; +} + +VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) { + return is_device_integrated || host_visible ? 
host_staging_buffers : device_staging_buffers; +} + +void VKStagingBufferPool::ReleaseCache(bool host_visible) { + auto& cache = GetCache(host_visible); + const u64 size = ReleaseLevel(cache, current_delete_level); + if (size == 0) { + return; + } +} + +u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t log2) { + static constexpr u64 epochs_to_destroy = 180; + static constexpr std::size_t deletions_per_tick = 16; + + auto& staging = cache[log2]; + auto& entries = staging.entries; + const std::size_t old_size = entries.size(); + + const auto is_deleteable = [this](const auto& entry) { + return entry.last_epoch + epochs_to_destroy < epoch && !entry.watch.IsUsed(); + }; + const std::size_t begin_offset = staging.delete_index; + const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size); + const auto begin = std::begin(entries) + begin_offset; + const auto end = std::begin(entries) + end_offset; + entries.erase(std::remove_if(begin, end, is_deleteable), end); + + const std::size_t new_size = entries.size(); + staging.delete_index += deletions_per_tick; + if (staging.delete_index >= new_size) { + staging.delete_index = 0; + } + + return (1ULL << log2) * (old_size - new_size); +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h new file mode 100644 index 000000000..02310375f --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h @@ -0,0 +1,83 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include <climits> +#include <unordered_map> +#include <utility> +#include <vector> + +#include "common/common_types.h" + +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_memory_manager.h" + +namespace Vulkan { + +class VKDevice; +class VKFenceWatch; +class VKScheduler; + +struct VKBuffer final { + UniqueBuffer handle; + VKMemoryCommit commit; +}; + +class VKStagingBufferPool final { +public: + explicit VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager, + VKScheduler& scheduler); + ~VKStagingBufferPool(); + + VKBuffer& GetUnusedBuffer(std::size_t size, bool host_visible); + + void TickFrame(); + +private: + struct StagingBuffer final { + explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence, u64 last_epoch); + StagingBuffer(StagingBuffer&& rhs) noexcept; + StagingBuffer(const StagingBuffer&) = delete; + ~StagingBuffer(); + + StagingBuffer& operator=(StagingBuffer&& rhs) noexcept; + + std::unique_ptr<VKBuffer> buffer; + VKFenceWatch watch; + u64 last_epoch = 0; + }; + + struct StagingBuffers final { + std::vector<StagingBuffer> entries; + std::size_t delete_index = 0; + }; + + static constexpr std::size_t NumLevels = sizeof(std::size_t) * CHAR_BIT; + using StagingBuffersCache = std::array<StagingBuffers, NumLevels>; + + VKBuffer* TryGetReservedBuffer(std::size_t size, bool host_visible); + + VKBuffer& CreateStagingBuffer(std::size_t size, bool host_visible); + + StagingBuffersCache& GetCache(bool host_visible); + + void ReleaseCache(bool host_visible); + + u64 ReleaseLevel(StagingBuffersCache& cache, std::size_t log2); + + const VKDevice& device; + VKMemoryManager& memory_manager; + VKScheduler& scheduler; + const bool is_device_integrated; + + StagingBuffersCache host_staging_buffers; + StagingBuffersCache device_staging_buffers; + + u64 epoch = 0; + + std::size_t current_delete_level = 0; +}; + +} // namespace Vulkan diff --git 
a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp index 08279e562..ebc68f030 100644 --- a/src/video_core/renderer_vulkan/vk_swapchain.cpp +++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp @@ -19,12 +19,18 @@ namespace Vulkan { namespace { -vk::SurfaceFormatKHR ChooseSwapSurfaceFormat(const std::vector<vk::SurfaceFormatKHR>& formats) { + +vk::SurfaceFormatKHR ChooseSwapSurfaceFormat(const std::vector<vk::SurfaceFormatKHR>& formats, + bool srgb) { if (formats.size() == 1 && formats[0].format == vk::Format::eUndefined) { - return {vk::Format::eB8G8R8A8Unorm, vk::ColorSpaceKHR::eSrgbNonlinear}; + vk::SurfaceFormatKHR format; + format.format = vk::Format::eB8G8R8A8Unorm; + format.colorSpace = vk::ColorSpaceKHR::eSrgbNonlinear; + return format; } - const auto& found = std::find_if(formats.begin(), formats.end(), [](const auto& format) { - return format.format == vk::Format::eB8G8R8A8Unorm && + const auto& found = std::find_if(formats.begin(), formats.end(), [srgb](const auto& format) { + const auto request_format = srgb ? vk::Format::eB8G8R8A8Srgb : vk::Format::eB8G8R8A8Unorm; + return format.format == request_format && format.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear; }); return found != formats.end() ? 
*found : formats[0]; @@ -51,28 +57,26 @@ vk::Extent2D ChooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities, u3 std::min(capabilities.maxImageExtent.height, extent.height)); return extent; } -} // namespace + +} // Anonymous namespace VKSwapchain::VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device) : surface{surface}, device{device} {} VKSwapchain::~VKSwapchain() = default; -void VKSwapchain::Create(u32 width, u32 height) { - const auto dev = device.GetLogical(); +void VKSwapchain::Create(u32 width, u32 height, bool srgb) { const auto& dld = device.GetDispatchLoader(); const auto physical_device = device.GetPhysical(); - - const vk::SurfaceCapabilitiesKHR capabilities{ - physical_device.getSurfaceCapabilitiesKHR(surface, dld)}; + const auto capabilities{physical_device.getSurfaceCapabilitiesKHR(surface, dld)}; if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) { return; } - dev.waitIdle(dld); + device.GetLogical().waitIdle(dld); Destroy(); - CreateSwapchain(capabilities, width, height); + CreateSwapchain(capabilities, width, height, srgb); CreateSemaphores(); CreateImageViews(); @@ -107,7 +111,7 @@ bool VKSwapchain::Present(vk::Semaphore render_semaphore, VKFence& fence) { break; case vk::Result::eErrorOutOfDateKHR: if (current_width > 0 && current_height > 0) { - Create(current_width, current_height); + Create(current_width, current_height, current_srgb); recreated = true; } break; @@ -129,23 +133,19 @@ bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebu } void VKSwapchain::CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, - u32 height) { - const auto dev{device.GetLogical()}; + u32 height, bool srgb) { const auto& dld{device.GetDispatchLoader()}; const auto physical_device{device.GetPhysical()}; + const auto formats{physical_device.getSurfaceFormatsKHR(surface, dld)}; + const auto present_modes{physical_device.getSurfacePresentModesKHR(surface, dld)}; - 
const std::vector<vk::SurfaceFormatKHR> formats{ - physical_device.getSurfaceFormatsKHR(surface, dld)}; - - const std::vector<vk::PresentModeKHR> present_modes{ - physical_device.getSurfacePresentModesKHR(surface, dld)}; - - const vk::SurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats)}; + const vk::SurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats, srgb)}; const vk::PresentModeKHR present_mode{ChooseSwapPresentMode(present_modes)}; extent = ChooseSwapExtent(capabilities, width, height); current_width = extent.width; current_height = extent.height; + current_srgb = srgb; u32 requested_image_count{capabilities.minImageCount + 1}; if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) { @@ -169,6 +169,7 @@ void VKSwapchain::CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities swapchain_ci.imageSharingMode = vk::SharingMode::eExclusive; } + const auto dev{device.GetLogical()}; swapchain = dev.createSwapchainKHRUnique(swapchain_ci, nullptr, dld); images = dev.getSwapchainImagesKHR(*swapchain, dld); diff --git a/src/video_core/renderer_vulkan/vk_swapchain.h b/src/video_core/renderer_vulkan/vk_swapchain.h index 2ad84f185..a1e7938d2 100644 --- a/src/video_core/renderer_vulkan/vk_swapchain.h +++ b/src/video_core/renderer_vulkan/vk_swapchain.h @@ -24,7 +24,7 @@ public: ~VKSwapchain(); /// Creates (or recreates) the swapchain with a given size. - void Create(u32 width, u32 height); + void Create(u32 width, u32 height, bool srgb); /// Acquires the next image in the swapchain, waits as needed. 
void AcquireNextImage(); @@ -60,8 +60,13 @@ public: return image_format; } + bool GetSrgbState() const { + return current_srgb; + } + private: - void CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, u32 height); + void CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, u32 height, + bool srgb); void CreateSemaphores(); void CreateImageViews(); @@ -87,6 +92,7 @@ private: u32 current_width{}; u32 current_height{}; + bool current_srgb{}; }; } // namespace Vulkan diff --git a/src/video_core/shader/const_buffer_locker.cpp b/src/video_core/shader/const_buffer_locker.cpp index fe467608e..a4a0319eb 100644 --- a/src/video_core/shader/const_buffer_locker.cpp +++ b/src/video_core/shader/const_buffer_locker.cpp @@ -2,13 +2,12 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#pragma once - #include <algorithm> -#include <memory> -#include "common/assert.h" +#include <tuple> + #include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/engines/shader_type.h" #include "video_core/shader/const_buffer_locker.h" namespace VideoCommon::Shader { @@ -103,8 +102,8 @@ bool ConstBufferLocker::IsConsistent() const { } bool ConstBufferLocker::HasEqualKeys(const ConstBufferLocker& rhs) const { - return keys == rhs.keys && bound_samplers == rhs.bound_samplers && - bindless_samplers == rhs.bindless_samplers; + return std::tie(keys, bound_samplers, bindless_samplers) == + std::tie(rhs.keys, rhs.bound_samplers, rhs.bindless_samplers); } } // namespace VideoCommon::Shader diff --git a/src/video_core/shader/const_buffer_locker.h b/src/video_core/shader/const_buffer_locker.h index 600e2f3c3..d32e2d657 100644 --- a/src/video_core/shader/const_buffer_locker.h +++ b/src/video_core/shader/const_buffer_locker.h @@ -4,10 +4,12 @@ #pragma once +#include <optional> #include <unordered_map> #include "common/common_types.h" #include "common/hash.h" #include 
"video_core/engines/const_buffer_engine_interface.h" +#include "video_core/engines/shader_type.h" namespace VideoCommon::Shader { @@ -20,7 +22,7 @@ using BindlessSamplerMap = * The ConstBufferLocker is a class use to interface the 3D and compute engines with the shader * compiler. with it, the shader can obtain required data from GPU state and store it for disk * shader compilation. - **/ + */ class ConstBufferLocker { public: explicit ConstBufferLocker(Tegra::Engines::ShaderType shader_stage); diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp index d47c63d9f..b427ac873 100644 --- a/src/video_core/shader/control_flow.cpp +++ b/src/video_core/shader/control_flow.cpp @@ -16,7 +16,9 @@ #include "video_core/shader/shader_ir.h" namespace VideoCommon::Shader { + namespace { + using Tegra::Shader::Instruction; using Tegra::Shader::OpCode; @@ -68,15 +70,15 @@ struct CFGRebuildState { const ProgramCode& program_code; ConstBufferLocker& locker; u32 start{}; - std::vector<BlockInfo> block_info{}; - std::list<u32> inspect_queries{}; - std::list<Query> queries{}; - std::unordered_map<u32, u32> registered{}; - std::set<u32> labels{}; - std::map<u32, u32> ssy_labels{}; - std::map<u32, u32> pbk_labels{}; - std::unordered_map<u32, BlockStack> stacks{}; - ASTManager* manager; + std::vector<BlockInfo> block_info; + std::list<u32> inspect_queries; + std::list<Query> queries; + std::unordered_map<u32, u32> registered; + std::set<u32> labels; + std::map<u32, u32> ssy_labels; + std::map<u32, u32> pbk_labels; + std::unordered_map<u32, BlockStack> stacks; + ASTManager* manager{}; }; enum class BlockCollision : u32 { None, Found, Inside }; @@ -109,7 +111,7 @@ BlockInfo& CreateBlockInfo(CFGRebuildState& state, u32 start, u32 end) { } Pred GetPredicate(u32 index, bool negated) { - return static_cast<Pred>(index + (negated ? 8 : 0)); + return static_cast<Pred>(static_cast<u64>(index) + (negated ? 
8ULL : 0ULL)); } /** @@ -136,15 +138,13 @@ struct BranchIndirectInfo { s32 relative_position{}; }; -std::optional<BranchIndirectInfo> TrackBranchIndirectInfo(const CFGRebuildState& state, - u32 start_address, u32 current_position) { - const u32 shader_start = state.start; - u32 pos = current_position; - BranchIndirectInfo result{}; - u64 track_register = 0; +struct BufferInfo { + u32 index; + u32 offset; +}; - // Step 0 Get BRX Info - const Instruction instr = {state.program_code[pos]}; +std::optional<std::pair<s32, u64>> GetBRXInfo(const CFGRebuildState& state, u32& pos) { + const Instruction instr = state.program_code[pos]; const auto opcode = OpCode::Decode(instr); if (opcode->get().GetId() != OpCode::Id::BRX) { return std::nullopt; @@ -152,86 +152,94 @@ std::optional<BranchIndirectInfo> TrackBranchIndirectInfo(const CFGRebuildState& if (instr.brx.constant_buffer != 0) { return std::nullopt; } - track_register = instr.gpr8.Value(); - result.relative_position = instr.brx.GetBranchExtend(); - pos--; - bool found_track = false; + --pos; + return std::make_pair(instr.brx.GetBranchExtend(), instr.gpr8.Value()); +} - // Step 1 Track LDC - while (pos >= shader_start) { - if (IsSchedInstruction(pos, shader_start)) { - pos--; +template <typename Result, typename TestCallable, typename PackCallable> +// requires std::predicate<TestCallable, Instruction, const OpCode::Matcher&> +// requires std::invocable<PackCallable, Instruction, const OpCode::Matcher&> +std::optional<Result> TrackInstruction(const CFGRebuildState& state, u32& pos, TestCallable test, + PackCallable pack) { + for (; pos >= state.start; --pos) { + if (IsSchedInstruction(pos, state.start)) { continue; } - const Instruction instr = {state.program_code[pos]}; + const Instruction instr = state.program_code[pos]; const auto opcode = OpCode::Decode(instr); - if (opcode->get().GetId() == OpCode::Id::LD_C) { - if (instr.gpr0.Value() == track_register && - instr.ld_c.type.Value() == 
Tegra::Shader::UniformType::Single) { - result.buffer = instr.cbuf36.index.Value(); - result.offset = static_cast<u32>(instr.cbuf36.GetOffset()); - track_register = instr.gpr8.Value(); - pos--; - found_track = true; - break; - } + if (!opcode) { + continue; + } + if (test(instr, opcode->get())) { + --pos; + return std::make_optional(pack(instr, opcode->get())); } - pos--; } + return std::nullopt; +} - if (!found_track) { - return std::nullopt; - } - found_track = false; +std::optional<std::pair<BufferInfo, u64>> TrackLDC(const CFGRebuildState& state, u32& pos, + u64 brx_tracked_register) { + return TrackInstruction<std::pair<BufferInfo, u64>>( + state, pos, + [brx_tracked_register](auto instr, const auto& opcode) { + return opcode.GetId() == OpCode::Id::LD_C && + instr.gpr0.Value() == brx_tracked_register && + instr.ld_c.type.Value() == Tegra::Shader::UniformType::Single; + }, + [](auto instr, const auto& opcode) { + const BufferInfo info = {static_cast<u32>(instr.cbuf36.index.Value()), + static_cast<u32>(instr.cbuf36.GetOffset())}; + return std::make_pair(info, instr.gpr8.Value()); + }); +} - // Step 2 Track SHL - while (pos >= shader_start) { - if (IsSchedInstruction(pos, shader_start)) { - pos--; - continue; - } - const Instruction instr = state.program_code[pos]; - const auto opcode = OpCode::Decode(instr); - if (opcode->get().GetId() == OpCode::Id::SHL_IMM) { - if (instr.gpr0.Value() == track_register) { - track_register = instr.gpr8.Value(); - pos--; - found_track = true; - break; - } - } - pos--; +std::optional<u64> TrackSHLRegister(const CFGRebuildState& state, u32& pos, + u64 ldc_tracked_register) { + return TrackInstruction<u64>(state, pos, + [ldc_tracked_register](auto instr, const auto& opcode) { + return opcode.GetId() == OpCode::Id::SHL_IMM && + instr.gpr0.Value() == ldc_tracked_register; + }, + [](auto instr, const auto&) { return instr.gpr8.Value(); }); +} + +std::optional<u32> TrackIMNMXValue(const CFGRebuildState& state, u32& pos, + u64 
shl_tracked_register) { + return TrackInstruction<u32>(state, pos, + [shl_tracked_register](auto instr, const auto& opcode) { + return opcode.GetId() == OpCode::Id::IMNMX_IMM && + instr.gpr0.Value() == shl_tracked_register; + }, + [](auto instr, const auto&) { + return static_cast<u32>(instr.alu.GetSignedImm20_20() + 1); + }); +} + +std::optional<BranchIndirectInfo> TrackBranchIndirectInfo(const CFGRebuildState& state, u32 pos) { + const auto brx_info = GetBRXInfo(state, pos); + if (!brx_info) { + return std::nullopt; } + const auto [relative_position, brx_tracked_register] = *brx_info; - if (!found_track) { + const auto ldc_info = TrackLDC(state, pos, brx_tracked_register); + if (!ldc_info) { return std::nullopt; } - found_track = false; + const auto [buffer_info, ldc_tracked_register] = *ldc_info; - // Step 3 Track IMNMX - while (pos >= shader_start) { - if (IsSchedInstruction(pos, shader_start)) { - pos--; - continue; - } - const Instruction instr = state.program_code[pos]; - const auto opcode = OpCode::Decode(instr); - if (opcode->get().GetId() == OpCode::Id::IMNMX_IMM) { - if (instr.gpr0.Value() == track_register) { - track_register = instr.gpr8.Value(); - result.entries = instr.alu.GetSignedImm20_20() + 1; - pos--; - found_track = true; - break; - } - } - pos--; + const auto shl_tracked_register = TrackSHLRegister(state, pos, ldc_tracked_register); + if (!shl_tracked_register) { + return std::nullopt; } - if (!found_track) { + const auto entries = TrackIMNMXValue(state, pos, *shl_tracked_register); + if (!entries) { return std::nullopt; } - return result; + + return BranchIndirectInfo{buffer_info.index, buffer_info.offset, *entries, relative_position}; } std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) { @@ -420,30 +428,30 @@ std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) break; } case OpCode::Id::BRX: { - auto tmp = TrackBranchIndirectInfo(state, address, offset); - if (tmp) { - auto result = 
*tmp; - std::vector<CaseBranch> branches{}; - s32 pc_target = offset + result.relative_position; - for (u32 i = 0; i < result.entries; i++) { - auto k = state.locker.ObtainKey(result.buffer, result.offset + i * 4); - if (!k) { - return {ParseResult::AbnormalFlow, parse_info}; - } - u32 value = *k; - u32 target = static_cast<u32>((value >> 3) + pc_target); - insert_label(state, target); - branches.emplace_back(value, target); - } - parse_info.end_address = offset; - parse_info.branch_info = MakeBranchInfo<MultiBranch>( - static_cast<u32>(instr.gpr8.Value()), std::move(branches)); - - return {ParseResult::ControlCaught, parse_info}; - } else { + const auto tmp = TrackBranchIndirectInfo(state, offset); + if (!tmp) { LOG_WARNING(HW_GPU, "BRX Track Unsuccesful"); + return {ParseResult::AbnormalFlow, parse_info}; } - return {ParseResult::AbnormalFlow, parse_info}; + + const auto result = *tmp; + const s32 pc_target = offset + result.relative_position; + std::vector<CaseBranch> branches; + for (u32 i = 0; i < result.entries; i++) { + auto key = state.locker.ObtainKey(result.buffer, result.offset + i * 4); + if (!key) { + return {ParseResult::AbnormalFlow, parse_info}; + } + u32 value = *key; + u32 target = static_cast<u32>((value >> 3) + pc_target); + insert_label(state, target); + branches.emplace_back(value, target); + } + parse_info.end_address = offset; + parse_info.branch_info = MakeBranchInfo<MultiBranch>( + static_cast<u32>(instr.gpr8.Value()), std::move(branches)); + + return {ParseResult::ControlCaught, parse_info}; } default: break; diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp index 21fb9cb83..22c3e5120 100644 --- a/src/video_core/shader/decode.cpp +++ b/src/video_core/shader/decode.cpp @@ -154,10 +154,10 @@ void ShaderIR::Decode() { LOG_CRITICAL(HW_GPU, "Unknown decompilation mode!"); [[fallthrough]]; case CompileDepth::BruteForce: { + const auto shader_end = static_cast<u32>(program_code.size()); coverage_begin = 
main_offset; - const std::size_t shader_end = program_code.size(); coverage_end = shader_end; - for (u32 label = main_offset; label < shader_end; label++) { + for (u32 label = main_offset; label < shader_end; ++label) { basic_blocks.insert({label, DecodeRange(label, label + 1)}); } break; diff --git a/src/video_core/shader/decode/arithmetic.cpp b/src/video_core/shader/decode/arithmetic.cpp index 1473c282a..fcedd2af6 100644 --- a/src/video_core/shader/decode/arithmetic.cpp +++ b/src/video_core/shader/decode/arithmetic.cpp @@ -43,12 +43,12 @@ u32 ShaderIR::DecodeArithmetic(NodeBlock& bb, u32 pc) { case OpCode::Id::FMUL_IMM: { // FMUL does not have 'abs' bits and only the second operand has a 'neg' bit. if (instr.fmul.tab5cb8_2 != 0) { - LOG_WARNING(HW_GPU, "FMUL tab5cb8_2({}) is not implemented", - instr.fmul.tab5cb8_2.Value()); + LOG_DEBUG(HW_GPU, "FMUL tab5cb8_2({}) is not implemented", + instr.fmul.tab5cb8_2.Value()); } if (instr.fmul.tab5c68_0 != 1) { - LOG_WARNING(HW_GPU, "FMUL tab5cb8_0({}) is not implemented", - instr.fmul.tab5c68_0.Value()); + LOG_DEBUG(HW_GPU, "FMUL tab5cb8_0({}) is not implemented", + instr.fmul.tab5c68_0.Value()); } op_b = GetOperandAbsNegFloat(op_b, false, instr.fmul.negate_b); @@ -144,10 +144,11 @@ u32 ShaderIR::DecodeArithmetic(NodeBlock& bb, u32 pc) { case OpCode::Id::RRO_C: case OpCode::Id::RRO_R: case OpCode::Id::RRO_IMM: { + LOG_DEBUG(HW_GPU, "(STUBBED) RRO used"); + // Currently RRO is only implemented as a register move. 
op_b = GetOperandAbsNegFloat(op_b, instr.alu.abs_b, instr.alu.negate_b); SetRegister(bb, instr.gpr0, op_b); - LOG_WARNING(HW_GPU, "RRO instruction is incomplete"); break; } default: diff --git a/src/video_core/shader/decode/arithmetic_half.cpp b/src/video_core/shader/decode/arithmetic_half.cpp index b06cbe441..ee7d9a29d 100644 --- a/src/video_core/shader/decode/arithmetic_half.cpp +++ b/src/video_core/shader/decode/arithmetic_half.cpp @@ -21,8 +21,8 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) { if (opcode->get().GetId() == OpCode::Id::HADD2_C || opcode->get().GetId() == OpCode::Id::HADD2_R) { - if (instr.alu_half.ftz != 0) { - LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName()); + if (instr.alu_half.ftz == 0) { + LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName()); } } diff --git a/src/video_core/shader/decode/arithmetic_half_immediate.cpp b/src/video_core/shader/decode/arithmetic_half_immediate.cpp index 6466fc011..d179b9873 100644 --- a/src/video_core/shader/decode/arithmetic_half_immediate.cpp +++ b/src/video_core/shader/decode/arithmetic_half_immediate.cpp @@ -19,12 +19,12 @@ u32 ShaderIR::DecodeArithmeticHalfImmediate(NodeBlock& bb, u32 pc) { const auto opcode = OpCode::Decode(instr); if (opcode->get().GetId() == OpCode::Id::HADD2_IMM) { - if (instr.alu_half_imm.ftz != 0) { - LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName()); + if (instr.alu_half_imm.ftz == 0) { + LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName()); } } else { - if (instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::None) { - LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName()); + if (instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::FTZ) { + LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName()); } } diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp 
index a33d242e9..371fae127 100644 --- a/src/video_core/shader/decode/arithmetic_integer.cpp +++ b/src/video_core/shader/decode/arithmetic_integer.cpp @@ -130,6 +130,25 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) { SetRegister(bb, instr.gpr0, value); break; } + case OpCode::Id::FLO_R: + case OpCode::Id::FLO_C: + case OpCode::Id::FLO_IMM: { + Node value; + if (instr.flo.invert) { + op_b = Operation(OperationCode::IBitwiseNot, NO_PRECISE, std::move(op_b)); + } + if (instr.flo.is_signed) { + value = Operation(OperationCode::IBitMSB, NO_PRECISE, std::move(op_b)); + } else { + value = Operation(OperationCode::UBitMSB, NO_PRECISE, std::move(op_b)); + } + if (instr.flo.sh) { + value = + Operation(OperationCode::UBitwiseXor, NO_PRECISE, std::move(value), Immediate(31)); + } + SetRegister(bb, instr.gpr0, std::move(value)); + break; + } case OpCode::Id::SEL_C: case OpCode::Id::SEL_R: case OpCode::Id::SEL_IMM: { diff --git a/src/video_core/shader/decode/conversion.cpp b/src/video_core/shader/decode/conversion.cpp index 32facd6ba..0eeb75559 100644 --- a/src/video_core/shader/decode/conversion.cpp +++ b/src/video_core/shader/decode/conversion.cpp @@ -63,12 +63,11 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) { case OpCode::Id::I2F_R: case OpCode::Id::I2F_C: case OpCode::Id::I2F_IMM: { - UNIMPLEMENTED_IF(instr.conversion.int_src.selector != 0); UNIMPLEMENTED_IF(instr.conversion.dst_size == Register::Size::Long); UNIMPLEMENTED_IF_MSG(instr.generates_cc, "Condition codes generation in I2F is not implemented"); - Node value = [&]() { + Node value = [&] { switch (opcode->get().GetId()) { case OpCode::Id::I2F_R: return GetRegister(instr.gpr20); @@ -81,7 +80,19 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) { return Immediate(0); } }(); + const bool input_signed = instr.conversion.is_input_signed; + + if (instr.conversion.src_size == Register::Size::Byte) { + const u32 offset = static_cast<u32>(instr.conversion.int_src.selector) * 8; + if 
(offset > 0) { + value = SignedOperation(OperationCode::ILogicalShiftRight, input_signed, + std::move(value), Immediate(offset)); + } + } else { + UNIMPLEMENTED_IF(instr.conversion.int_src.selector != 0); + } + value = ConvertIntegerSize(value, instr.conversion.src_size, input_signed); value = GetOperandAbsNegInteger(value, instr.conversion.abs_a, false, input_signed); value = SignedOperation(OperationCode::FCastInteger, input_signed, PRECISE, value); diff --git a/src/video_core/shader/decode/ffma.cpp b/src/video_core/shader/decode/ffma.cpp index ca2f39e8d..5973588d6 100644 --- a/src/video_core/shader/decode/ffma.cpp +++ b/src/video_core/shader/decode/ffma.cpp @@ -19,10 +19,10 @@ u32 ShaderIR::DecodeFfma(NodeBlock& bb, u32 pc) { UNIMPLEMENTED_IF_MSG(instr.ffma.cc != 0, "FFMA cc not implemented"); if (instr.ffma.tab5980_0 != 1) { - LOG_WARNING(HW_GPU, "FFMA tab5980_0({}) not implemented", instr.ffma.tab5980_0.Value()); + LOG_DEBUG(HW_GPU, "FFMA tab5980_0({}) not implemented", instr.ffma.tab5980_0.Value()); } if (instr.ffma.tab5980_1 != 0) { - LOG_WARNING(HW_GPU, "FFMA tab5980_1({}) not implemented", instr.ffma.tab5980_1.Value()); + LOG_DEBUG(HW_GPU, "FFMA tab5980_1({}) not implemented", instr.ffma.tab5980_1.Value()); } const Node op_a = GetRegister(instr.gpr8); diff --git a/src/video_core/shader/decode/half_set.cpp b/src/video_core/shader/decode/half_set.cpp index 48ca7a4af..848e46874 100644 --- a/src/video_core/shader/decode/half_set.cpp +++ b/src/video_core/shader/decode/half_set.cpp @@ -20,8 +20,8 @@ u32 ShaderIR::DecodeHalfSet(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; const auto opcode = OpCode::Decode(instr); - if (instr.hset2.ftz != 0) { - LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName()); + if (instr.hset2.ftz == 0) { + LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName()); } Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.hset2.type_a); diff --git 
a/src/video_core/shader/decode/half_set_predicate.cpp b/src/video_core/shader/decode/half_set_predicate.cpp index fec8f2dbe..310655619 100644 --- a/src/video_core/shader/decode/half_set_predicate.cpp +++ b/src/video_core/shader/decode/half_set_predicate.cpp @@ -19,7 +19,9 @@ u32 ShaderIR::DecodeHalfSetPredicate(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; const auto opcode = OpCode::Decode(instr); - LOG_DEBUG(HW_GPU, "ftz={}", static_cast<u32>(instr.hsetp2.ftz)); + if (instr.hsetp2.ftz != 0) { + LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName()); + } Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.hsetp2.type_a); op_a = GetOperandAbsNegHalf(op_a, instr.hsetp2.abs_a, instr.hsetp2.negate_a); diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp index 335d78146..c934d0719 100644 --- a/src/video_core/shader/decode/memory.cpp +++ b/src/video_core/shader/decode/memory.cpp @@ -21,8 +21,10 @@ using Tegra::Shader::OpCode; using Tegra::Shader::Register; namespace { -u32 GetUniformTypeElementsCount(Tegra::Shader::UniformType uniform_type) { + +u32 GetLdgMemorySize(Tegra::Shader::UniformType uniform_type) { switch (uniform_type) { + case Tegra::Shader::UniformType::UnsignedByte: case Tegra::Shader::UniformType::Single: return 1; case Tegra::Shader::UniformType::Double: @@ -35,6 +37,22 @@ u32 GetUniformTypeElementsCount(Tegra::Shader::UniformType uniform_type) { return 1; } } + +u32 GetStgMemorySize(Tegra::Shader::UniformType uniform_type) { + switch (uniform_type) { + case Tegra::Shader::UniformType::Single: + return 1; + case Tegra::Shader::UniformType::Double: + return 2; + case Tegra::Shader::UniformType::Quad: + case Tegra::Shader::UniformType::UnsignedQuad: + return 4; + default: + UNIMPLEMENTED_MSG("Unimplemented size={}!", static_cast<u32>(uniform_type)); + return 1; + } +} + } // Anonymous namespace u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { @@ -168,7 
+186,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { const auto [real_address_base, base_address, descriptor] = TrackGlobalMemory(bb, instr, false); - const u32 count = GetUniformTypeElementsCount(type); + const u32 count = GetLdgMemorySize(type); if (!real_address_base || !base_address) { // Tracking failed, load zeroes. for (u32 i = 0; i < count; ++i) { @@ -179,12 +197,22 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { for (u32 i = 0; i < count; ++i) { const Node it_offset = Immediate(i * 4); - const Node real_address = - Operation(OperationCode::UAdd, NO_PRECISE, real_address_base, it_offset); - const Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor); + const Node real_address = Operation(OperationCode::UAdd, real_address_base, it_offset); + Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor); + + if (type == Tegra::Shader::UniformType::UnsignedByte) { + // To handle unaligned loads get the byte used to dereferenced global memory + // and extract that byte from the loaded uint32. 
+ Node byte = Operation(OperationCode::UBitwiseAnd, real_address, Immediate(3)); + byte = Operation(OperationCode::ULogicalShiftLeft, std::move(byte), Immediate(3)); + + gmem = Operation(OperationCode::UBitfieldExtract, std::move(gmem), std::move(byte), + Immediate(8)); + } SetTemporary(bb, i, gmem); } + for (u32 i = 0; i < count; ++i) { SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); } @@ -196,28 +224,28 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0, "Unaligned attribute loads are not supported"); - u64 next_element = instr.attribute.fmt20.element; - auto next_index = static_cast<u64>(instr.attribute.fmt20.index.Value()); + u64 element = instr.attribute.fmt20.element; + auto index = static_cast<u64>(instr.attribute.fmt20.index.Value()); - const auto StoreNextElement = [&](u32 reg_offset) { - const auto dest = GetOutputAttribute(static_cast<Attribute::Index>(next_index), - next_element, GetRegister(instr.gpr39)); + const u32 num_words = static_cast<u32>(instr.attribute.fmt20.size.Value()) + 1; + for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { + Node dest; + if (instr.attribute.fmt20.patch) { + const u32 offset = static_cast<u32>(index) * 4 + static_cast<u32>(element); + dest = MakeNode<PatchNode>(offset); + } else { + dest = GetOutputAttribute(static_cast<Attribute::Index>(index), element, + GetRegister(instr.gpr39)); + } const auto src = GetRegister(instr.gpr0.Value() + reg_offset); bb.push_back(Operation(OperationCode::Assign, dest, src)); - // Load the next attribute element into the following register. If the element - // to load goes beyond the vec4 size, load the first element of the next - // attribute. - next_element = (next_element + 1) % 4; - next_index = next_index + (next_element == 0 ? 
1 : 0); - }; - - const u32 num_words = static_cast<u32>(instr.attribute.fmt20.size.Value()) + 1; - for (u32 reg_offset = 0; reg_offset < num_words; ++reg_offset) { - StoreNextElement(reg_offset); + // Load the next attribute element into the following register. If the element to load + // goes beyond the vec4 size, load the first element of the next attribute. + element = (element + 1) % 4; + index = index + (element == 0 ? 1 : 0); } - break; } case OpCode::Id::ST_L: @@ -274,7 +302,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { break; } - const u32 count = GetUniformTypeElementsCount(type); + const u32 count = GetStgMemorySize(type); for (u32 i = 0; i < count; ++i) { const Node it_offset = Immediate(i * 4); const Node real_address = Operation(OperationCode::UAdd, real_address_base, it_offset); diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp index 116b95f76..7321698b2 100644 --- a/src/video_core/shader/decode/other.cpp +++ b/src/video_core/shader/decode/other.cpp @@ -69,6 +69,8 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { case OpCode::Id::MOV_SYS: { const Node value = [this, instr] { switch (instr.sys20) { + case SystemVariable::InvocationId: + return Operation(OperationCode::InvocationId); case SystemVariable::Ydirection: return Operation(OperationCode::YNegate); case SystemVariable::InvocationInfo: @@ -255,8 +257,14 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { SetRegister(bb, instr.gpr0, GetRegister(instr.gpr8)); break; } + case OpCode::Id::MEMBAR: { + UNIMPLEMENTED_IF(instr.membar.type != Tegra::Shader::MembarType::GL); + UNIMPLEMENTED_IF(instr.membar.unknown != Tegra::Shader::MembarUnknown::Default); + bb.push_back(Operation(OperationCode::MemoryBarrierGL)); + break; + } case OpCode::Id::DEPBAR: { - LOG_WARNING(HW_GPU, "DEPBAR instruction is stubbed"); + LOG_DEBUG(HW_GPU, "DEPBAR instruction is stubbed"); break; } default: diff --git a/src/video_core/shader/decode/register_set_predicate.cpp 
b/src/video_core/shader/decode/register_set_predicate.cpp index e6c9d287e..8d54cce34 100644 --- a/src/video_core/shader/decode/register_set_predicate.cpp +++ b/src/video_core/shader/decode/register_set_predicate.cpp @@ -13,37 +13,65 @@ namespace VideoCommon::Shader { using Tegra::Shader::Instruction; using Tegra::Shader::OpCode; +namespace { +constexpr u64 NUM_PROGRAMMABLE_PREDICATES = 7; +} + u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; const auto opcode = OpCode::Decode(instr); - UNIMPLEMENTED_IF(instr.r2p.mode != Tegra::Shader::R2pMode::Pr); + UNIMPLEMENTED_IF(instr.p2r_r2p.mode != Tegra::Shader::R2pMode::Pr); - const Node apply_mask = [&]() { + const Node apply_mask = [&] { switch (opcode->get().GetId()) { case OpCode::Id::R2P_IMM: - return Immediate(static_cast<u32>(instr.r2p.immediate_mask)); + case OpCode::Id::P2R_IMM: + return Immediate(static_cast<u32>(instr.p2r_r2p.immediate_mask)); default: UNREACHABLE(); - return Immediate(static_cast<u32>(instr.r2p.immediate_mask)); + return Immediate(0); } }(); - const Node mask = GetRegister(instr.gpr8); - const auto offset = static_cast<u32>(instr.r2p.byte) * 8; - constexpr u32 programmable_preds = 7; - for (u64 pred = 0; pred < programmable_preds; ++pred) { - const auto shift = static_cast<u32>(pred); + const auto offset = static_cast<u32>(instr.p2r_r2p.byte) * 8; + + switch (opcode->get().GetId()) { + case OpCode::Id::R2P_IMM: { + const Node mask = GetRegister(instr.gpr8); - const Node apply_compare = BitfieldExtract(apply_mask, shift, 1); - const Node condition = - Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0)); + for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) { + const auto shift = static_cast<u32>(pred); - const Node value_compare = BitfieldExtract(mask, offset + shift, 1); - const Node value = Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0)); + const Node apply_compare = 
BitfieldExtract(apply_mask, shift, 1); + const Node condition = + Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0)); - const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value); - bb.push_back(Conditional(condition, {code})); + const Node value_compare = BitfieldExtract(mask, offset + shift, 1); + const Node value = + Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0)); + + const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value); + bb.push_back(Conditional(condition, {code})); + } + break; + } + case OpCode::Id::P2R_IMM: { + Node value = Immediate(0); + for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) { + Node bit = Operation(OperationCode::Select, GetPredicate(pred), Immediate(1U << pred), + Immediate(0)); + value = Operation(OperationCode::UBitwiseOr, std::move(value), std::move(bit)); + } + value = Operation(OperationCode::UBitwiseAnd, std::move(value), apply_mask); + value = BitfieldInsert(GetRegister(instr.gpr8), std::move(value), offset, 8); + + SetRegister(bb, instr.gpr0, std::move(value)); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled P2R/R2R instruction: {}", opcode->get().GetName()); + break; } return pc; diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp index ca690b58b..4b14cdf58 100644 --- a/src/video_core/shader/decode/texture.cpp +++ b/src/video_core/shader/decode/texture.cpp @@ -44,10 +44,6 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { bool is_bindless = false; switch (opcode->get().GetId()) { case OpCode::Id::TEX: { - if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete"); - } - const TextureType texture_type{instr.tex.texture_type}; const bool is_array = instr.tex.array != 0; const bool is_aoffi = instr.tex.UsesMiscMode(TextureMiscMode::AOFFI); @@ -62,10 +58,6 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 
pc) { UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI), "AOFFI is not implemented"); - if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete"); - } - const TextureType texture_type{instr.tex_b.texture_type}; const bool is_array = instr.tex_b.array != 0; const bool is_aoffi = instr.tex.UsesMiscMode(TextureMiscMode::AOFFI); @@ -82,10 +74,6 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC); const auto process_mode = instr.texs.GetTextureProcessMode(); - if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete"); - } - const Node4 components = GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array); @@ -101,78 +89,142 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { [[fallthrough]]; } case OpCode::Id::TLD4: { - ASSERT(instr.tld4.array == 0); UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV), "NDV is not implemented"); - UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP), - "PTP is not implemented"); - - if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete"); - } - const auto texture_type = instr.tld4.texture_type.Value(); const bool depth_compare = is_bindless ? instr.tld4_b.UsesMiscMode(TextureMiscMode::DC) : instr.tld4.UsesMiscMode(TextureMiscMode::DC); const bool is_array = instr.tld4.array != 0; const bool is_aoffi = is_bindless ? instr.tld4_b.UsesMiscMode(TextureMiscMode::AOFFI) : instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI); - WriteTexInstructionFloat( - bb, instr, - GetTld4Code(instr, texture_type, depth_compare, is_array, is_aoffi, is_bindless)); + const bool is_ptp = is_bindless ? 
instr.tld4_b.UsesMiscMode(TextureMiscMode::PTP) + : instr.tld4.UsesMiscMode(TextureMiscMode::PTP); + WriteTexInstructionFloat(bb, instr, + GetTld4Code(instr, texture_type, depth_compare, is_array, is_aoffi, + is_ptp, is_bindless)); break; } case OpCode::Id::TLD4S: { - UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI), - "AOFFI is not implemented"); - if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete"); - } - - const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC); + constexpr std::size_t num_coords = 2; + const bool is_aoffi = instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI); + const bool is_depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC); const Node op_a = GetRegister(instr.gpr8); const Node op_b = GetRegister(instr.gpr20); // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction. std::vector<Node> coords; - if (depth_compare) { + std::vector<Node> aoffi; + Node depth_compare; + if (is_depth_compare) { // Note: TLD4S coordinate encoding works just like TEXS's const Node op_y = GetRegister(instr.gpr8.Value() + 1); coords.push_back(op_a); coords.push_back(op_y); - coords.push_back(op_b); + if (is_aoffi) { + aoffi = GetAoffiCoordinates(op_b, num_coords, true); + depth_compare = GetRegister(instr.gpr20.Value() + 1); + } else { + depth_compare = op_b; + } } else { + // There's no depth compare coords.push_back(op_a); - coords.push_back(op_b); + if (is_aoffi) { + coords.push_back(GetRegister(instr.gpr8.Value() + 1)); + aoffi = GetAoffiCoordinates(op_b, num_coords, true); + } else { + coords.push_back(op_b); + } } const Node component = Immediate(static_cast<u32>(instr.tld4s.component)); - const auto& sampler = - GetSampler(instr.sampler, {{TextureType::Texture2D, false, depth_compare}}); + const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare}; + const Sampler& sampler = *GetSampler(instr.sampler, info); 
Node4 values; for (u32 element = 0; element < values.size(); ++element) { auto coords_copy = coords; - MetaTexture meta{sampler, {}, {}, {}, {}, {}, component, element}; + MetaTexture meta{sampler, {}, depth_compare, aoffi, {}, {}, {}, {}, component, element}; values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); } - WriteTexsInstructionFloat(bb, instr, values, true); + if (instr.tld4s.fp16_flag) { + WriteTexsInstructionHalfFloat(bb, instr, values, true); + } else { + WriteTexsInstructionFloat(bb, instr, values, true); + } break; } - case OpCode::Id::TXQ_B: + case OpCode::Id::TXD_B: is_bindless = true; [[fallthrough]]; - case OpCode::Id::TXQ: { - if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete"); + case OpCode::Id::TXD: { + UNIMPLEMENTED_IF_MSG(instr.txd.UsesMiscMode(TextureMiscMode::AOFFI), + "AOFFI is not implemented"); + UNIMPLEMENTED_IF_MSG(instr.txd.is_array != 0, "TXD Array is not implemented"); + + u64 base_reg = instr.gpr8.Value(); + const auto derivate_reg = instr.gpr20.Value(); + const auto texture_type = instr.txd.texture_type.Value(); + const auto coord_count = GetCoordCount(texture_type); + + const Sampler* sampler = is_bindless + ? 
GetBindlessSampler(base_reg, {{texture_type, false, false}}) + : GetSampler(instr.sampler, {{texture_type, false, false}}); + Node4 values; + if (sampler == nullptr) { + for (u32 element = 0; element < values.size(); ++element) { + values[element] = Immediate(0); + } + WriteTexInstructionFloat(bb, instr, values); + break; + } + if (is_bindless) { + base_reg++; } + std::vector<Node> coords; + std::vector<Node> derivates; + for (std::size_t i = 0; i < coord_count; ++i) { + coords.push_back(GetRegister(base_reg + i)); + const std::size_t derivate = i * 2; + derivates.push_back(GetRegister(derivate_reg + derivate)); + derivates.push_back(GetRegister(derivate_reg + derivate + 1)); + } + + for (u32 element = 0; element < values.size(); ++element) { + MetaTexture meta{*sampler, {}, {}, {}, {}, derivates, {}, {}, {}, element}; + values[element] = Operation(OperationCode::TextureGradient, std::move(meta), coords); + } + + WriteTexInstructionFloat(bb, instr, values); + + break; + } + case OpCode::Id::TXQ_B: + is_bindless = true; + [[fallthrough]]; + case OpCode::Id::TXQ: { // TODO: The new commits on the texture refactor, change the way samplers work. // Sadly, not all texture instructions specify the type of texture their sampler // uses. This must be fixed at a later instance. - const auto& sampler = - is_bindless ? GetBindlessSampler(instr.gpr8, {}) : GetSampler(instr.sampler, {}); + const Sampler* sampler = + is_bindless ? 
GetBindlessSampler(instr.gpr8) : GetSampler(instr.sampler); + + if (sampler == nullptr) { + u32 indexer = 0; + for (u32 element = 0; element < 4; ++element) { + if (!instr.txq.IsComponentEnabled(element)) { + continue; + } + const Node value = Immediate(0); + SetTemporary(bb, indexer++, value); + } + for (u32 i = 0; i < indexer; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); + } + break; + } u32 indexer = 0; switch (instr.txq.query_type) { @@ -181,7 +233,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { if (!instr.txq.IsComponentEnabled(element)) { continue; } - MetaTexture meta{sampler, {}, {}, {}, {}, {}, {}, element}; + MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, {}, element}; const Node value = Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8.Value() + (is_bindless ? 1 : 0))); @@ -205,15 +257,25 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), "NDV is not implemented"); - if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete"); - } - auto texture_type = instr.tmml.texture_type.Value(); const bool is_array = instr.tmml.array != 0; - const auto& sampler = - is_bindless ? GetBindlessSampler(instr.gpr20, {{texture_type, is_array, false}}) - : GetSampler(instr.sampler, {{texture_type, is_array, false}}); + const Sampler* sampler = + is_bindless ? 
GetBindlessSampler(instr.gpr20) : GetSampler(instr.sampler); + + if (sampler == nullptr) { + u32 indexer = 0; + for (u32 element = 0; element < 2; ++element) { + if (!instr.tmml.IsComponentEnabled(element)) { + continue; + } + const Node value = Immediate(0); + SetTemporary(bb, indexer++, value); + } + for (u32 i = 0; i < indexer; ++i) { + SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); + } + break; + } std::vector<Node> coords; @@ -240,7 +302,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { continue; } auto params = coords; - MetaTexture meta{sampler, {}, {}, {}, {}, {}, {}, element}; + MetaTexture meta{*sampler, {}, {}, {}, {}, {}, {}, {}, {}, element}; const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params)); SetTemporary(bb, indexer++, value); } @@ -254,10 +316,6 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { UNIMPLEMENTED_IF_MSG(instr.tld.ms, "MS is not implemented"); UNIMPLEMENTED_IF_MSG(instr.tld.cl, "CL is not implemented"); - if (instr.tld.nodep_flag) { - LOG_WARNING(HW_GPU, "TLD.NODEP implementation is incomplete"); - } - WriteTexInstructionFloat(bb, instr, GetTldCode(instr)); break; } @@ -269,10 +327,6 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { "AOFFI is not implemented"); UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented"); - if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) { - LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete"); - } - const Node4 components = GetTldsCode(instr, texture_type, is_array); if (instr.tlds.fp32_flag) { @@ -289,68 +343,54 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { return pc; } -const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, - std::optional<SamplerInfo> sampler_info) { - const auto offset = static_cast<u32>(sampler.index.Value()); - - TextureType type; - bool is_array; - bool is_shadow; +ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(std::optional<SamplerInfo> 
sampler_info, u32 offset, + std::optional<u32> buffer) { if (sampler_info) { - type = sampler_info->type; - is_array = sampler_info->is_array; - is_shadow = sampler_info->is_shadow; - } else if (const auto sampler = locker.ObtainBoundSampler(offset)) { - type = sampler->texture_type.Value(); - is_array = sampler->is_array.Value() != 0; - is_shadow = sampler->is_shadow.Value() != 0; - } else { + return *sampler_info; + } + const auto sampler = + buffer ? locker.ObtainBindlessSampler(*buffer, offset) : locker.ObtainBoundSampler(offset); + if (!sampler) { LOG_WARNING(HW_GPU, "Unknown sampler info"); - type = TextureType::Texture2D; - is_array = false; - is_shadow = false; + return SamplerInfo{TextureType::Texture2D, false, false, false}; } + return SamplerInfo{sampler->texture_type, sampler->is_array != 0, sampler->is_shadow != 0, + sampler->is_buffer != 0}; +} + +const Sampler* ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, + std::optional<SamplerInfo> sampler_info) { + const auto offset = static_cast<u32>(sampler.index.Value()); + const auto info = GetSamplerInfo(sampler_info, offset); // If this sampler has already been used, return the existing mapping. 
const auto it = std::find_if(used_samplers.begin(), used_samplers.end(), [offset](const Sampler& entry) { return entry.GetOffset() == offset; }); if (it != used_samplers.end()) { - ASSERT(!it->IsBindless() && it->GetType() == type && it->IsArray() == is_array && - it->IsShadow() == is_shadow); - return *it; + ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array && + it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer); + return &*it; } // Otherwise create a new mapping for this sampler const auto next_index = static_cast<u32>(used_samplers.size()); - return used_samplers.emplace_back(Sampler(next_index, offset, type, is_array, is_shadow)); + return &used_samplers.emplace_back(next_index, offset, info.type, info.is_array, info.is_shadow, + info.is_buffer); } -const Sampler& ShaderIR::GetBindlessSampler(const Tegra::Shader::Register& reg, +const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, std::optional<SamplerInfo> sampler_info) { const Node sampler_register = GetRegister(reg); const auto [base_sampler, buffer, offset] = TrackCbuf(sampler_register, global_code, static_cast<s64>(global_code.size())); ASSERT(base_sampler != nullptr); - - TextureType type; - bool is_array; - bool is_shadow; - if (sampler_info) { - type = sampler_info->type; - is_array = sampler_info->is_array; - is_shadow = sampler_info->is_shadow; - } else if (const auto sampler = locker.ObtainBindlessSampler(buffer, offset)) { - type = sampler->texture_type.Value(); - is_array = sampler->is_array.Value() != 0; - is_shadow = sampler->is_shadow.Value() != 0; - } else { - LOG_WARNING(HW_GPU, "Unknown sampler info"); - type = TextureType::Texture2D; - is_array = false; - is_shadow = false; + if (base_sampler == nullptr) { + return nullptr; } + const auto info = GetSamplerInfo(sampler_info, offset, buffer); + // If this sampler has already been used, return the existing mapping. 
const auto it = std::find_if(used_samplers.begin(), used_samplers.end(), @@ -358,15 +398,15 @@ const Sampler& ShaderIR::GetBindlessSampler(const Tegra::Shader::Register& reg, return entry.GetBuffer() == buffer && entry.GetOffset() == offset; }); if (it != used_samplers.end()) { - ASSERT(it->IsBindless() && it->GetType() == type && it->IsArray() == is_array && - it->IsShadow() == is_shadow); - return *it; + ASSERT(it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array && + it->IsShadow() == info.is_shadow); + return &*it; } // Otherwise create a new mapping for this sampler const auto next_index = static_cast<u32>(used_samplers.size()); - return used_samplers.emplace_back( - Sampler(next_index, offset, buffer, type, is_array, is_shadow)); + return &used_samplers.emplace_back(next_index, offset, buffer, info.type, info.is_array, + info.is_shadow, info.is_buffer); } void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) { @@ -409,14 +449,14 @@ void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr, const } void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr, - const Node4& components) { + const Node4& components, bool ignore_mask) { // TEXS.F16 destionation registers are packed in two registers in pairs (just like any half // float instruction). Node4 values; u32 dest_elem = 0; for (u32 component = 0; component < 4; ++component) { - if (!instr.texs.IsComponentEnabled(component)) + if (!instr.texs.IsComponentEnabled(component) && !ignore_mask) continue; values[dest_elem++] = components[component]; } @@ -451,17 +491,23 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, (texture_type == TextureType::TextureCube && is_array && is_shadow), "This method is not supported."); - const auto& sampler = - is_bindless ? 
GetBindlessSampler(*bindless_reg, {{texture_type, is_array, is_shadow}}) - : GetSampler(instr.sampler, {{texture_type, is_array, is_shadow}}); + const SamplerInfo info{texture_type, is_array, is_shadow, false}; + const Sampler* sampler = + is_bindless ? GetBindlessSampler(*bindless_reg, info) : GetSampler(instr.sampler, info); + Node4 values; + if (sampler == nullptr) { + for (u32 element = 0; element < values.size(); ++element) { + values[element] = Immediate(0); + } + return values; + } const bool lod_needed = process_mode == TextureProcessMode::LZ || process_mode == TextureProcessMode::LL || process_mode == TextureProcessMode::LLA; - // LOD selection (either via bias or explicit textureLod) not - // supported in GL for sampler2DArrayShadow and - // samplerCubeArrayShadow. + // LOD selection (either via bias or explicit textureLod) not supported in GL for + // sampler2DArrayShadow and samplerCubeArrayShadow. const bool gl_lod_supported = !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) || (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow)); @@ -471,8 +517,8 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported); - Node bias = {}; - Node lod = {}; + Node bias; + Node lod; if (process_mode != TextureProcessMode::None && gl_lod_supported) { switch (process_mode) { case TextureProcessMode::LZ: @@ -493,10 +539,9 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, } } - Node4 values; for (u32 element = 0; element < values.size(); ++element) { auto copy_coords = coords; - MetaTexture meta{sampler, array, depth_compare, aoffi, bias, lod, {}, element}; + MetaTexture meta{*sampler, array, depth_compare, aoffi, {}, {}, bias, lod, {}, element}; values[element] = Operation(read_method, meta, std::move(copy_coords)); } @@ -593,7 +638,9 @@ Node4 ShaderIR::GetTexsCode(Instruction 
instr, TextureType texture_type, } Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare, - bool is_array, bool is_aoffi, bool is_bindless) { + bool is_array, bool is_aoffi, bool is_ptp, bool is_bindless) { + ASSERT_MSG(!(is_aoffi && is_ptp), "AOFFI and PTP can't be enabled at the same time"); + const std::size_t coord_count = GetCoordCount(texture_type); // If enabled arrays index is always stored in the gpr8 field @@ -608,17 +655,26 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de u64 parameter_register = instr.gpr20.Value(); - const auto& sampler = - is_bindless - ? GetBindlessSampler(parameter_register++, {{texture_type, is_array, depth_compare}}) - : GetSampler(instr.sampler, {{texture_type, is_array, depth_compare}}); + const SamplerInfo info{texture_type, is_array, depth_compare, false}; + const Sampler* sampler = is_bindless ? GetBindlessSampler(parameter_register++, info) + : GetSampler(instr.sampler, info); + Node4 values; + if (sampler == nullptr) { + for (u32 element = 0; element < values.size(); ++element) { + values[element] = Immediate(0); + } + return values; + } - std::vector<Node> aoffi; + std::vector<Node> aoffi, ptp; if (is_aoffi) { aoffi = GetAoffiCoordinates(GetRegister(parameter_register++), coord_count, true); + } else if (is_ptp) { + ptp = GetPtpCoordinates( + {GetRegister(parameter_register++), GetRegister(parameter_register++)}); } - Node dc{}; + Node dc; if (depth_compare) { dc = GetRegister(parameter_register++); } @@ -626,11 +682,10 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de const Node component = is_bindless ? 
Immediate(static_cast<u32>(instr.tld4_b.component)) : Immediate(static_cast<u32>(instr.tld4.component)); - Node4 values; for (u32 element = 0; element < values.size(); ++element) { auto coords_copy = coords; - MetaTexture meta{sampler, GetRegister(array_register), dc, aoffi, {}, {}, component, - element}; + MetaTexture meta{ + *sampler, GetRegister(array_register), dc, aoffi, ptp, {}, {}, {}, component, element}; values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); } @@ -658,12 +713,12 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) { // const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr}; // const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr}; - const auto& sampler = GetSampler(instr.sampler, {{texture_type, is_array, false}}); + const auto& sampler = *GetSampler(instr.sampler); Node4 values; for (u32 element = 0; element < values.size(); ++element) { auto coords_copy = coords; - MetaTexture meta{sampler, array_register, {}, {}, {}, lod, {}, element}; + MetaTexture meta{sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element}; values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); } @@ -671,6 +726,8 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) { } Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) { + const Sampler& sampler = *GetSampler(instr.sampler); + const std::size_t type_coord_count = GetCoordCount(texture_type); const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL; @@ -694,12 +751,24 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is // When lod is used always is in gpr20 const Node lod = lod_enabled ? 
GetRegister(instr.gpr20) : Immediate(0); - const auto& sampler = GetSampler(instr.sampler, {{texture_type, is_array, false}}); + // Fill empty entries from the guest sampler + const std::size_t entry_coord_count = GetCoordCount(sampler.GetType()); + if (type_coord_count != entry_coord_count) { + LOG_WARNING(HW_GPU, "Bound and built texture types mismatch"); + + // When the size is higher we insert zeroes + for (std::size_t i = type_coord_count; i < entry_coord_count; ++i) { + coords.push_back(GetRegister(Register::ZeroIndex)); + } + + // Then we ensure the size matches the number of entries (dropping unused values) + coords.resize(entry_coord_count); + } Node4 values; for (u32 element = 0; element < values.size(); ++element) { auto coords_copy = coords; - MetaTexture meta{sampler, array, {}, {}, {}, lod, {}, element}; + MetaTexture meta{sampler, array, {}, {}, {}, {}, {}, lod, {}, element}; values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); } return values; @@ -764,4 +833,38 @@ std::vector<Node> ShaderIR::GetAoffiCoordinates(Node aoffi_reg, std::size_t coor return aoffi; } +std::vector<Node> ShaderIR::GetPtpCoordinates(std::array<Node, 2> ptp_regs) { + static constexpr u32 num_entries = 8; + + std::vector<Node> ptp; + ptp.reserve(num_entries); + + const auto global_size = static_cast<s64>(global_code.size()); + const std::optional low = TrackImmediate(ptp_regs[0], global_code, global_size); + const std::optional high = TrackImmediate(ptp_regs[1], global_code, global_size); + if (!low || !high) { + for (u32 entry = 0; entry < num_entries; ++entry) { + const u32 reg = entry / 4; + const u32 offset = entry % 4; + const Node value = BitfieldExtract(ptp_regs[reg], offset * 8, 6); + const Node condition = + Operation(OperationCode::LogicalIGreaterEqual, value, Immediate(32)); + const Node negative = Operation(OperationCode::IAdd, value, Immediate(-64)); + ptp.push_back(Operation(OperationCode::Select, condition, negative, value)); + } + 
return ptp; + } + + const u64 immediate = (static_cast<u64>(*high) << 32) | static_cast<u64>(*low); + for (u32 entry = 0; entry < num_entries; ++entry) { + s32 value = (immediate >> (entry * 8)) & 0b111111; + if (value >= 32) { + value -= 64; + } + ptp.push_back(Immediate(value)); + } + + return ptp; +} + } // namespace VideoCommon::Shader diff --git a/src/video_core/shader/decode/warp.cpp b/src/video_core/shader/decode/warp.cpp index fa8a250cc..11b77f795 100644 --- a/src/video_core/shader/decode/warp.cpp +++ b/src/video_core/shader/decode/warp.cpp @@ -17,6 +17,7 @@ using Tegra::Shader::ShuffleOperation; using Tegra::Shader::VoteOperation; namespace { + OperationCode GetOperationCode(VoteOperation vote_op) { switch (vote_op) { case VoteOperation::All: @@ -30,12 +31,16 @@ OperationCode GetOperationCode(VoteOperation vote_op) { return OperationCode::VoteAll; } } + } // Anonymous namespace u32 ShaderIR::DecodeWarp(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; const auto opcode = OpCode::Decode(instr); + // Signal the backend that this shader uses warp instructions. + uses_warps = true; + switch (opcode->get().GetId()) { case OpCode::Id::VOTE: { const Node value = GetPredicate(instr.vote.value, instr.vote.negate_value != 0); @@ -46,50 +51,59 @@ u32 ShaderIR::DecodeWarp(NodeBlock& bb, u32 pc) { break; } case OpCode::Id::SHFL: { - Node width = [this, instr] { - Node mask = instr.shfl.is_mask_imm ? Immediate(static_cast<u32>(instr.shfl.mask_imm)) - : GetRegister(instr.gpr39); - - // Convert the obscure SHFL mask back into GL_NV_shader_thread_shuffle's width. This has - // been done reversing Nvidia's math. It won't work on all cases due to SHFL having - // different parameters that don't properly map to GLSL's interface, but it should work - // for cases emitted by Nvidia's compiler. 
- if (instr.shfl.operation == ShuffleOperation::Up) { - return Operation( - OperationCode::ILogicalShiftRight, - Operation(OperationCode::IAdd, std::move(mask), Immediate(-0x2000)), - Immediate(8)); - } else { - return Operation(OperationCode::ILogicalShiftRight, - Operation(OperationCode::IAdd, Immediate(0x201F), - Operation(OperationCode::INegate, std::move(mask))), - Immediate(8)); - } - }(); + Node mask = instr.shfl.is_mask_imm ? Immediate(static_cast<u32>(instr.shfl.mask_imm)) + : GetRegister(instr.gpr39); + Node index = instr.shfl.is_index_imm ? Immediate(static_cast<u32>(instr.shfl.index_imm)) + : GetRegister(instr.gpr20); - const auto [operation, in_range] = [instr]() -> std::pair<OperationCode, OperationCode> { + Node thread_id = Operation(OperationCode::ThreadId); + Node clamp = Operation(OperationCode::IBitwiseAnd, mask, Immediate(0x1FU)); + Node seg_mask = BitfieldExtract(mask, 8, 16); + + Node neg_seg_mask = Operation(OperationCode::IBitwiseNot, seg_mask); + Node min_thread_id = Operation(OperationCode::IBitwiseAnd, thread_id, seg_mask); + Node max_thread_id = Operation(OperationCode::IBitwiseOr, min_thread_id, + Operation(OperationCode::IBitwiseAnd, clamp, neg_seg_mask)); + + Node src_thread_id = [instr, index, neg_seg_mask, min_thread_id, thread_id] { switch (instr.shfl.operation) { case ShuffleOperation::Idx: - return {OperationCode::ShuffleIndexed, OperationCode::InRangeShuffleIndexed}; - case ShuffleOperation::Up: - return {OperationCode::ShuffleUp, OperationCode::InRangeShuffleUp}; + return Operation(OperationCode::IBitwiseOr, + Operation(OperationCode::IBitwiseAnd, index, neg_seg_mask), + min_thread_id); case ShuffleOperation::Down: - return {OperationCode::ShuffleDown, OperationCode::InRangeShuffleDown}; + return Operation(OperationCode::IAdd, thread_id, index); + case ShuffleOperation::Up: + return Operation(OperationCode::IAdd, thread_id, + Operation(OperationCode::INegate, index)); case ShuffleOperation::Bfly: - return 
{OperationCode::ShuffleButterfly, OperationCode::InRangeShuffleButterfly}; + return Operation(OperationCode::IBitwiseXor, thread_id, index); } - UNREACHABLE_MSG("Invalid SHFL operation: {}", - static_cast<u64>(instr.shfl.operation.Value())); - return {}; + UNREACHABLE(); + return Immediate(0U); }(); - // Setting the predicate before the register is intentional to avoid overwriting. - Node index = instr.shfl.is_index_imm ? Immediate(static_cast<u32>(instr.shfl.index_imm)) - : GetRegister(instr.gpr20); - SetPredicate(bb, instr.shfl.pred48, Operation(in_range, index, width)); + Node in_bounds = [instr, src_thread_id, min_thread_id, max_thread_id] { + if (instr.shfl.operation == ShuffleOperation::Up) { + return Operation(OperationCode::LogicalIGreaterEqual, src_thread_id, min_thread_id); + } else { + return Operation(OperationCode::LogicalILessEqual, src_thread_id, max_thread_id); + } + }(); + + SetPredicate(bb, instr.shfl.pred48, in_bounds); SetRegister( bb, instr.gpr0, - Operation(operation, GetRegister(instr.gpr8), std::move(index), std::move(width))); + Operation(OperationCode::ShuffleIndexed, GetRegister(instr.gpr8), src_thread_id)); + break; + } + case OpCode::Id::FSWZADD: { + UNIMPLEMENTED_IF(instr.fswzadd.ndv); + + Node op_a = GetRegister(instr.gpr8); + Node op_b = GetRegister(instr.gpr20); + Node mask = Immediate(static_cast<u32>(instr.fswzadd.swizzle)); + SetRegister(bb, instr.gpr0, Operation(OperationCode::FSwizzleAdd, op_a, op_b, mask)); break; } default: diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h index 4300d9ff4..4e155542a 100644 --- a/src/video_core/shader/node.h +++ b/src/video_core/shader/node.h @@ -47,6 +47,7 @@ enum class OperationCode { FTrunc, /// (MetaArithmetic, float a) -> float FCastInteger, /// (MetaArithmetic, int a) -> float FCastUInteger, /// (MetaArithmetic, uint a) -> float + FSwizzleAdd, /// (float a, float b, uint mask) -> float IAdd, /// (MetaArithmetic, int a, int b) -> int IMul, /// (MetaArithmetic, int 
a, int b) -> int @@ -67,6 +68,7 @@ enum class OperationCode { IBitfieldInsert, /// (MetaArithmetic, int base, int insert, int offset, int bits) -> int IBitfieldExtract, /// (MetaArithmetic, int value, int offset, int offset) -> int IBitCount, /// (MetaArithmetic, int) -> int + IBitMSB, /// (MetaArithmetic, int) -> int UAdd, /// (MetaArithmetic, uint a, uint b) -> uint UMul, /// (MetaArithmetic, uint a, uint b) -> uint @@ -85,6 +87,7 @@ enum class OperationCode { UBitfieldInsert, /// (MetaArithmetic, uint base, uint insert, int offset, int bits) -> uint UBitfieldExtract, /// (MetaArithmetic, uint value, int offset, int offset) -> uint UBitCount, /// (MetaArithmetic, uint) -> uint + UBitMSB, /// (MetaArithmetic, uint) -> uint HAdd, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 HMul, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 @@ -148,6 +151,7 @@ enum class OperationCode { TextureQueryDimensions, /// (MetaTexture, float a) -> float4 TextureQueryLod, /// (MetaTexture, float[N] coords) -> float4 TexelFetch, /// (MetaTexture, int[N], int) -> float4 + TextureGradient, /// (MetaTexture, float[N] coords, float[N*2] derivates) -> float4 ImageLoad, /// (MetaImage, int[N] coords) -> void ImageStore, /// (MetaImage, int[N] coords) -> void @@ -168,6 +172,7 @@ enum class OperationCode { EmitVertex, /// () -> void EndPrimitive, /// () -> void + InvocationId, /// () -> int YNegate, /// () -> float LocalInvocationIdX, /// () -> uint LocalInvocationIdY, /// () -> uint @@ -181,15 +186,10 @@ enum class OperationCode { VoteAny, /// (bool) -> bool VoteEqual, /// (bool) -> bool - ShuffleIndexed, /// (uint value, uint index, uint width) -> uint - ShuffleUp, /// (uint value, uint index, uint width) -> uint - ShuffleDown, /// (uint value, uint index, uint width) -> uint - ShuffleButterfly, /// (uint value, uint index, uint width) -> uint + ThreadId, /// () -> uint + ShuffleIndexed, /// (uint value, uint index) -> uint - InRangeShuffleIndexed, /// (uint index, uint width) -> 
bool - InRangeShuffleUp, /// (uint index, uint width) -> bool - InRangeShuffleDown, /// (uint index, uint width) -> bool - InRangeShuffleButterfly, /// (uint index, uint width) -> bool + MemoryBarrierGL, /// () -> void Amount, }; @@ -216,13 +216,14 @@ class PredicateNode; class AbufNode; class CbufNode; class LmemNode; +class PatchNode; class SmemNode; class GmemNode; class CommentNode; -using NodeData = - std::variant<OperationNode, ConditionalNode, GprNode, ImmediateNode, InternalFlagNode, - PredicateNode, AbufNode, CbufNode, LmemNode, SmemNode, GmemNode, CommentNode>; +using NodeData = std::variant<OperationNode, ConditionalNode, GprNode, ImmediateNode, + InternalFlagNode, PredicateNode, AbufNode, PatchNode, CbufNode, + LmemNode, SmemNode, GmemNode, CommentNode>; using Node = std::shared_ptr<NodeData>; using Node4 = std::array<Node, 4>; using NodeBlock = std::vector<Node>; @@ -231,14 +232,15 @@ class Sampler { public: /// This constructor is for bound samplers constexpr explicit Sampler(u32 index, u32 offset, Tegra::Shader::TextureType type, - bool is_array, bool is_shadow) - : index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow} {} + bool is_array, bool is_shadow, bool is_buffer) + : index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow}, + is_buffer{is_buffer} {} /// This constructor is for bindless samplers constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type, - bool is_array, bool is_shadow) + bool is_array, bool is_shadow, bool is_buffer) : index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array}, - is_shadow{is_shadow}, is_bindless{true} {} + is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true} {} constexpr u32 GetIndex() const { return index; @@ -264,6 +266,10 @@ public: return is_shadow; } + constexpr bool IsBuffer() const { + return is_buffer; + } + constexpr bool IsBindless() const { return is_bindless; } @@ -276,6 +282,7 @@ 
private: Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc) bool is_array{}; ///< Whether the texture is being sampled as an array texture or not. bool is_shadow{}; ///< Whether the texture is being sampled as a depth texture or not. + bool is_buffer{}; ///< Whether the texture is a texture buffer without sampler. bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not. }; @@ -367,6 +374,8 @@ struct MetaTexture { Node array; Node depth_compare; std::vector<Node> aoffi; + std::vector<Node> ptp; + std::vector<Node> derivates; Node bias; Node lod; Node component{}; @@ -383,8 +392,30 @@ struct MetaImage { using Meta = std::variant<MetaArithmetic, MetaTexture, MetaImage, MetaStackClass, Tegra::Shader::HalfType>; +class AmendNode { +public: + std::optional<std::size_t> GetAmendIndex() const { + if (amend_index == amend_null_index) { + return std::nullopt; + } + return {amend_index}; + } + + void SetAmendIndex(std::size_t index) { + amend_index = index; + } + + void ClearAmend() { + amend_index = amend_null_index; + } + +private: + static constexpr std::size_t amend_null_index = 0xFFFFFFFFFFFFFFFFULL; + std::size_t amend_index{amend_null_index}; +}; + /// Holds any kind of operation that can be done in the IR -class OperationNode final { +class OperationNode final : public AmendNode { public: explicit OperationNode(OperationCode code) : OperationNode(code, Meta{}) {} @@ -424,7 +455,7 @@ private: }; /// Encloses inside any kind of node that returns a boolean conditionally-executed code -class ConditionalNode final { +class ConditionalNode final : public AmendNode { public: explicit ConditionalNode(Node condition, std::vector<Node>&& code) : condition{std::move(condition)}, code{std::move(code)} {} @@ -538,6 +569,19 @@ private: u32 element{}; }; +/// Patch memory (used to communicate tessellation stages). 
+class PatchNode final { +public: + explicit PatchNode(u32 offset) : offset{offset} {} + + u32 GetOffset() const { + return offset; + } + +private: + u32 offset{}; +}; + /// Constant buffer node, usually mapped to uniform buffers in GLSL class CbufNode final { public: diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp index 1d9825c76..31eecb3f4 100644 --- a/src/video_core/shader/shader_ir.cpp +++ b/src/video_core/shader/shader_ir.cpp @@ -446,4 +446,10 @@ Node ShaderIR::BitfieldInsert(Node base, Node insert, u32 offset, u32 bits) { Immediate(bits)); } +std::size_t ShaderIR::DeclareAmend(Node new_amend) { + const std::size_t id = amend_code.size(); + amend_code.push_back(new_amend); + return id; +} + } // namespace VideoCommon::Shader diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h index 26c8fde22..aacd0a0da 100644 --- a/src/video_core/shader/shader_ir.h +++ b/src/video_core/shader/shader_ir.h @@ -49,7 +49,7 @@ public: } u32 GetSize() const { - return max_offset + sizeof(float); + return max_offset + static_cast<u32>(sizeof(float)); } u32 GetMaxOffset() const { @@ -137,6 +137,10 @@ public: return uses_vertex_id; } + bool UsesWarps() const { + return uses_warps; + } + bool HasPhysicalAttributes() const { return uses_physical_attributes; } @@ -165,13 +169,17 @@ public: return program_manager.GetVariables(); } - u32 ConvertAddressToNvidiaSpace(const u32 address) const { - return (address - main_offset) * sizeof(Tegra::Shader::Instruction); + u32 ConvertAddressToNvidiaSpace(u32 address) const { + return (address - main_offset) * static_cast<u32>(sizeof(Tegra::Shader::Instruction)); } /// Returns a condition code evaluated from internal flags Node GetConditionCode(Tegra::Shader::ConditionCode cc) const; + const Node& GetAmendNode(std::size_t index) const { + return amend_code[index]; + } + private: friend class ASTDecoder; @@ -179,6 +187,7 @@ private: Tegra::Shader::TextureType type; bool is_array; 
bool is_shadow; + bool is_buffer; }; void Decode(); @@ -303,13 +312,17 @@ private: /// Returns a predicate combiner operation OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation); + /// Queries the missing sampler info from the execution context. + SamplerInfo GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset, + std::optional<u32> buffer = std::nullopt); + /// Accesses a texture sampler - const Sampler& GetSampler(const Tegra::Shader::Sampler& sampler, - std::optional<SamplerInfo> sampler_info); + const Sampler* GetSampler(const Tegra::Shader::Sampler& sampler, + std::optional<SamplerInfo> sampler_info = std::nullopt); - // Accesses a texture sampler for a bindless texture. - const Sampler& GetBindlessSampler(const Tegra::Shader::Register& reg, - std::optional<SamplerInfo> sampler_info); + /// Accesses a texture sampler for a bindless texture. + const Sampler* GetBindlessSampler(Tegra::Shader::Register reg, + std::optional<SamplerInfo> sampler_info = std::nullopt); /// Accesses an image. 
Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type); @@ -329,7 +342,7 @@ private: void WriteTexsInstructionFloat(NodeBlock& bb, Tegra::Shader::Instruction instr, const Node4& components, bool ignore_mask = false); void WriteTexsInstructionHalfFloat(NodeBlock& bb, Tegra::Shader::Instruction instr, - const Node4& components); + const Node4& components, bool ignore_mask = false); Node4 GetTexCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, Tegra::Shader::TextureProcessMode process_mode, bool depth_compare, @@ -341,7 +354,8 @@ private: bool is_array); Node4 GetTld4Code(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, - bool depth_compare, bool is_array, bool is_aoffi, bool is_bindless); + bool depth_compare, bool is_array, bool is_aoffi, bool is_ptp, + bool is_bindless); Node4 GetTldCode(Tegra::Shader::Instruction instr); @@ -354,6 +368,8 @@ private: std::vector<Node> GetAoffiCoordinates(Node aoffi_reg, std::size_t coord_count, bool is_tld4); + std::vector<Node> GetPtpCoordinates(std::array<Node, 2> ptp_regs); + Node4 GetTextureCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type, Tegra::Shader::TextureProcessMode process_mode, std::vector<Node> coords, Node array, Node depth_compare, u32 bias_offset, std::vector<Node> aoffi, @@ -380,6 +396,9 @@ private: Tegra::Shader::Instruction instr, bool is_write); + /// Register new amending code and obtain the reference id. 
+ std::size_t DeclareAmend(Node new_amend); + const ProgramCode& program_code; const u32 main_offset; const CompilerSettings settings; @@ -394,6 +413,7 @@ private: std::map<u32, NodeBlock> basic_blocks; NodeBlock global_code; ASTManager program_manager{true, true}; + std::vector<Node> amend_code; std::set<u32> used_registers; std::set<Tegra::Shader::Pred> used_predicates; @@ -410,6 +430,7 @@ private: bool uses_physical_attributes{}; // Shader uses AL2P or physical attribute read/writes bool uses_instance_id{}; bool uses_vertex_id{}; + bool uses_warps{}; Tegra::Shader::Header header; }; diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp index 55f5949e4..165c79330 100644 --- a/src/video_core/shader/track.cpp +++ b/src/video_core/shader/track.cpp @@ -7,6 +7,7 @@ #include <variant> #include "common/common_types.h" +#include "video_core/shader/node.h" #include "video_core/shader/shader_ir.h" namespace VideoCommon::Shader { diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp index 621136b6e..1655ccf16 100644 --- a/src/video_core/surface.cpp +++ b/src/video_core/surface.cpp @@ -168,309 +168,6 @@ PixelFormat PixelFormatFromRenderTargetFormat(Tegra::RenderTargetFormat format) } } -PixelFormat PixelFormatFromTextureFormat(Tegra::Texture::TextureFormat format, - Tegra::Texture::ComponentType component_type, - bool is_srgb) { - // TODO(Subv): Properly implement this - switch (format) { - case Tegra::Texture::TextureFormat::A8R8G8B8: - if (is_srgb) { - return PixelFormat::RGBA8_SRGB; - } - switch (component_type) { - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::ABGR8U; - case Tegra::Texture::ComponentType::SNORM: - return PixelFormat::ABGR8S; - case Tegra::Texture::ComponentType::UINT: - return PixelFormat::ABGR8UI; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::B5G6R5: - switch (component_type) { - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::B5G6R5U; - default: - 
break; - } - break; - case Tegra::Texture::TextureFormat::A2B10G10R10: - switch (component_type) { - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::A2B10G10R10U; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::A1B5G5R5: - switch (component_type) { - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::A1B5G5R5U; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::A4B4G4R4: - switch (component_type) { - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::R4G4B4A4U; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::R8: - switch (component_type) { - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::R8U; - case Tegra::Texture::ComponentType::UINT: - return PixelFormat::R8UI; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::G8R8: - // TextureFormat::G8R8 is actually ordered red then green, as such we can use - // PixelFormat::RG8U and PixelFormat::RG8S. This was tested with The Legend of Zelda: Breath - // of the Wild, which uses this format to render the hearts on the UI. 
- switch (component_type) { - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::RG8U; - case Tegra::Texture::ComponentType::SNORM: - return PixelFormat::RG8S; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::R16_G16_B16_A16: - switch (component_type) { - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::RGBA16U; - case Tegra::Texture::ComponentType::FLOAT: - return PixelFormat::RGBA16F; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::BF10GF11RF11: - switch (component_type) { - case Tegra::Texture::ComponentType::FLOAT: - return PixelFormat::R11FG11FB10F; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::R32_G32_B32_A32: - switch (component_type) { - case Tegra::Texture::ComponentType::FLOAT: - return PixelFormat::RGBA32F; - case Tegra::Texture::ComponentType::UINT: - return PixelFormat::RGBA32UI; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::R32_G32: - switch (component_type) { - case Tegra::Texture::ComponentType::FLOAT: - return PixelFormat::RG32F; - case Tegra::Texture::ComponentType::UINT: - return PixelFormat::RG32UI; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::R32_G32_B32: - switch (component_type) { - case Tegra::Texture::ComponentType::FLOAT: - return PixelFormat::RGB32F; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::R16: - switch (component_type) { - case Tegra::Texture::ComponentType::FLOAT: - return PixelFormat::R16F; - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::R16U; - case Tegra::Texture::ComponentType::SNORM: - return PixelFormat::R16S; - case Tegra::Texture::ComponentType::UINT: - return PixelFormat::R16UI; - case Tegra::Texture::ComponentType::SINT: - return PixelFormat::R16I; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::R32: - switch (component_type) { - case Tegra::Texture::ComponentType::FLOAT: - return PixelFormat::R32F; - case 
Tegra::Texture::ComponentType::UINT: - return PixelFormat::R32UI; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::E5B9G9R9_SHAREDEXP: - switch (component_type) { - case Tegra::Texture::ComponentType::FLOAT: - return PixelFormat::E5B9G9R9F; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::ZF32: - return PixelFormat::Z32F; - case Tegra::Texture::TextureFormat::Z16: - return PixelFormat::Z16; - case Tegra::Texture::TextureFormat::S8Z24: - return PixelFormat::S8Z24; - case Tegra::Texture::TextureFormat::ZF32_X24S8: - return PixelFormat::Z32FS8; - case Tegra::Texture::TextureFormat::DXT1: - return is_srgb ? PixelFormat::DXT1_SRGB : PixelFormat::DXT1; - case Tegra::Texture::TextureFormat::DXT23: - return is_srgb ? PixelFormat::DXT23_SRGB : PixelFormat::DXT23; - case Tegra::Texture::TextureFormat::DXT45: - return is_srgb ? PixelFormat::DXT45_SRGB : PixelFormat::DXT45; - case Tegra::Texture::TextureFormat::DXN1: - return PixelFormat::DXN1; - case Tegra::Texture::TextureFormat::DXN2: - switch (component_type) { - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::DXN2UNORM; - case Tegra::Texture::ComponentType::SNORM: - return PixelFormat::DXN2SNORM; - default: - break; - } - break; - case Tegra::Texture::TextureFormat::BC7U: - return is_srgb ? PixelFormat::BC7U_SRGB : PixelFormat::BC7U; - case Tegra::Texture::TextureFormat::BC6H_UF16: - return PixelFormat::BC6H_UF16; - case Tegra::Texture::TextureFormat::BC6H_SF16: - return PixelFormat::BC6H_SF16; - case Tegra::Texture::TextureFormat::ASTC_2D_4X4: - return is_srgb ? PixelFormat::ASTC_2D_4X4_SRGB : PixelFormat::ASTC_2D_4X4; - case Tegra::Texture::TextureFormat::ASTC_2D_5X4: - return is_srgb ? PixelFormat::ASTC_2D_5X4_SRGB : PixelFormat::ASTC_2D_5X4; - case Tegra::Texture::TextureFormat::ASTC_2D_5X5: - return is_srgb ? PixelFormat::ASTC_2D_5X5_SRGB : PixelFormat::ASTC_2D_5X5; - case Tegra::Texture::TextureFormat::ASTC_2D_8X8: - return is_srgb ? 
PixelFormat::ASTC_2D_8X8_SRGB : PixelFormat::ASTC_2D_8X8; - case Tegra::Texture::TextureFormat::ASTC_2D_8X5: - return is_srgb ? PixelFormat::ASTC_2D_8X5_SRGB : PixelFormat::ASTC_2D_8X5; - case Tegra::Texture::TextureFormat::ASTC_2D_10X8: - return is_srgb ? PixelFormat::ASTC_2D_10X8_SRGB : PixelFormat::ASTC_2D_10X8; - case Tegra::Texture::TextureFormat::ASTC_2D_6X6: - return is_srgb ? PixelFormat::ASTC_2D_6X6_SRGB : PixelFormat::ASTC_2D_6X6; - case Tegra::Texture::TextureFormat::ASTC_2D_10X10: - return is_srgb ? PixelFormat::ASTC_2D_10X10_SRGB : PixelFormat::ASTC_2D_10X10; - case Tegra::Texture::TextureFormat::ASTC_2D_12X12: - return is_srgb ? PixelFormat::ASTC_2D_12X12_SRGB : PixelFormat::ASTC_2D_12X12; - case Tegra::Texture::TextureFormat::ASTC_2D_8X6: - return is_srgb ? PixelFormat::ASTC_2D_8X6_SRGB : PixelFormat::ASTC_2D_8X6; - case Tegra::Texture::TextureFormat::ASTC_2D_6X5: - return is_srgb ? PixelFormat::ASTC_2D_6X5_SRGB : PixelFormat::ASTC_2D_6X5; - case Tegra::Texture::TextureFormat::R16_G16: - switch (component_type) { - case Tegra::Texture::ComponentType::FLOAT: - return PixelFormat::RG16F; - case Tegra::Texture::ComponentType::UNORM: - return PixelFormat::RG16; - case Tegra::Texture::ComponentType::SNORM: - return PixelFormat::RG16S; - case Tegra::Texture::ComponentType::UINT: - return PixelFormat::RG16UI; - case Tegra::Texture::ComponentType::SINT: - return PixelFormat::RG16I; - default: - break; - } - break; - default: - break; - } - LOG_CRITICAL(HW_GPU, "Unimplemented format={}, component_type={}", static_cast<u32>(format), - static_cast<u32>(component_type)); - UNREACHABLE(); - return PixelFormat::ABGR8U; -} - -ComponentType ComponentTypeFromTexture(Tegra::Texture::ComponentType type) { - // TODO(Subv): Implement more component types - switch (type) { - case Tegra::Texture::ComponentType::UNORM: - return ComponentType::UNorm; - case Tegra::Texture::ComponentType::FLOAT: - return ComponentType::Float; - case Tegra::Texture::ComponentType::SNORM: - 
return ComponentType::SNorm; - case Tegra::Texture::ComponentType::UINT: - return ComponentType::UInt; - case Tegra::Texture::ComponentType::SINT: - return ComponentType::SInt; - default: - LOG_CRITICAL(HW_GPU, "Unimplemented component type={}", static_cast<u32>(type)); - UNREACHABLE(); - return ComponentType::UNorm; - } -} - -ComponentType ComponentTypeFromRenderTarget(Tegra::RenderTargetFormat format) { - // TODO(Subv): Implement more render targets - switch (format) { - case Tegra::RenderTargetFormat::RGBA8_UNORM: - case Tegra::RenderTargetFormat::RGBA8_SRGB: - case Tegra::RenderTargetFormat::BGRA8_UNORM: - case Tegra::RenderTargetFormat::BGRA8_SRGB: - case Tegra::RenderTargetFormat::RGB10_A2_UNORM: - case Tegra::RenderTargetFormat::R8_UNORM: - case Tegra::RenderTargetFormat::RG16_UNORM: - case Tegra::RenderTargetFormat::R16_UNORM: - case Tegra::RenderTargetFormat::B5G6R5_UNORM: - case Tegra::RenderTargetFormat::BGR5A1_UNORM: - case Tegra::RenderTargetFormat::RG8_UNORM: - case Tegra::RenderTargetFormat::RGBA16_UNORM: - return ComponentType::UNorm; - case Tegra::RenderTargetFormat::RGBA8_SNORM: - case Tegra::RenderTargetFormat::RG16_SNORM: - case Tegra::RenderTargetFormat::R16_SNORM: - case Tegra::RenderTargetFormat::RG8_SNORM: - return ComponentType::SNorm; - case Tegra::RenderTargetFormat::RGBA16_FLOAT: - case Tegra::RenderTargetFormat::RGBX16_FLOAT: - case Tegra::RenderTargetFormat::R11G11B10_FLOAT: - case Tegra::RenderTargetFormat::RGBA32_FLOAT: - case Tegra::RenderTargetFormat::RG32_FLOAT: - case Tegra::RenderTargetFormat::RG16_FLOAT: - case Tegra::RenderTargetFormat::R16_FLOAT: - case Tegra::RenderTargetFormat::R32_FLOAT: - return ComponentType::Float; - case Tegra::RenderTargetFormat::RGBA32_UINT: - case Tegra::RenderTargetFormat::RGBA16_UINT: - case Tegra::RenderTargetFormat::RG16_UINT: - case Tegra::RenderTargetFormat::R8_UINT: - case Tegra::RenderTargetFormat::R16_UINT: - case Tegra::RenderTargetFormat::RG32_UINT: - case 
Tegra::RenderTargetFormat::R32_UINT: - case Tegra::RenderTargetFormat::RGBA8_UINT: - return ComponentType::UInt; - case Tegra::RenderTargetFormat::RG16_SINT: - case Tegra::RenderTargetFormat::R16_SINT: - return ComponentType::SInt; - default: - LOG_CRITICAL(HW_GPU, "Unimplemented format={}", static_cast<u32>(format)); - UNREACHABLE(); - return ComponentType::UNorm; - } -} - PixelFormat PixelFormatFromGPUPixelFormat(Tegra::FramebufferConfig::PixelFormat format) { switch (format) { case Tegra::FramebufferConfig::PixelFormat::ABGR8: @@ -485,22 +182,6 @@ PixelFormat PixelFormatFromGPUPixelFormat(Tegra::FramebufferConfig::PixelFormat } } -ComponentType ComponentTypeFromDepthFormat(Tegra::DepthFormat format) { - switch (format) { - case Tegra::DepthFormat::Z16_UNORM: - case Tegra::DepthFormat::S8_Z24_UNORM: - case Tegra::DepthFormat::Z24_S8_UNORM: - return ComponentType::UNorm; - case Tegra::DepthFormat::Z32_FLOAT: - case Tegra::DepthFormat::Z32_S8_X24_FLOAT: - return ComponentType::Float; - default: - LOG_CRITICAL(HW_GPU, "Unimplemented format={}", static_cast<u32>(format)); - UNREACHABLE(); - return ComponentType::UNorm; - } -} - SurfaceType GetFormatType(PixelFormat pixel_format) { if (static_cast<std::size_t>(pixel_format) < static_cast<std::size_t>(PixelFormat::MaxColorFormat)) { diff --git a/src/video_core/surface.h b/src/video_core/surface.h index d3bcd38c5..0d17a93ed 100644 --- a/src/video_core/surface.h +++ b/src/video_core/surface.h @@ -106,18 +106,8 @@ enum class PixelFormat { Max = MaxDepthStencilFormat, Invalid = 255, }; - static constexpr std::size_t MaxPixelFormat = static_cast<std::size_t>(PixelFormat::Max); -enum class ComponentType { - Invalid = 0, - SNorm = 1, - UNorm = 2, - SInt = 3, - UInt = 4, - Float = 5, -}; - enum class SurfaceType { ColorTexture = 0, Depth = 1, @@ -609,18 +599,8 @@ PixelFormat PixelFormatFromDepthFormat(Tegra::DepthFormat format); PixelFormat PixelFormatFromRenderTargetFormat(Tegra::RenderTargetFormat format); -PixelFormat 
PixelFormatFromTextureFormat(Tegra::Texture::TextureFormat format, - Tegra::Texture::ComponentType component_type, - bool is_srgb); - -ComponentType ComponentTypeFromTexture(Tegra::Texture::ComponentType type); - -ComponentType ComponentTypeFromRenderTarget(Tegra::RenderTargetFormat format); - PixelFormat PixelFormatFromGPUPixelFormat(Tegra::FramebufferConfig::PixelFormat format); -ComponentType ComponentTypeFromDepthFormat(Tegra::DepthFormat format); - SurfaceType GetFormatType(PixelFormat pixel_format); bool IsPixelFormatASTC(PixelFormat format); diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp new file mode 100644 index 000000000..271e67533 --- /dev/null +++ b/src/video_core/texture_cache/format_lookup_table.cpp @@ -0,0 +1,208 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <array> +#include "common/common_types.h" +#include "common/logging/log.h" +#include "video_core/texture_cache/format_lookup_table.h" + +namespace VideoCommon { + +using Tegra::Texture::ComponentType; +using Tegra::Texture::TextureFormat; +using VideoCore::Surface::PixelFormat; + +namespace { + +constexpr auto SNORM = ComponentType::SNORM; +constexpr auto UNORM = ComponentType::UNORM; +constexpr auto SINT = ComponentType::SINT; +constexpr auto UINT = ComponentType::UINT; +constexpr auto SNORM_FORCE_FP16 = ComponentType::SNORM_FORCE_FP16; +constexpr auto UNORM_FORCE_FP16 = ComponentType::UNORM_FORCE_FP16; +constexpr auto FLOAT = ComponentType::FLOAT; +constexpr bool C = false; // Normal color +constexpr bool S = true; // Srgb + +struct Table { + constexpr Table(TextureFormat texture_format, bool is_srgb, ComponentType red_component, + ComponentType green_component, ComponentType blue_component, + ComponentType alpha_component, PixelFormat pixel_format) + : texture_format{texture_format}, pixel_format{pixel_format}, 
red_component{red_component}, + green_component{green_component}, blue_component{blue_component}, + alpha_component{alpha_component}, is_srgb{is_srgb} {} + + TextureFormat texture_format; + PixelFormat pixel_format; + ComponentType red_component; + ComponentType green_component; + ComponentType blue_component; + ComponentType alpha_component; + bool is_srgb; +}; +constexpr std::array<Table, 74> DefinitionTable = {{ + {TextureFormat::A8R8G8B8, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ABGR8U}, + {TextureFormat::A8R8G8B8, C, SNORM, SNORM, SNORM, SNORM, PixelFormat::ABGR8S}, + {TextureFormat::A8R8G8B8, C, UINT, UINT, UINT, UINT, PixelFormat::ABGR8UI}, + {TextureFormat::A8R8G8B8, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::RGBA8_SRGB}, + + {TextureFormat::B5G6R5, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::B5G6R5U}, + + {TextureFormat::A2B10G10R10, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::A2B10G10R10U}, + + {TextureFormat::A1B5G5R5, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::A1B5G5R5U}, + + {TextureFormat::A4B4G4R4, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::R4G4B4A4U}, + + {TextureFormat::R8, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::R8U}, + {TextureFormat::R8, C, UINT, UINT, UINT, UINT, PixelFormat::R8UI}, + + {TextureFormat::G8R8, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::RG8U}, + {TextureFormat::G8R8, C, SNORM, SNORM, SNORM, SNORM, PixelFormat::RG8S}, + + {TextureFormat::R16_G16_B16_A16, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::RGBA16U}, + {TextureFormat::R16_G16_B16_A16, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::RGBA16F}, + {TextureFormat::R16_G16_B16_A16, C, UINT, UINT, UINT, UINT, PixelFormat::RGBA16UI}, + + {TextureFormat::R16_G16, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::RG16F}, + {TextureFormat::R16_G16, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::RG16}, + {TextureFormat::R16_G16, C, SNORM, SNORM, SNORM, SNORM, PixelFormat::RG16S}, + {TextureFormat::R16_G16, C, UINT, UINT, UINT, UINT, PixelFormat::RG16UI}, + {TextureFormat::R16_G16, C, 
SINT, SINT, SINT, SINT, PixelFormat::RG16I}, + + {TextureFormat::R16, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::R16F}, + {TextureFormat::R16, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::R16U}, + {TextureFormat::R16, C, SNORM, SNORM, SNORM, SNORM, PixelFormat::R16S}, + {TextureFormat::R16, C, UINT, UINT, UINT, UINT, PixelFormat::R16UI}, + {TextureFormat::R16, C, SINT, SINT, SINT, SINT, PixelFormat::R16I}, + + {TextureFormat::BF10GF11RF11, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::R11FG11FB10F}, + + {TextureFormat::R32_G32_B32_A32, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::RGBA32F}, + {TextureFormat::R32_G32_B32_A32, C, UINT, UINT, UINT, UINT, PixelFormat::RGBA32UI}, + + {TextureFormat::R32_G32_B32, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::RGB32F}, + + {TextureFormat::R32_G32, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::RG32F}, + {TextureFormat::R32_G32, C, UINT, UINT, UINT, UINT, PixelFormat::RG32UI}, + + {TextureFormat::R32, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::R32F}, + {TextureFormat::R32, C, UINT, UINT, UINT, UINT, PixelFormat::R32UI}, + + {TextureFormat::E5B9G9R9_SHAREDEXP, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::E5B9G9R9F}, + + {TextureFormat::ZF32, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::Z32F}, + {TextureFormat::Z16, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::Z16}, + {TextureFormat::S8Z24, C, UINT, UNORM, UNORM, UNORM, PixelFormat::S8Z24}, + {TextureFormat::ZF32_X24S8, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::Z32FS8}, + + {TextureFormat::DXT1, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::DXT1}, + {TextureFormat::DXT1, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::DXT1_SRGB}, + + {TextureFormat::DXT23, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::DXT23}, + {TextureFormat::DXT23, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::DXT23_SRGB}, + + {TextureFormat::DXT45, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::DXT45}, + {TextureFormat::DXT45, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::DXT45_SRGB}, + + // TODO: Use a different pixel format for 
SNORM + {TextureFormat::DXN1, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::DXN1}, + {TextureFormat::DXN1, C, SNORM, SNORM, SNORM, SNORM, PixelFormat::DXN1}, + + {TextureFormat::DXN2, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::DXN2UNORM}, + {TextureFormat::DXN2, C, SNORM, SNORM, SNORM, SNORM, PixelFormat::DXN2SNORM}, + + {TextureFormat::BC7U, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::BC7U}, + {TextureFormat::BC7U, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::BC7U_SRGB}, + + {TextureFormat::BC6H_SF16, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::BC6H_SF16}, + {TextureFormat::BC6H_UF16, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::BC6H_UF16}, + + {TextureFormat::ASTC_2D_4X4, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_4X4}, + {TextureFormat::ASTC_2D_4X4, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_4X4_SRGB}, + + {TextureFormat::ASTC_2D_5X4, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_5X4}, + {TextureFormat::ASTC_2D_5X4, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_5X4_SRGB}, + + {TextureFormat::ASTC_2D_5X5, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_5X5}, + {TextureFormat::ASTC_2D_5X5, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_5X5_SRGB}, + + {TextureFormat::ASTC_2D_8X8, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_8X8}, + {TextureFormat::ASTC_2D_8X8, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_8X8_SRGB}, + + {TextureFormat::ASTC_2D_8X5, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_8X5}, + {TextureFormat::ASTC_2D_8X5, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_8X5_SRGB}, + + {TextureFormat::ASTC_2D_10X8, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_10X8}, + {TextureFormat::ASTC_2D_10X8, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_10X8_SRGB}, + + {TextureFormat::ASTC_2D_6X6, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_6X6}, + {TextureFormat::ASTC_2D_6X6, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_6X6_SRGB}, + + {TextureFormat::ASTC_2D_10X10, C, UNORM, UNORM, 
UNORM, UNORM, PixelFormat::ASTC_2D_10X10}, + {TextureFormat::ASTC_2D_10X10, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_10X10_SRGB}, + + {TextureFormat::ASTC_2D_12X12, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_12X12}, + {TextureFormat::ASTC_2D_12X12, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_12X12_SRGB}, + + {TextureFormat::ASTC_2D_8X6, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_8X6}, + {TextureFormat::ASTC_2D_8X6, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_8X6_SRGB}, + + {TextureFormat::ASTC_2D_6X5, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_6X5}, + {TextureFormat::ASTC_2D_6X5, S, UNORM, UNORM, UNORM, UNORM, PixelFormat::ASTC_2D_6X5_SRGB}, +}}; + +} // Anonymous namespace + +FormatLookupTable::FormatLookupTable() { + table.fill(static_cast<u8>(PixelFormat::Invalid)); + + for (const auto& entry : DefinitionTable) { + table[CalculateIndex(entry.texture_format, entry.is_srgb != 0, entry.red_component, + entry.green_component, entry.blue_component, entry.alpha_component)] = + static_cast<u8>(entry.pixel_format); + } +} + +PixelFormat FormatLookupTable::GetPixelFormat(TextureFormat format, bool is_srgb, + ComponentType red_component, + ComponentType green_component, + ComponentType blue_component, + ComponentType alpha_component) const noexcept { + const auto pixel_format = static_cast<PixelFormat>(table[CalculateIndex( + format, is_srgb, red_component, green_component, blue_component, alpha_component)]); + // [[likely]] + if (pixel_format != PixelFormat::Invalid) { + return pixel_format; + } + UNIMPLEMENTED_MSG("texture format={} srgb={} components={{{} {} {} {}}}", + static_cast<int>(format), is_srgb, static_cast<int>(red_component), + static_cast<int>(green_component), static_cast<int>(blue_component), + static_cast<int>(alpha_component)); + return PixelFormat::ABGR8U; +} + +void FormatLookupTable::Set(TextureFormat format, bool is_srgb, ComponentType red_component, + ComponentType green_component, ComponentType 
blue_component, + ComponentType alpha_component, PixelFormat pixel_format) {} + +std::size_t FormatLookupTable::CalculateIndex(TextureFormat format, bool is_srgb, + ComponentType red_component, + ComponentType green_component, + ComponentType blue_component, + ComponentType alpha_component) noexcept { + const auto format_index = static_cast<std::size_t>(format); + const auto red_index = static_cast<std::size_t>(red_component); + const auto green_index = static_cast<std::size_t>(red_component); + const auto blue_index = static_cast<std::size_t>(red_component); + const auto alpha_index = static_cast<std::size_t>(red_component); + const std::size_t srgb_index = is_srgb ? 1 : 0; + + return format_index * PerFormat + + srgb_index * PerComponent * PerComponent * PerComponent * PerComponent + + alpha_index * PerComponent * PerComponent * PerComponent + + blue_index * PerComponent * PerComponent + green_index * PerComponent + red_index; +} + +} // namespace VideoCommon diff --git a/src/video_core/texture_cache/format_lookup_table.h b/src/video_core/texture_cache/format_lookup_table.h new file mode 100644 index 000000000..aa77e0a5a --- /dev/null +++ b/src/video_core/texture_cache/format_lookup_table.h @@ -0,0 +1,51 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include <array> +#include <limits> +#include "video_core/surface.h" +#include "video_core/textures/texture.h" + +namespace VideoCommon { + +class FormatLookupTable { +public: + explicit FormatLookupTable(); + + VideoCore::Surface::PixelFormat GetPixelFormat( + Tegra::Texture::TextureFormat format, bool is_srgb, + Tegra::Texture::ComponentType red_component, Tegra::Texture::ComponentType green_component, + Tegra::Texture::ComponentType blue_component, + Tegra::Texture::ComponentType alpha_component) const noexcept; + +private: + static_assert(VideoCore::Surface::MaxPixelFormat <= std::numeric_limits<u8>::max()); + + static constexpr std::size_t NumTextureFormats = 128; + + static constexpr std::size_t PerComponent = 8; + static constexpr std::size_t PerComponents2 = PerComponent * PerComponent; + static constexpr std::size_t PerComponents3 = PerComponents2 * PerComponent; + static constexpr std::size_t PerComponents4 = PerComponents3 * PerComponent; + static constexpr std::size_t PerFormat = PerComponents4 * 2; + + static std::size_t CalculateIndex(Tegra::Texture::TextureFormat format, bool is_srgb, + Tegra::Texture::ComponentType red_component, + Tegra::Texture::ComponentType green_component, + Tegra::Texture::ComponentType blue_component, + Tegra::Texture::ComponentType alpha_component) noexcept; + + void Set(Tegra::Texture::TextureFormat format, bool is_srgb, + Tegra::Texture::ComponentType red_component, + Tegra::Texture::ComponentType green_component, + Tegra::Texture::ComponentType blue_component, + Tegra::Texture::ComponentType alpha_component, + VideoCore::Surface::PixelFormat pixel_format); + + std::array<u8, NumTextureFormats * PerFormat> table; +}; + +} // namespace VideoCommon diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h index 1bed82898..5f79bb0aa 100644 --- a/src/video_core/texture_cache/surface_base.h +++ b/src/video_core/texture_cache/surface_base.h @@ -254,16 +254,14 @@ 
public: if (!layer_mipmap) { return {}; } - const u32 end_layer{layer_mipmap->first}; - const u32 end_mipmap{layer_mipmap->second}; + const auto [end_layer, end_mipmap] = *layer_mipmap; if (layer != end_layer) { if (mipmap == 0 && end_mipmap == 0) { - return GetView(ViewParams(view_params.target, layer, end_layer - layer + 1, 0, 1)); + return GetView(ViewParams(view_params.target, layer, end_layer - layer, 0, 1)); } return {}; } else { - return GetView( - ViewParams(view_params.target, layer, 1, mipmap, end_mipmap - mipmap + 1)); + return GetView(ViewParams(view_params.target, layer, 1, mipmap, end_mipmap - mipmap)); } } @@ -278,8 +276,7 @@ public: if (!layer_mipmap) { return {}; } - const u32 layer{layer_mipmap->first}; - const u32 mipmap{layer_mipmap->second}; + const auto [layer, mipmap] = *layer_mipmap; if (GetMipmapSize(mipmap) != candidate_size) { return EmplaceIrregularView(view_params, view_addr, candidate_size, mipmap, layer); } diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp index 1e4d3fb79..38b3a4ba8 100644 --- a/src/video_core/texture_cache/surface_params.cpp +++ b/src/video_core/texture_cache/surface_params.cpp @@ -2,24 +2,23 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
-#include <map> +#include <algorithm> +#include <string> +#include <tuple> #include "common/alignment.h" #include "common/bit_util.h" #include "core/core.h" #include "video_core/engines/shader_bytecode.h" #include "video_core/surface.h" +#include "video_core/texture_cache/format_lookup_table.h" #include "video_core/texture_cache/surface_params.h" namespace VideoCommon { -using VideoCore::Surface::ComponentTypeFromDepthFormat; -using VideoCore::Surface::ComponentTypeFromRenderTarget; -using VideoCore::Surface::ComponentTypeFromTexture; using VideoCore::Surface::PixelFormat; using VideoCore::Surface::PixelFormatFromDepthFormat; using VideoCore::Surface::PixelFormatFromRenderTargetFormat; -using VideoCore::Surface::PixelFormatFromTextureFormat; using VideoCore::Surface::SurfaceTarget; using VideoCore::Surface::SurfaceTargetFromTextureType; using VideoCore::Surface::SurfaceType; @@ -69,7 +68,8 @@ constexpr u32 GetMipmapSize(bool uncompressed, u32 mip_size, u32 tile) { } // Anonymous namespace -SurfaceParams SurfaceParams::CreateForTexture(const Tegra::Texture::TICEntry& tic, +SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_table, + const Tegra::Texture::TICEntry& tic, const VideoCommon::Shader::Sampler& entry) { SurfaceParams params; params.is_tiled = tic.IsTiled(); @@ -78,8 +78,8 @@ SurfaceParams SurfaceParams::CreateForTexture(const Tegra::Texture::TICEntry& ti params.block_height = params.is_tiled ? tic.BlockHeight() : 0, params.block_depth = params.is_tiled ? tic.BlockDepth() : 0, params.tile_width_spacing = params.is_tiled ? 
(1 << tic.tile_width_spacing.Value()) : 1; - params.pixel_format = - PixelFormatFromTextureFormat(tic.format, tic.r_type.Value(), params.srgb_conversion); + params.pixel_format = lookup_table.GetPixelFormat( + tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type); params.type = GetFormatType(params.pixel_format); if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) { switch (params.pixel_format) { @@ -99,7 +99,6 @@ SurfaceParams SurfaceParams::CreateForTexture(const Tegra::Texture::TICEntry& ti } params.type = GetFormatType(params.pixel_format); } - params.component_type = ComponentTypeFromTexture(tic.r_type.Value()); params.type = GetFormatType(params.pixel_format); // TODO: on 1DBuffer we should use the tic info. if (tic.IsBuffer()) { @@ -128,7 +127,8 @@ SurfaceParams SurfaceParams::CreateForTexture(const Tegra::Texture::TICEntry& ti return params; } -SurfaceParams SurfaceParams::CreateForImage(const Tegra::Texture::TICEntry& tic, +SurfaceParams SurfaceParams::CreateForImage(const FormatLookupTable& lookup_table, + const Tegra::Texture::TICEntry& tic, const VideoCommon::Shader::Image& entry) { SurfaceParams params; params.is_tiled = tic.IsTiled(); @@ -137,10 +137,9 @@ SurfaceParams SurfaceParams::CreateForImage(const Tegra::Texture::TICEntry& tic, params.block_height = params.is_tiled ? tic.BlockHeight() : 0, params.block_depth = params.is_tiled ? tic.BlockDepth() : 0, params.tile_width_spacing = params.is_tiled ? 
(1 << tic.tile_width_spacing.Value()) : 1; - params.pixel_format = - PixelFormatFromTextureFormat(tic.format, tic.r_type.Value(), params.srgb_conversion); + params.pixel_format = lookup_table.GetPixelFormat( + tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type); params.type = GetFormatType(params.pixel_format); - params.component_type = ComponentTypeFromTexture(tic.r_type.Value()); params.type = GetFormatType(params.pixel_format); params.target = ImageTypeToSurfaceTarget(entry.GetType()); // TODO: on 1DBuffer we should use the tic info. @@ -181,7 +180,6 @@ SurfaceParams SurfaceParams::CreateForDepthBuffer( params.block_depth = std::min(block_depth, 5U); params.tile_width_spacing = 1; params.pixel_format = PixelFormatFromDepthFormat(format); - params.component_type = ComponentTypeFromDepthFormat(format); params.type = GetFormatType(params.pixel_format); params.width = zeta_width; params.height = zeta_height; @@ -206,7 +204,6 @@ SurfaceParams SurfaceParams::CreateForFramebuffer(Core::System& system, std::siz params.block_depth = config.memory_layout.block_depth; params.tile_width_spacing = 1; params.pixel_format = PixelFormatFromRenderTargetFormat(config.format); - params.component_type = ComponentTypeFromRenderTarget(config.format); params.type = GetFormatType(params.pixel_format); if (params.is_tiled) { params.pitch = 0; @@ -236,7 +233,6 @@ SurfaceParams SurfaceParams::CreateForFermiCopySurface( params.block_depth = params.is_tiled ? 
std::min(config.BlockDepth(), 5U) : 0, params.tile_width_spacing = 1; params.pixel_format = PixelFormatFromRenderTargetFormat(config.format); - params.component_type = ComponentTypeFromRenderTarget(config.format); params.type = GetFormatType(params.pixel_format); params.width = config.width; params.height = config.height; @@ -250,6 +246,16 @@ SurfaceParams SurfaceParams::CreateForFermiCopySurface( return params; } +VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget( + const VideoCommon::Shader::Sampler& entry) { + return TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray()); +} + +VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget( + const VideoCommon::Shader::Image& entry) { + return ImageTypeToSurfaceTarget(entry.GetType()); +} + bool SurfaceParams::IsLayered() const { switch (target) { case SurfaceTarget::Texture1DArray: @@ -355,10 +361,10 @@ std::size_t SurfaceParams::GetInnerMipmapMemorySize(u32 level, bool as_host_size bool SurfaceParams::operator==(const SurfaceParams& rhs) const { return std::tie(is_tiled, block_width, block_height, block_depth, tile_width_spacing, width, - height, depth, pitch, num_levels, pixel_format, component_type, type, target) == + height, depth, pitch, num_levels, pixel_format, type, target) == std::tie(rhs.is_tiled, rhs.block_width, rhs.block_height, rhs.block_depth, rhs.tile_width_spacing, rhs.width, rhs.height, rhs.depth, rhs.pitch, - rhs.num_levels, rhs.pixel_format, rhs.component_type, rhs.type, rhs.target); + rhs.num_levels, rhs.pixel_format, rhs.type, rhs.target); } std::string SurfaceParams::TargetName() const { @@ -386,4 +392,42 @@ std::string SurfaceParams::TargetName() const { } } +u32 SurfaceParams::GetBlockSize() const { + const u32 x = 64U << block_width; + const u32 y = 8U << block_height; + const u32 z = 1U << block_depth; + return x * y * z; +} + +std::pair<u32, u32> SurfaceParams::GetBlockXY() const { + const u32 x_pixels = 64U / GetBytesPerPixel(); + const u32 x = x_pixels << 
block_width; + const u32 y = 8U << block_height; + return {x, y}; +} + +std::tuple<u32, u32, u32> SurfaceParams::GetBlockOffsetXYZ(u32 offset) const { + const auto div_ceil = [](const u32 x, const u32 y) { return ((x + y - 1) / y); }; + const u32 block_size = GetBlockSize(); + const u32 block_index = offset / block_size; + const u32 gob_offset = offset % block_size; + const u32 gob_index = gob_offset / static_cast<u32>(Tegra::Texture::GetGOBSize()); + const u32 x_gob_pixels = 64U / GetBytesPerPixel(); + const u32 x_block_pixels = x_gob_pixels << block_width; + const u32 y_block_pixels = 8U << block_height; + const u32 z_block_pixels = 1U << block_depth; + const u32 x_blocks = div_ceil(width, x_block_pixels); + const u32 y_blocks = div_ceil(height, y_block_pixels); + const u32 z_blocks = div_ceil(depth, z_block_pixels); + const u32 base_x = block_index % x_blocks; + const u32 base_y = (block_index / x_blocks) % y_blocks; + const u32 base_z = (block_index / (x_blocks * y_blocks)) % z_blocks; + u32 x = base_x * x_block_pixels; + u32 y = base_y * y_block_pixels; + u32 z = base_z * z_block_pixels; + z += gob_index >> block_height; + y += (gob_index * 8U) % y_block_pixels; + return {x, y, z}; +} + } // namespace VideoCommon diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h index c58e7f8a4..992b5c022 100644 --- a/src/video_core/texture_cache/surface_params.h +++ b/src/video_core/texture_cache/surface_params.h @@ -4,6 +4,8 @@ #pragma once +#include <utility> + #include "common/alignment.h" #include "common/bit_util.h" #include "common/cityhash.h" @@ -16,16 +18,20 @@ namespace VideoCommon { +class FormatLookupTable; + using VideoCore::Surface::SurfaceCompression; class SurfaceParams { public: /// Creates SurfaceCachedParams from a texture configuration. 
- static SurfaceParams CreateForTexture(const Tegra::Texture::TICEntry& tic, + static SurfaceParams CreateForTexture(const FormatLookupTable& lookup_table, + const Tegra::Texture::TICEntry& tic, const VideoCommon::Shader::Sampler& entry); /// Creates SurfaceCachedParams from an image configuration. - static SurfaceParams CreateForImage(const Tegra::Texture::TICEntry& tic, + static SurfaceParams CreateForImage(const FormatLookupTable& lookup_table, + const Tegra::Texture::TICEntry& tic, const VideoCommon::Shader::Image& entry); /// Creates SurfaceCachedParams for a depth buffer configuration. @@ -41,6 +47,14 @@ public: static SurfaceParams CreateForFermiCopySurface( const Tegra::Engines::Fermi2D::Regs::Surface& config); + /// Obtains the texture target from a shader's sampler entry. + static VideoCore::Surface::SurfaceTarget ExpectedTarget( + const VideoCommon::Shader::Sampler& entry); + + /// Obtains the texture target from a shader's sampler entry. + static VideoCore::Surface::SurfaceTarget ExpectedTarget( + const VideoCommon::Shader::Image& entry); + std::size_t Hash() const { return static_cast<std::size_t>( Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this))); @@ -124,6 +138,15 @@ public: std::size_t GetConvertedMipmapSize(u32 level) const; + /// Get this texture Tegra Block size in guest memory layout + u32 GetBlockSize() const; + + /// Get X, Y coordinates max sizes of a single block. + std::pair<u32, u32> GetBlockXY() const; + + /// Get the offset in x, y, z coordinates from a memory offset + std::tuple<u32, u32, u32> GetBlockOffsetXYZ(u32 offset) const; + /// Returns the size of a layer in bytes in guest memory. 
std::size_t GetGuestLayerSize() const { return GetLayerSize(false, false); @@ -248,7 +271,6 @@ public: u32 num_levels; u32 emulated_levels; VideoCore::Surface::PixelFormat pixel_format; - VideoCore::Surface::ComponentType component_type; VideoCore::Surface::SurfaceType type; VideoCore::Surface::SurfaceTarget target; @@ -258,7 +280,8 @@ private: /// Returns the size of all mipmap levels and aligns as needed. std::size_t GetInnerMemorySize(bool as_host_size, bool layer_only, bool uncompressed) const { - return GetLayerSize(as_host_size, uncompressed) * (layer_only ? 1U : depth); + return GetLayerSize(as_host_size, uncompressed) * + (layer_only ? 1U : (is_layered ? depth : 1U)); } /// Returns the size of a layer diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 6a92b22d3..f4c015635 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -29,6 +29,7 @@ #include "video_core/rasterizer_interface.h" #include "video_core/surface.h" #include "video_core/texture_cache/copy_params.h" +#include "video_core/texture_cache/format_lookup_table.h" #include "video_core/texture_cache/surface_base.h" #include "video_core/texture_cache/surface_params.h" #include "video_core/texture_cache/surface_view.h" @@ -94,10 +95,16 @@ public: std::lock_guard lock{mutex}; const auto gpu_addr{tic.Address()}; if (!gpu_addr) { - return {}; + return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); } - const auto params{SurfaceParams::CreateForTexture(tic, entry)}; - const auto [surface, view] = GetSurface(gpu_addr, params, true, false); + + const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; + const auto cache_addr{ToCacheAddr(host_ptr)}; + if (!cache_addr) { + return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); + } + const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)}; + const auto [surface, view] = GetSurface(gpu_addr, 
cache_addr, params, true, false); if (guard_samplers) { sampled_textures.push_back(surface); } @@ -109,10 +116,15 @@ public: std::lock_guard lock{mutex}; const auto gpu_addr{tic.Address()}; if (!gpu_addr) { - return {}; + return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); + } + const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; + const auto cache_addr{ToCacheAddr(host_ptr)}; + if (!cache_addr) { + return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); } - const auto params{SurfaceParams::CreateForImage(tic, entry)}; - const auto [surface, view] = GetSurface(gpu_addr, params, true, false); + const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)}; + const auto [surface, view] = GetSurface(gpu_addr, cache_addr, params, true, false); if (guard_samplers) { sampled_textures.push_back(surface); } @@ -142,11 +154,17 @@ public: SetEmptyDepthBuffer(); return {}; } + const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; + const auto cache_addr{ToCacheAddr(host_ptr)}; + if (!cache_addr) { + SetEmptyDepthBuffer(); + return {}; + } const auto depth_params{SurfaceParams::CreateForDepthBuffer( system, regs.zeta_width, regs.zeta_height, regs.zeta.format, regs.zeta.memory_layout.block_width, regs.zeta.memory_layout.block_height, regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)}; - auto surface_view = GetSurface(gpu_addr, depth_params, preserve_contents, true); + auto surface_view = GetSurface(gpu_addr, cache_addr, depth_params, preserve_contents, true); if (depth_buffer.target) depth_buffer.target->MarkAsRenderTarget(false, NO_RT); depth_buffer.target = surface_view.first; @@ -179,8 +197,16 @@ public: return {}; } - auto surface_view = GetSurface(gpu_addr, SurfaceParams::CreateForFramebuffer(system, index), - preserve_contents, true); + const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; + const auto cache_addr{ToCacheAddr(host_ptr)}; + if (!cache_addr) { + 
SetEmptyColorBuffer(index); + return {}; + } + + auto surface_view = + GetSurface(gpu_addr, cache_addr, SurfaceParams::CreateForFramebuffer(system, index), + preserve_contents, true); if (render_targets[index].target) render_targets[index].target->MarkAsRenderTarget(false, NO_RT); render_targets[index].target = surface_view.first; @@ -229,8 +255,14 @@ public: const GPUVAddr src_gpu_addr = src_config.Address(); const GPUVAddr dst_gpu_addr = dst_config.Address(); DeduceBestBlit(src_params, dst_params, src_gpu_addr, dst_gpu_addr); - std::pair<TSurface, TView> dst_surface = GetSurface(dst_gpu_addr, dst_params, true, false); - std::pair<TSurface, TView> src_surface = GetSurface(src_gpu_addr, src_params, true, false); + const auto dst_host_ptr{system.GPU().MemoryManager().GetPointer(dst_gpu_addr)}; + const auto dst_cache_addr{ToCacheAddr(dst_host_ptr)}; + const auto src_host_ptr{system.GPU().MemoryManager().GetPointer(src_gpu_addr)}; + const auto src_cache_addr{ToCacheAddr(src_host_ptr)}; + std::pair<TSurface, TView> dst_surface = + GetSurface(dst_gpu_addr, dst_cache_addr, dst_params, true, false); + std::pair<TSurface, TView> src_surface = + GetSurface(src_gpu_addr, src_cache_addr, src_params, true, false); ImageBlit(src_surface.second, dst_surface.second, copy_config); dst_surface.first->MarkAsModified(true, Tick()); } @@ -346,13 +378,6 @@ protected: return new_surface; } - std::pair<TSurface, TView> GetFermiSurface( - const Tegra::Engines::Fermi2D::Regs::Surface& config) { - SurfaceParams params = SurfaceParams::CreateForFermiCopySurface(config); - const GPUVAddr gpu_addr = config.Address(); - return GetSurface(gpu_addr, params, true, false); - } - Core::System& system; private: @@ -485,15 +510,13 @@ private: GetSiblingFormat(cr_params.pixel_format) == params.pixel_format) { SurfaceParams new_params = params; new_params.pixel_format = cr_params.pixel_format; - new_params.component_type = cr_params.component_type; new_params.type = cr_params.type; new_surface = 
GetUncachedSurface(gpu_addr, new_params); } else { new_surface = GetUncachedSurface(gpu_addr, params); } const auto& final_params = new_surface->GetSurfaceParams(); - if (cr_params.type != final_params.type || - (cr_params.component_type != final_params.component_type)) { + if (cr_params.type != final_params.type) { BufferCopy(current_surface, new_surface); } else { std::vector<CopyParams> bricks = current_surface->BreakDown(final_params); @@ -593,6 +616,86 @@ private: } /** + * Takes care of managing 3D textures and its slices. Does HLE methods for reconstructing the 3D + * textures within the GPU if possible. Falls back to LLE when it isn't possible to use any of + * the HLE methods. + * + * @param overlaps The overlapping surfaces registered in the cache. + * @param params The parameters on the new surface. + * @param gpu_addr The starting address of the new surface. + * @param cache_addr The starting address of the new surface on physical memory. + * @param preserve_contents Indicates that the new surface should be loaded from memory or + * left blank. 
+ */ + std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps, + const SurfaceParams& params, + const GPUVAddr gpu_addr, + const CacheAddr cache_addr, + bool preserve_contents) { + if (params.target == SurfaceTarget::Texture3D) { + bool failed = false; + if (params.num_levels > 1) { + // We can't handle mipmaps in 3D textures yet, better fallback to LLE approach + return std::nullopt; + } + TSurface new_surface = GetUncachedSurface(gpu_addr, params); + bool modified = false; + for (auto& surface : overlaps) { + const SurfaceParams& src_params = surface->GetSurfaceParams(); + if (src_params.target != SurfaceTarget::Texture2D) { + failed = true; + break; + } + if (src_params.height != params.height) { + failed = true; + break; + } + if (src_params.block_depth != params.block_depth || + src_params.block_height != params.block_height) { + failed = true; + break; + } + const u32 offset = static_cast<u32>(surface->GetCacheAddr() - cache_addr); + const auto [x, y, z] = params.GetBlockOffsetXYZ(offset); + modified |= surface->IsModified(); + const CopyParams copy_params(0, 0, 0, 0, 0, z, 0, 0, params.width, params.height, + 1); + ImageCopy(surface, new_surface, copy_params); + } + if (failed) { + return std::nullopt; + } + for (const auto& surface : overlaps) { + Unregister(surface); + } + new_surface->MarkAsModified(modified, Tick()); + Register(new_surface); + auto view = new_surface->GetMainView(); + return {{std::move(new_surface), view}}; + } else { + for (const auto& surface : overlaps) { + if (!surface->MatchTarget(params.target)) { + if (overlaps.size() == 1 && surface->GetCacheAddr() == cache_addr) { + if (Settings::values.use_accurate_gpu_emulation) { + return std::nullopt; + } + Unregister(surface); + return InitializeSurface(gpu_addr, params, preserve_contents); + } + return std::nullopt; + } + if (surface->GetCacheAddr() != cache_addr) { + continue; + } + if (surface->MatchesStructure(params) == 
MatchStructureResult::FullMatch) { + return {{surface, surface->GetMainView()}}; + } + } + return InitializeSurface(gpu_addr, params, preserve_contents); + } + } + + /** * Gets the starting address and parameters of a candidate surface and tries * to find a matching surface within the cache. This is done in 3 big steps: * @@ -615,22 +718,9 @@ private: * left blank. * @param is_render Whether or not the surface is a render target. **/ - std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const SurfaceParams& params, - bool preserve_contents, bool is_render) { - const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)}; - const auto cache_addr{ToCacheAddr(host_ptr)}; - - // Step 0: guarantee a valid surface - if (!cache_addr) { - // Return a null surface if it's invalid - SurfaceParams new_params = params; - new_params.width = 1; - new_params.height = 1; - new_params.depth = 1; - new_params.block_height = 0; - new_params.block_depth = 0; - return InitializeSurface(gpu_addr, new_params, false); - } + std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const CacheAddr cache_addr, + const SurfaceParams& params, bool preserve_contents, + bool is_render) { // Step 1 // Check Level 1 Cache for a fast structural match. If candidate surface @@ -677,6 +767,15 @@ private: } } + // Check if it's a 3D texture + if (params.block_depth > 0) { + auto surface = + Manage3DSurfaces(overlaps, params, gpu_addr, cache_addr, preserve_contents); + if (surface) { + return *surface; + } + } + // Split cases between 1 overlap or many. if (overlaps.size() == 1) { TSurface current_surface = overlaps[0]; @@ -795,6 +894,41 @@ private: } /** + * Gets a null surface based on a target texture. + * @param target The target of the null surface. 
+ */ + TView GetNullSurface(SurfaceTarget target) { + const u32 i_target = static_cast<u32>(target); + if (const auto it = invalid_cache.find(i_target); it != invalid_cache.end()) { + return it->second->GetMainView(); + } + SurfaceParams params{}; + params.target = target; + params.is_tiled = false; + params.srgb_conversion = false; + params.is_layered = false; + params.block_width = 0; + params.block_height = 0; + params.block_depth = 0; + params.tile_width_spacing = 1; + params.width = 1; + params.height = 1; + params.depth = 1; + params.pitch = 4; + params.num_levels = 1; + params.emulated_levels = 1; + params.pixel_format = VideoCore::Surface::PixelFormat::RGBA16F; + params.type = VideoCore::Surface::SurfaceType::ColorTexture; + auto surface = CreateSurface(0ULL, params); + invalid_memory.clear(); + invalid_memory.resize(surface->GetHostSizeInBytes(), 0U); + surface->UploadTexture(invalid_memory); + surface->MarkAsModified(false, Tick()); + invalid_cache.emplace(i_target, surface); + return surface->GetMainView(); + } + + /** * Gets the a source and destination starting address and parameters, * and tries to deduce if they are supposed to be depth textures. If so, their * parameters are modified and fixed into so. @@ -835,12 +969,11 @@ private: } } - const auto inherit_format = ([](SurfaceParams& to, TSurface from) { + const auto inherit_format = [](SurfaceParams& to, TSurface from) { const SurfaceParams& params = from->GetSurfaceParams(); to.pixel_format = params.pixel_format; - to.component_type = params.component_type; to.type = params.type; - }); + }; // Now we got the cases where one or both is Depth and the other is not known if (!incomplete_src) { inherit_format(src_params, deduced_src.surface); @@ -956,6 +1089,8 @@ private: VideoCore::RasterizerInterface& rasterizer; + FormatLookupTable format_lookup_table; + u64 ticks{}; // Guards the cache for protection conflicts. 
@@ -991,6 +1126,11 @@ private: std::vector<TSurface> sampled_textures; + /// This cache stores null surfaces in order to be used as a placeholder + /// for invalid texture calls. + std::unordered_map<u32, TSurface> invalid_cache; + std::vector<u8> invalid_memory; + StagingCache staging_cache; std::recursive_mutex mutex; }; diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp index 58b608a36..33bd31865 100644 --- a/src/video_core/textures/astc.cpp +++ b/src/video_core/textures/astc.cpp @@ -92,11 +92,11 @@ private: const unsigned int mask = 1 << m_NextBit++; // clear the bit - *m_CurByte &= ~mask; + *m_CurByte &= static_cast<unsigned char>(~mask); // Write the bit, if necessary if (b) - *m_CurByte |= mask; + *m_CurByte |= static_cast<unsigned char>(mask); // Next byte? if (m_NextBit >= 8) { @@ -137,7 +137,7 @@ public: } uint64_t mask = (1 << (end - start + 1)) - 1; - return (m_Bits >> start) & mask; + return (m_Bits >> start) & static_cast<IntType>(mask); } private: @@ -656,7 +656,7 @@ static IntType Replicate(const IntType& val, uint32_t numBits, uint32_t toBit) { return 0; if (toBit == 0) return 0; - IntType v = val & ((1 << numBits) - 1); + IntType v = val & static_cast<IntType>((1 << numBits) - 1); IntType res = v; uint32_t reslen = numBits; while (reslen < toBit) { @@ -666,8 +666,8 @@ static IntType Replicate(const IntType& val, uint32_t numBits, uint32_t toBit) { comp = numBits - newshift; numBits = newshift; } - res <<= numBits; - res |= v >> comp; + res = static_cast<IntType>(res << numBits); + res = static_cast<IntType>(res | (v >> comp)); reslen += numBits; } return res; @@ -714,7 +714,7 @@ public: // Do nothing return val; } else if (oldDepth == 0 && newDepth != 0) { - return (1 << newDepth) - 1; + return static_cast<ChannelType>((1 << newDepth) - 1); } else if (newDepth > oldDepth) { return Replicate(val, oldDepth, newDepth); } else { @@ -722,10 +722,11 @@ public: if (newDepth == 0) { return 0xFF; } else { - uint8_t bitsWasted 
= oldDepth - newDepth; + uint8_t bitsWasted = static_cast<uint8_t>(oldDepth - newDepth); uint16_t v = static_cast<uint16_t>(val); - v = (v + (1 << (bitsWasted - 1))) >> bitsWasted; - v = ::std::min<uint16_t>(::std::max<uint16_t>(0, v), (1 << newDepth) - 1); + v = static_cast<uint16_t>((v + (1 << (bitsWasted - 1))) >> bitsWasted); + v = ::std::min<uint16_t>(::std::max<uint16_t>(0, v), + static_cast<uint16_t>((1 << newDepth) - 1)); return static_cast<uint8_t>(v); } } @@ -1191,18 +1192,18 @@ static uint32_t SelectPartition(int32_t seed, int32_t x, int32_t y, int32_t z, uint8_t seed11 = static_cast<uint8_t>((rnum >> 26) & 0xF); uint8_t seed12 = static_cast<uint8_t>(((rnum >> 30) | (rnum << 2)) & 0xF); - seed1 *= seed1; - seed2 *= seed2; - seed3 *= seed3; - seed4 *= seed4; - seed5 *= seed5; - seed6 *= seed6; - seed7 *= seed7; - seed8 *= seed8; - seed9 *= seed9; - seed10 *= seed10; - seed11 *= seed11; - seed12 *= seed12; + seed1 = static_cast<uint8_t>(seed1 * seed1); + seed2 = static_cast<uint8_t>(seed2 * seed2); + seed3 = static_cast<uint8_t>(seed3 * seed3); + seed4 = static_cast<uint8_t>(seed4 * seed4); + seed5 = static_cast<uint8_t>(seed5 * seed5); + seed6 = static_cast<uint8_t>(seed6 * seed6); + seed7 = static_cast<uint8_t>(seed7 * seed7); + seed8 = static_cast<uint8_t>(seed8 * seed8); + seed9 = static_cast<uint8_t>(seed9 * seed9); + seed10 = static_cast<uint8_t>(seed10 * seed10); + seed11 = static_cast<uint8_t>(seed11 * seed11); + seed12 = static_cast<uint8_t>(seed12 * seed12); int32_t sh1, sh2, sh3; if (seed & 1) { @@ -1214,18 +1215,18 @@ static uint32_t SelectPartition(int32_t seed, int32_t x, int32_t y, int32_t z, } sh3 = (seed & 0x10) ? 
sh1 : sh2; - seed1 >>= sh1; - seed2 >>= sh2; - seed3 >>= sh1; - seed4 >>= sh2; - seed5 >>= sh1; - seed6 >>= sh2; - seed7 >>= sh1; - seed8 >>= sh2; - seed9 >>= sh3; - seed10 >>= sh3; - seed11 >>= sh3; - seed12 >>= sh3; + seed1 = static_cast<uint8_t>(seed1 >> sh1); + seed2 = static_cast<uint8_t>(seed2 >> sh2); + seed3 = static_cast<uint8_t>(seed3 >> sh1); + seed4 = static_cast<uint8_t>(seed4 >> sh2); + seed5 = static_cast<uint8_t>(seed5 >> sh1); + seed6 = static_cast<uint8_t>(seed6 >> sh2); + seed7 = static_cast<uint8_t>(seed7 >> sh1); + seed8 = static_cast<uint8_t>(seed8 >> sh2); + seed9 = static_cast<uint8_t>(seed9 >> sh3); + seed10 = static_cast<uint8_t>(seed10 >> sh3); + seed11 = static_cast<uint8_t>(seed11 >> sh3); + seed12 = static_cast<uint8_t>(seed12 >> sh3); int32_t a = seed1 * x + seed2 * y + seed11 * z + (rnum >> 14); int32_t b = seed3 * x + seed4 * y + seed12 * z + (rnum >> 10); @@ -1558,7 +1559,9 @@ static void DecompressBlock(const uint8_t inBuf[16], const uint32_t blockWidth, // Make sure that higher non-texel bits are set to zero const uint32_t clearByteStart = (weightParams.GetPackedBitSize() >> 3) + 1; - texelWeightData[clearByteStart - 1] &= (1 << (weightParams.GetPackedBitSize() % 8)) - 1; + texelWeightData[clearByteStart - 1] = + texelWeightData[clearByteStart - 1] & + static_cast<uint8_t>((1 << (weightParams.GetPackedBitSize() % 8)) - 1); memset(texelWeightData + clearByteStart, 0, 16 - clearByteStart); std::vector<IntegerEncodedValue> texelWeightValues; diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h index f1e3952bc..e5eac3f3b 100644 --- a/src/video_core/textures/decoders.h +++ b/src/video_core/textures/decoders.h @@ -12,6 +12,10 @@ namespace Tegra::Texture { // GOBSize constant. Calculated by 64 bytes in x multiplied by 8 y coords, represents // an small rect of (64/bytes_per_pixel)X8. 
+inline std::size_t GetGOBSize() { + return 512; +} + inline std::size_t GetGOBSizeShift() { return 9; } diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h index 27c8ce975..8e82c6748 100644 --- a/src/video_core/textures/texture.h +++ b/src/video_core/textures/texture.h @@ -342,13 +342,14 @@ struct TSCEntry { float GetLodBias() const { // Sign extend the 13-bit value. constexpr u32 mask = 1U << (13 - 1); - return static_cast<s32>((mip_lod_bias ^ mask) - mask) / 256.0f; + return static_cast<float>(static_cast<s32>((mip_lod_bias ^ mask) - mask)) / 256.0f; } std::array<float, 4> GetBorderColor() const { if (srgb_conversion) { - return {srgb_border_color_r / 255.0f, srgb_border_color_g / 255.0f, - srgb_border_color_b / 255.0f, border_color[3]}; + return {static_cast<float>(srgb_border_color_r) / 255.0f, + static_cast<float>(srgb_border_color_g) / 255.0f, + static_cast<float>(srgb_border_color_b) / 255.0f, border_color[3]}; } return border_color; } diff --git a/src/video_core/video_core.cpp b/src/video_core/video_core.cpp index 60cda0ca3..8e947394c 100644 --- a/src/video_core/video_core.cpp +++ b/src/video_core/video_core.cpp @@ -28,7 +28,7 @@ std::unique_ptr<Tegra::GPU> CreateGPU(Core::System& system) { u16 GetResolutionScaleFactor(const RendererBase& renderer) { return static_cast<u16>( - Settings::values.resolution_factor + Settings::values.resolution_factor != 0 ? Settings::values.resolution_factor : renderer.GetRenderWindow().GetFramebufferLayout().GetScalingRatio()); } diff --git a/src/web_service/web_backend.cpp b/src/web_service/web_backend.cpp index dc149d2ed..6683f459f 100644 --- a/src/web_service/web_backend.cpp +++ b/src/web_service/web_backend.cpp @@ -2,10 +2,12 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
+#include <array> #include <cstdlib> #include <mutex> #include <string> #include <LUrlParser.h> +#include <fmt/format.h> #include <httplib.h> #include "common/common_types.h" #include "common/logging/log.h" @@ -16,10 +18,10 @@ namespace WebService { constexpr std::array<const char, 1> API_VERSION{'1'}; -constexpr u32 HTTP_PORT = 80; -constexpr u32 HTTPS_PORT = 443; +constexpr int HTTP_PORT = 80; +constexpr int HTTPS_PORT = 443; -constexpr u32 TIMEOUT_SECONDS = 30; +constexpr std::size_t TIMEOUT_SECONDS = 30; struct Client::Impl { Impl(std::string host, std::string username, std::string token) @@ -31,8 +33,9 @@ struct Client::Impl { } /// A generic function handles POST, GET and DELETE request together - Common::WebResult GenericJson(const std::string& method, const std::string& path, - const std::string& data, bool allow_anonymous) { + Common::WebResult GenericRequest(const std::string& method, const std::string& path, + const std::string& data, bool allow_anonymous, + const std::string& accept) { if (jwt.empty()) { UpdateJWT(); } @@ -43,11 +46,11 @@ struct Client::Impl { "Credentials needed"}; } - auto result = GenericJson(method, path, data, jwt); + auto result = GenericRequest(method, path, data, accept, jwt); if (result.result_string == "401") { // Try again with new JWT UpdateJWT(); - result = GenericJson(method, path, data, jwt); + result = GenericRequest(method, path, data, accept, jwt); } return result; @@ -56,12 +59,13 @@ struct Client::Impl { /** * A generic function with explicit authentication method specified * JWT is used if the jwt parameter is not empty - * username + token is used if jwt is empty but username and token are not empty - * anonymous if all of jwt, username and token are empty + * username + token is used if jwt is empty but username and token are + * not empty anonymous if all of jwt, username and token are empty */ - Common::WebResult GenericJson(const std::string& method, const std::string& path, - const std::string& data, const 
std::string& jwt = "", - const std::string& username = "", const std::string& token = "") { + Common::WebResult GenericRequest(const std::string& method, const std::string& path, + const std::string& data, const std::string& accept, + const std::string& jwt = "", const std::string& username = "", + const std::string& token = "") { if (cli == nullptr) { auto parsedUrl = LUrlParser::clParseURL::ParseURL(host); int port; @@ -132,8 +136,7 @@ struct Client::Impl { return Common::WebResult{Common::WebResult::Code::WrongContent, ""}; } - if (content_type->second.find("application/json") == std::string::npos && - content_type->second.find("text/html; charset=utf-8") == std::string::npos) { + if (content_type->second.find(accept) == std::string::npos) { LOG_ERROR(WebService, "{} to {} returned wrong content: {}", method, host + path, content_type->second); return Common::WebResult{Common::WebResult::Code::WrongContent, "Wrong content"}; @@ -147,7 +150,7 @@ struct Client::Impl { return; } - auto result = GenericJson("POST", "/jwt/internal", "", "", username, token); + auto result = GenericRequest("POST", "/jwt/internal", "", "text/html", "", username, token); if (result.result_code != Common::WebResult::Code::Success) { LOG_ERROR(WebService, "UpdateJWT failed"); } else { @@ -180,16 +183,29 @@ Client::~Client() = default; Common::WebResult Client::PostJson(const std::string& path, const std::string& data, bool allow_anonymous) { - return impl->GenericJson("POST", path, data, allow_anonymous); + return impl->GenericRequest("POST", path, data, allow_anonymous, "application/json"); } Common::WebResult Client::GetJson(const std::string& path, bool allow_anonymous) { - return impl->GenericJson("GET", path, "", allow_anonymous); + return impl->GenericRequest("GET", path, "", allow_anonymous, "application/json"); } Common::WebResult Client::DeleteJson(const std::string& path, const std::string& data, bool allow_anonymous) { - return impl->GenericJson("DELETE", path, data, 
allow_anonymous); + return impl->GenericRequest("DELETE", path, data, allow_anonymous, "application/json"); +} + +Common::WebResult Client::GetPlain(const std::string& path, bool allow_anonymous) { + return impl->GenericRequest("GET", path, "", allow_anonymous, "text/plain"); +} + +Common::WebResult Client::GetImage(const std::string& path, bool allow_anonymous) { + return impl->GenericRequest("GET", path, "", allow_anonymous, "image/png"); +} + +Common::WebResult Client::GetExternalJWT(const std::string& audience) { + return impl->GenericRequest("POST", fmt::format("/jwt/external/{}", audience), "", false, + "text/html"); } } // namespace WebService diff --git a/src/web_service/web_backend.h b/src/web_service/web_backend.h index c637e09df..04121f17e 100644 --- a/src/web_service/web_backend.h +++ b/src/web_service/web_backend.h @@ -46,6 +46,29 @@ public: Common::WebResult DeleteJson(const std::string& path, const std::string& data, bool allow_anonymous); + /** + * Gets a plain string from the specified path. + * @param path the URL segment after the host address. + * @param allow_anonymous If true, allow anonymous unauthenticated requests. + * @return the result of the request. + */ + Common::WebResult GetPlain(const std::string& path, bool allow_anonymous); + + /** + * Gets an PNG image from the specified path. + * @param path the URL segment after the host address. + * @param allow_anonymous If true, allow anonymous unauthenticated requests. + * @return the result of the request. + */ + Common::WebResult GetImage(const std::string& path, bool allow_anonymous); + + /** + * Requests an external JWT for the specific audience provided. + * @param audience the audience of the JWT requested. + * @return the result of the request. 
+ */ + Common::WebResult GetExternalJWT(const std::string& audience); + private: struct Impl; std::unique_ptr<Impl> impl; diff --git a/src/yuzu/CMakeLists.txt b/src/yuzu/CMakeLists.txt index ff1c1d985..11ae1e66e 100644 --- a/src/yuzu/CMakeLists.txt +++ b/src/yuzu/CMakeLists.txt @@ -78,11 +78,6 @@ add_executable(yuzu configuration/configure_web.cpp configuration/configure_web.h configuration/configure_web.ui - debugger/graphics/graphics_breakpoint_observer.cpp - debugger/graphics/graphics_breakpoint_observer.h - debugger/graphics/graphics_breakpoints.cpp - debugger/graphics/graphics_breakpoints.h - debugger/graphics/graphics_breakpoints_p.h debugger/console.cpp debugger/console.h debugger/profiler.cpp diff --git a/src/yuzu/configuration/configure_web.cpp b/src/yuzu/configuration/configure_web.cpp index 336b062b3..8637f5b3c 100644 --- a/src/yuzu/configuration/configure_web.cpp +++ b/src/yuzu/configuration/configure_web.cpp @@ -11,6 +11,31 @@ #include "yuzu/configuration/configure_web.h" #include "yuzu/uisettings.h" +static constexpr char token_delimiter{':'}; + +static std::string GenerateDisplayToken(const std::string& username, const std::string& token) { + if (username.empty() || token.empty()) { + return {}; + } + + const std::string unencoded_display_token{username + token_delimiter + token}; + QByteArray b{unencoded_display_token.c_str()}; + QByteArray b64 = b.toBase64(); + return b64.toStdString(); +} + +static std::string UsernameFromDisplayToken(const std::string& display_token) { + const std::string unencoded_display_token{ + QByteArray::fromBase64(display_token.c_str()).toStdString()}; + return unencoded_display_token.substr(0, unencoded_display_token.find(token_delimiter)); +} + +static std::string TokenFromDisplayToken(const std::string& display_token) { + const std::string unencoded_display_token{ + QByteArray::fromBase64(display_token.c_str()).toStdString()}; + return unencoded_display_token.substr(unencoded_display_token.find(token_delimiter) + 1); +} 
+ ConfigureWeb::ConfigureWeb(QWidget* parent) : QWidget(parent), ui(std::make_unique<Ui::ConfigureWeb>()) { ui->setupUi(this); @@ -63,13 +88,18 @@ void ConfigureWeb::SetConfiguration() { ui->web_signup_link->setOpenExternalLinks(true); ui->web_token_info_link->setOpenExternalLinks(true); + if (Settings::values.yuzu_username.empty()) { + ui->username->setText(tr("Unspecified")); + } else { + ui->username->setText(QString::fromStdString(Settings::values.yuzu_username)); + } + ui->toggle_telemetry->setChecked(Settings::values.enable_telemetry); - ui->edit_username->setText(QString::fromStdString(Settings::values.yuzu_username)); - ui->edit_token->setText(QString::fromStdString(Settings::values.yuzu_token)); + ui->edit_token->setText(QString::fromStdString( + GenerateDisplayToken(Settings::values.yuzu_username, Settings::values.yuzu_token))); // Connect after setting the values, to avoid calling OnLoginChanged now connect(ui->edit_token, &QLineEdit::textChanged, this, &ConfigureWeb::OnLoginChanged); - connect(ui->edit_username, &QLineEdit::textChanged, this, &ConfigureWeb::OnLoginChanged); user_verified = true; @@ -80,12 +110,13 @@ void ConfigureWeb::ApplyConfiguration() { Settings::values.enable_telemetry = ui->toggle_telemetry->isChecked(); UISettings::values.enable_discord_presence = ui->toggle_discordrpc->isChecked(); if (user_verified) { - Settings::values.yuzu_username = ui->edit_username->text().toStdString(); - Settings::values.yuzu_token = ui->edit_token->text().toStdString(); + Settings::values.yuzu_username = + UsernameFromDisplayToken(ui->edit_token->text().toStdString()); + Settings::values.yuzu_token = TokenFromDisplayToken(ui->edit_token->text().toStdString()); } else { - QMessageBox::warning(this, tr("Username and token not verified"), - tr("Username and token were not verified. The changes to your " - "username and/or token have not been saved.")); + QMessageBox::warning( + this, tr("Token not verified"), + tr("Token was not verified. 
The change to your token has not been saved.")); } } @@ -96,17 +127,15 @@ void ConfigureWeb::RefreshTelemetryID() { } void ConfigureWeb::OnLoginChanged() { - if (ui->edit_username->text().isEmpty() && ui->edit_token->text().isEmpty()) { + if (ui->edit_token->text().isEmpty()) { user_verified = true; const QPixmap pixmap = QIcon::fromTheme(QStringLiteral("checked")).pixmap(16); - ui->label_username_verified->setPixmap(pixmap); ui->label_token_verified->setPixmap(pixmap); } else { user_verified = false; const QPixmap pixmap = QIcon::fromTheme(QStringLiteral("failed")).pixmap(16); - ui->label_username_verified->setPixmap(pixmap); ui->label_token_verified->setPixmap(pixmap); } } @@ -114,10 +143,11 @@ void ConfigureWeb::OnLoginChanged() { void ConfigureWeb::VerifyLogin() { ui->button_verify_login->setDisabled(true); ui->button_verify_login->setText(tr("Verifying...")); - verify_watcher.setFuture(QtConcurrent::run([username = ui->edit_username->text().toStdString(), - token = ui->edit_token->text().toStdString()] { - return Core::VerifyLogin(username, token); - })); + verify_watcher.setFuture(QtConcurrent::run( + [username = UsernameFromDisplayToken(ui->edit_token->text().toStdString()), + token = TokenFromDisplayToken(ui->edit_token->text().toStdString())] { + return Core::VerifyLogin(username, token); + })); } void ConfigureWeb::OnLoginVerified() { @@ -127,16 +157,15 @@ void ConfigureWeb::OnLoginVerified() { user_verified = true; const QPixmap pixmap = QIcon::fromTheme(QStringLiteral("checked")).pixmap(16); - ui->label_username_verified->setPixmap(pixmap); ui->label_token_verified->setPixmap(pixmap); + ui->username->setText( + QString::fromStdString(UsernameFromDisplayToken(ui->edit_token->text().toStdString()))); } else { const QPixmap pixmap = QIcon::fromTheme(QStringLiteral("failed")).pixmap(16); - ui->label_username_verified->setPixmap(pixmap); ui->label_token_verified->setPixmap(pixmap); - - QMessageBox::critical( - this, tr("Verification failed"), - 
tr("Verification failed. Check that you have entered your username and token " - "correctly, and that your internet connection is working.")); + ui->username->setText(tr("Unspecified")); + QMessageBox::critical(this, tr("Verification failed"), + tr("Verification failed. Check that you have entered your token " + "correctly, and that your internet connection is working.")); } } diff --git a/src/yuzu/configuration/configure_web.ui b/src/yuzu/configuration/configure_web.ui index 2f4b9dd73..8c07d1165 100644 --- a/src/yuzu/configuration/configure_web.ui +++ b/src/yuzu/configuration/configure_web.ui @@ -55,11 +55,7 @@ </widget> </item> <item row="0" column="1" colspan="3"> - <widget class="QLineEdit" name="edit_username"> - <property name="maxLength"> - <number>36</number> - </property> - </widget> + <widget class="QLabel" name="username" /> </item> <item row="1" column="0"> <widget class="QLabel" name="label_token"> @@ -79,14 +75,10 @@ </property> </widget> </item> - <item row="0" column="4"> - <widget class="QLabel" name="label_username_verified"> - </widget> - </item> <item row="1" column="1" colspan="3"> <widget class="QLineEdit" name="edit_token"> <property name="maxLength"> - <number>36</number> + <number>80</number> </property> <property name="echoMode"> <enum>QLineEdit::Password</enum> diff --git a/src/yuzu/debugger/graphics/graphics_breakpoint_observer.cpp b/src/yuzu/debugger/graphics/graphics_breakpoint_observer.cpp deleted file mode 100644 index 5f459ccfb..000000000 --- a/src/yuzu/debugger/graphics/graphics_breakpoint_observer.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
- -#include <QMetaType> -#include "yuzu/debugger/graphics/graphics_breakpoint_observer.h" - -BreakPointObserverDock::BreakPointObserverDock(std::shared_ptr<Tegra::DebugContext> debug_context, - const QString& title, QWidget* parent) - : QDockWidget(title, parent), BreakPointObserver(debug_context) { - qRegisterMetaType<Tegra::DebugContext::Event>("Tegra::DebugContext::Event"); - - connect(this, &BreakPointObserverDock::Resumed, this, &BreakPointObserverDock::OnResumed); - - // NOTE: This signal is emitted from a non-GUI thread, but connect() takes - // care of delaying its handling to the GUI thread. - connect(this, &BreakPointObserverDock::BreakPointHit, this, - &BreakPointObserverDock::OnBreakPointHit, Qt::BlockingQueuedConnection); -} - -void BreakPointObserverDock::OnMaxwellBreakPointHit(Tegra::DebugContext::Event event, void* data) { - emit BreakPointHit(event, data); -} - -void BreakPointObserverDock::OnMaxwellResume() { - emit Resumed(); -} diff --git a/src/yuzu/debugger/graphics/graphics_breakpoint_observer.h b/src/yuzu/debugger/graphics/graphics_breakpoint_observer.h deleted file mode 100644 index ab32f0115..000000000 --- a/src/yuzu/debugger/graphics/graphics_breakpoint_observer.h +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <QDockWidget> -#include "video_core/debug_utils/debug_utils.h" - -/** - * Utility class which forwards calls to OnMaxwellBreakPointHit and OnMaxwellResume to public slots. - * This is because the Maxwell breakpoint callbacks are called from a non-GUI thread, while - * the widget usually wants to perform reactions in the GUI thread. 
- */ -class BreakPointObserverDock : public QDockWidget, - protected Tegra::DebugContext::BreakPointObserver { - Q_OBJECT - -public: - BreakPointObserverDock(std::shared_ptr<Tegra::DebugContext> debug_context, const QString& title, - QWidget* parent = nullptr); - - void OnMaxwellBreakPointHit(Tegra::DebugContext::Event event, void* data) override; - void OnMaxwellResume() override; - -signals: - void Resumed(); - void BreakPointHit(Tegra::DebugContext::Event event, void* data); - -private: - virtual void OnBreakPointHit(Tegra::DebugContext::Event event, void* data) = 0; - virtual void OnResumed() = 0; -}; diff --git a/src/yuzu/debugger/graphics/graphics_breakpoints.cpp b/src/yuzu/debugger/graphics/graphics_breakpoints.cpp deleted file mode 100644 index 1c80082a4..000000000 --- a/src/yuzu/debugger/graphics/graphics_breakpoints.cpp +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
- -#include <QLabel> -#include <QMetaType> -#include <QPushButton> -#include <QTreeView> -#include <QVBoxLayout> -#include "common/assert.h" -#include "yuzu/debugger/graphics/graphics_breakpoints.h" -#include "yuzu/debugger/graphics/graphics_breakpoints_p.h" - -BreakPointModel::BreakPointModel(std::shared_ptr<Tegra::DebugContext> debug_context, - QObject* parent) - : QAbstractListModel(parent), context_weak(debug_context), - at_breakpoint(debug_context->at_breakpoint), - active_breakpoint(debug_context->active_breakpoint) {} - -int BreakPointModel::columnCount(const QModelIndex& parent) const { - return 1; -} - -int BreakPointModel::rowCount(const QModelIndex& parent) const { - return static_cast<int>(Tegra::DebugContext::Event::NumEvents); -} - -QVariant BreakPointModel::data(const QModelIndex& index, int role) const { - const auto event = static_cast<Tegra::DebugContext::Event>(index.row()); - - switch (role) { - case Qt::DisplayRole: { - if (index.column() == 0) { - return DebugContextEventToString(event); - } - break; - } - - case Qt::CheckStateRole: { - if (index.column() == 0) - return data(index, Role_IsEnabled).toBool() ? 
Qt::Checked : Qt::Unchecked; - break; - } - - case Qt::BackgroundRole: { - if (at_breakpoint && index.row() == static_cast<int>(active_breakpoint)) { - return QBrush(QColor(0xE0, 0xE0, 0x10)); - } - break; - } - - case Role_IsEnabled: { - auto context = context_weak.lock(); - return context && context->breakpoints[(int)event].enabled; - } - - default: - break; - } - return QVariant(); -} - -Qt::ItemFlags BreakPointModel::flags(const QModelIndex& index) const { - if (!index.isValid()) - return 0; - - Qt::ItemFlags flags = Qt::ItemIsEnabled; - if (index.column() == 0) - flags |= Qt::ItemIsUserCheckable; - return flags; -} - -bool BreakPointModel::setData(const QModelIndex& index, const QVariant& value, int role) { - const auto event = static_cast<Tegra::DebugContext::Event>(index.row()); - - switch (role) { - case Qt::CheckStateRole: { - if (index.column() != 0) - return false; - - auto context = context_weak.lock(); - if (!context) - return false; - - context->breakpoints[(int)event].enabled = value == Qt::Checked; - QModelIndex changed_index = createIndex(index.row(), 0); - emit dataChanged(changed_index, changed_index); - return true; - } - } - - return false; -} - -void BreakPointModel::OnBreakPointHit(Tegra::DebugContext::Event event) { - auto context = context_weak.lock(); - if (!context) - return; - - active_breakpoint = context->active_breakpoint; - at_breakpoint = context->at_breakpoint; - emit dataChanged(createIndex(static_cast<int>(event), 0), - createIndex(static_cast<int>(event), 0)); -} - -void BreakPointModel::OnResumed() { - auto context = context_weak.lock(); - if (!context) - return; - - at_breakpoint = context->at_breakpoint; - emit dataChanged(createIndex(static_cast<int>(active_breakpoint), 0), - createIndex(static_cast<int>(active_breakpoint), 0)); - active_breakpoint = context->active_breakpoint; -} - -QString BreakPointModel::DebugContextEventToString(Tegra::DebugContext::Event event) { - switch (event) { - case 
Tegra::DebugContext::Event::MaxwellCommandLoaded: - return tr("Maxwell command loaded"); - case Tegra::DebugContext::Event::MaxwellCommandProcessed: - return tr("Maxwell command processed"); - case Tegra::DebugContext::Event::IncomingPrimitiveBatch: - return tr("Incoming primitive batch"); - case Tegra::DebugContext::Event::FinishedPrimitiveBatch: - return tr("Finished primitive batch"); - case Tegra::DebugContext::Event::NumEvents: - break; - } - - return tr("Unknown debug context event"); -} - -GraphicsBreakPointsWidget::GraphicsBreakPointsWidget( - std::shared_ptr<Tegra::DebugContext> debug_context, QWidget* parent) - : QDockWidget(tr("Maxwell Breakpoints"), parent), Tegra::DebugContext::BreakPointObserver( - debug_context) { - setObjectName(QStringLiteral("TegraBreakPointsWidget")); - - status_text = new QLabel(tr("Emulation running")); - resume_button = new QPushButton(tr("Resume")); - resume_button->setEnabled(false); - - breakpoint_model = new BreakPointModel(debug_context, this); - breakpoint_list = new QTreeView; - breakpoint_list->setRootIsDecorated(false); - breakpoint_list->setHeaderHidden(true); - breakpoint_list->setModel(breakpoint_model); - - qRegisterMetaType<Tegra::DebugContext::Event>("Tegra::DebugContext::Event"); - - connect(breakpoint_list, &QTreeView::doubleClicked, this, - &GraphicsBreakPointsWidget::OnItemDoubleClicked); - - connect(resume_button, &QPushButton::clicked, this, - &GraphicsBreakPointsWidget::OnResumeRequested); - - connect(this, &GraphicsBreakPointsWidget::BreakPointHit, this, - &GraphicsBreakPointsWidget::OnBreakPointHit, Qt::BlockingQueuedConnection); - connect(this, &GraphicsBreakPointsWidget::Resumed, this, &GraphicsBreakPointsWidget::OnResumed); - - connect(this, &GraphicsBreakPointsWidget::BreakPointHit, breakpoint_model, - &BreakPointModel::OnBreakPointHit, Qt::BlockingQueuedConnection); - connect(this, &GraphicsBreakPointsWidget::Resumed, breakpoint_model, - &BreakPointModel::OnResumed); - - connect(this, 
&GraphicsBreakPointsWidget::BreakPointsChanged, - [this](const QModelIndex& top_left, const QModelIndex& bottom_right) { - breakpoint_model->dataChanged(top_left, bottom_right); - }); - - QWidget* main_widget = new QWidget; - auto main_layout = new QVBoxLayout; - { - auto sub_layout = new QHBoxLayout; - sub_layout->addWidget(status_text); - sub_layout->addWidget(resume_button); - main_layout->addLayout(sub_layout); - } - main_layout->addWidget(breakpoint_list); - main_widget->setLayout(main_layout); - - setWidget(main_widget); -} - -void GraphicsBreakPointsWidget::OnMaxwellBreakPointHit(Event event, void* data) { - // Process in GUI thread - emit BreakPointHit(event, data); -} - -void GraphicsBreakPointsWidget::OnBreakPointHit(Tegra::DebugContext::Event event, void* data) { - status_text->setText(tr("Emulation halted at breakpoint")); - resume_button->setEnabled(true); -} - -void GraphicsBreakPointsWidget::OnMaxwellResume() { - // Process in GUI thread - emit Resumed(); -} - -void GraphicsBreakPointsWidget::OnResumed() { - status_text->setText(tr("Emulation running")); - resume_button->setEnabled(false); -} - -void GraphicsBreakPointsWidget::OnResumeRequested() { - if (auto context = context_weak.lock()) - context->Resume(); -} - -void GraphicsBreakPointsWidget::OnItemDoubleClicked(const QModelIndex& index) { - if (!index.isValid()) - return; - - QModelIndex check_index = breakpoint_list->model()->index(index.row(), 0); - QVariant enabled = breakpoint_list->model()->data(check_index, Qt::CheckStateRole); - QVariant new_state = Qt::Unchecked; - if (enabled == Qt::Unchecked) - new_state = Qt::Checked; - breakpoint_list->model()->setData(check_index, new_state, Qt::CheckStateRole); -} diff --git a/src/yuzu/debugger/graphics/graphics_breakpoints.h b/src/yuzu/debugger/graphics/graphics_breakpoints.h deleted file mode 100644 index a920a2ae5..000000000 --- a/src/yuzu/debugger/graphics/graphics_breakpoints.h +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2014 Citra Emulator 
Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <memory> -#include <QDockWidget> -#include "video_core/debug_utils/debug_utils.h" - -class QLabel; -class QPushButton; -class QTreeView; - -class BreakPointModel; - -class GraphicsBreakPointsWidget : public QDockWidget, Tegra::DebugContext::BreakPointObserver { - Q_OBJECT - - using Event = Tegra::DebugContext::Event; - -public: - explicit GraphicsBreakPointsWidget(std::shared_ptr<Tegra::DebugContext> debug_context, - QWidget* parent = nullptr); - - void OnMaxwellBreakPointHit(Tegra::DebugContext::Event event, void* data) override; - void OnMaxwellResume() override; - -signals: - void Resumed(); - void BreakPointHit(Tegra::DebugContext::Event event, void* data); - void BreakPointsChanged(const QModelIndex& topLeft, const QModelIndex& bottomRight); - -private: - void OnBreakPointHit(Tegra::DebugContext::Event event, void* data); - void OnItemDoubleClicked(const QModelIndex&); - void OnResumeRequested(); - void OnResumed(); - - QLabel* status_text; - QPushButton* resume_button; - - BreakPointModel* breakpoint_model; - QTreeView* breakpoint_list; -}; diff --git a/src/yuzu/debugger/graphics/graphics_breakpoints_p.h b/src/yuzu/debugger/graphics/graphics_breakpoints_p.h deleted file mode 100644 index fb488e38f..000000000 --- a/src/yuzu/debugger/graphics/graphics_breakpoints_p.h +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
- -#pragma once - -#include <memory> -#include <QAbstractListModel> -#include "video_core/debug_utils/debug_utils.h" - -class BreakPointModel : public QAbstractListModel { - Q_OBJECT - -public: - enum { - Role_IsEnabled = Qt::UserRole, - }; - - BreakPointModel(std::shared_ptr<Tegra::DebugContext> context, QObject* parent); - - int columnCount(const QModelIndex& parent = QModelIndex()) const override; - int rowCount(const QModelIndex& parent = QModelIndex()) const override; - QVariant data(const QModelIndex& index, int role = Qt::DisplayRole) const override; - Qt::ItemFlags flags(const QModelIndex& index) const override; - - bool setData(const QModelIndex& index, const QVariant& value, int role = Qt::EditRole) override; - - void OnBreakPointHit(Tegra::DebugContext::Event event); - void OnResumed(); - -private: - static QString DebugContextEventToString(Tegra::DebugContext::Event event); - - std::weak_ptr<Tegra::DebugContext> context_weak; - bool at_breakpoint; - Tegra::DebugContext::Event active_breakpoint; -}; diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index 188f798c0..727bd8a94 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp @@ -57,7 +57,7 @@ std::size_t WaitTreeItem::Row() const { std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList() { std::vector<std::unique_ptr<WaitTreeThread>> item_list; std::size_t row = 0; - auto add_threads = [&](const std::vector<Kernel::SharedPtr<Kernel::Thread>>& threads) { + auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::Thread>>& threads) { for (std::size_t i = 0; i < threads.size(); ++i) { item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i])); item_list.back()->row = row; @@ -80,7 +80,7 @@ QString WaitTreeText::GetText() const { WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTable& handle_table) : mutex_address(mutex_address) { - mutex_value = Memory::Read32(mutex_address); + 
mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address); owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Mutex::MutexOwnerMask); owner = handle_table.Get<Kernel::Thread>(owner_handle); } @@ -115,10 +115,11 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() cons std::vector<std::unique_ptr<WaitTreeItem>> list; constexpr std::size_t BaseRegister = 29; + auto& memory = Core::System::GetInstance().Memory(); u64 base_pointer = thread.GetContext().cpu_registers[BaseRegister]; while (base_pointer != 0) { - const u64 lr = Memory::Read64(base_pointer + sizeof(u64)); + const u64 lr = memory.Read64(base_pointer + sizeof(u64)); if (lr == 0) { break; } @@ -126,7 +127,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() cons list.push_back(std::make_unique<WaitTreeText>( tr("0x%1").arg(lr - sizeof(u32), 16, 16, QLatin1Char{'0'}))); - base_pointer = Memory::Read64(base_pointer); + base_pointer = memory.Read64(base_pointer); } return list; @@ -172,8 +173,8 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeWaitObject::GetChildren() con return list; } -WaitTreeObjectList::WaitTreeObjectList( - const std::vector<Kernel::SharedPtr<Kernel::WaitObject>>& list, bool w_all) +WaitTreeObjectList::WaitTreeObjectList(const std::vector<std::shared_ptr<Kernel::WaitObject>>& list, + bool w_all) : object_list(list), wait_all(w_all) {} WaitTreeObjectList::~WaitTreeObjectList() = default; @@ -325,7 +326,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const { WaitTreeEvent::WaitTreeEvent(const Kernel::ReadableEvent& object) : WaitTreeWaitObject(object) {} WaitTreeEvent::~WaitTreeEvent() = default; -WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::SharedPtr<Kernel::Thread>>& list) +WaitTreeThreadList::WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list) : thread_list(list) {} WaitTreeThreadList::~WaitTreeThreadList() = default; diff --git 
a/src/yuzu/debugger/wait_tree.h b/src/yuzu/debugger/wait_tree.h index f2b13be24..631274a5f 100644 --- a/src/yuzu/debugger/wait_tree.h +++ b/src/yuzu/debugger/wait_tree.h @@ -83,7 +83,7 @@ private: VAddr mutex_address; u32 mutex_value; Kernel::Handle owner_handle; - Kernel::SharedPtr<Kernel::Thread> owner; + std::shared_ptr<Kernel::Thread> owner; }; class WaitTreeCallstack : public WaitTreeExpandableItem { @@ -116,15 +116,14 @@ protected: class WaitTreeObjectList : public WaitTreeExpandableItem { Q_OBJECT public: - WaitTreeObjectList(const std::vector<Kernel::SharedPtr<Kernel::WaitObject>>& list, - bool wait_all); + WaitTreeObjectList(const std::vector<std::shared_ptr<Kernel::WaitObject>>& list, bool wait_all); ~WaitTreeObjectList() override; QString GetText() const override; std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override; private: - const std::vector<Kernel::SharedPtr<Kernel::WaitObject>>& object_list; + const std::vector<std::shared_ptr<Kernel::WaitObject>>& object_list; bool wait_all; }; @@ -149,14 +148,14 @@ public: class WaitTreeThreadList : public WaitTreeExpandableItem { Q_OBJECT public: - explicit WaitTreeThreadList(const std::vector<Kernel::SharedPtr<Kernel::Thread>>& list); + explicit WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list); ~WaitTreeThreadList() override; QString GetText() const override; std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override; private: - const std::vector<Kernel::SharedPtr<Kernel::Thread>>& thread_list; + const std::vector<std::shared_ptr<Kernel::Thread>>& thread_list; }; class WaitTreeModel : public QAbstractItemModel { diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index 160613ee1..b21fbf826 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp @@ -93,7 +93,6 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual #include "core/perf_stats.h" #include "core/settings.h" #include "core/telemetry_session.h" -#include 
"video_core/debug_utils/debug_utils.h" #include "yuzu/about_dialog.h" #include "yuzu/bootmanager.h" #include "yuzu/compatdb.h" @@ -101,7 +100,6 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual #include "yuzu/configuration/config.h" #include "yuzu/configuration/configure_dialog.h" #include "yuzu/debugger/console.h" -#include "yuzu/debugger/graphics/graphics_breakpoints.h" #include "yuzu/debugger/profiler.h" #include "yuzu/debugger/wait_tree.h" #include "yuzu/discord.h" @@ -187,8 +185,6 @@ GMainWindow::GMainWindow() provider(std::make_unique<FileSys::ManualContentProvider>()) { InitializeLogging(); - debug_context = Tegra::DebugContext::Construct(); - setAcceptDrops(true); ui.setupUi(this); statusBar()->hide(); @@ -495,11 +491,6 @@ void GMainWindow::InitializeDebugWidgets() { debug_menu->addAction(microProfileDialog->toggleViewAction()); #endif - graphicsBreakpointsWidget = new GraphicsBreakPointsWidget(debug_context, this); - addDockWidget(Qt::RightDockWidgetArea, graphicsBreakpointsWidget); - graphicsBreakpointsWidget->hide(); - debug_menu->addAction(graphicsBreakpointsWidget->toggleViewAction()); - waitTreeWidget = new WaitTreeWidget(this); addDockWidget(Qt::LeftDockWidgetArea, waitTreeWidget); waitTreeWidget->hide(); @@ -817,6 +808,9 @@ QStringList GMainWindow::GetUnsupportedGLExtensions() { if (!GLAD_GL_ARB_multi_bind) { unsupported_ext.append(QStringLiteral("ARB_multi_bind")); } + if (!GLAD_GL_ARB_clip_control) { + unsupported_ext.append(QStringLiteral("ARB_clip_control")); + } // Extensions required to support some texture formats. 
if (!GLAD_GL_EXT_texture_compression_s3tc) { @@ -866,8 +860,6 @@ bool GMainWindow::LoadROM(const QString& filename) { Core::System& system{Core::System::GetInstance()}; system.SetFilesystem(vfs); - system.SetGPUDebugContext(debug_context); - system.SetAppletFrontendSet({ nullptr, // Parental Controls std::make_unique<QtErrorDisplay>(*this), // diff --git a/src/yuzu/main.h b/src/yuzu/main.h index 7f46bea2b..a56f9a981 100644 --- a/src/yuzu/main.h +++ b/src/yuzu/main.h @@ -22,7 +22,6 @@ class Config; class EmuThread; class GameList; class GImageInfo; -class GraphicsBreakPointsWidget; class GRenderWindow; class LoadingScreen; class MicroProfileDialog; @@ -42,10 +41,6 @@ class ManualContentProvider; class VfsFilesystem; } // namespace FileSys -namespace Tegra { -class DebugContext; -} - enum class EmulatedDirectoryTarget { NAND, SDMC, @@ -223,8 +218,6 @@ private: Ui::MainWindow ui; - std::shared_ptr<Tegra::DebugContext> debug_context; - GRenderWindow* render_window; GameList* game_list; LoadingScreen* loading_screen; @@ -255,7 +248,6 @@ private: // Debugger panes ProfilerWidget* profilerWidget; MicroProfileDialog* microProfileDialog; - GraphicsBreakPointsWidget* graphicsBreakpointsWidget; WaitTreeWidget* waitTreeWidget; QAction* actions_recent_files[max_recent_files_item]; diff --git a/src/yuzu/main.ui b/src/yuzu/main.ui index a1ce3c0c3..21f422500 100644 --- a/src/yuzu/main.ui +++ b/src/yuzu/main.ui @@ -140,11 +140,6 @@ <string>Load Folder...</string> </property> </action> - <action name="action_Load_Symbol_Map"> - <property name="text"> - <string>Load Symbol Map...</string> - </property> - </action> <action name="action_Exit"> <property name="text"> <string>E&xit</string> @@ -221,14 +216,6 @@ <string>Show Status Bar</string> </property> </action> - <action name="action_Select_Game_List_Root"> - <property name="text"> - <string>Select Game Directory...</string> - </property> - <property name="toolTip"> - <string>Selects a folder to display in the game list</string> - 
</property> - </action> <action name="action_Select_NAND_Directory"> <property name="text"> <string>Select NAND Directory...</string> diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp index f91b071bf..6fde694a2 100644 --- a/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp +++ b/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp @@ -50,7 +50,7 @@ private: }; bool EmuWindow_SDL2_GL::SupportsRequiredGLExtensions() { - std::vector<std::string> unsupported_ext; + std::vector<std::string_view> unsupported_ext; if (!GLAD_GL_ARB_buffer_storage) unsupported_ext.push_back("ARB_buffer_storage"); @@ -62,6 +62,8 @@ bool EmuWindow_SDL2_GL::SupportsRequiredGLExtensions() { unsupported_ext.push_back("ARB_texture_mirror_clamp_to_edge"); if (!GLAD_GL_ARB_multi_bind) unsupported_ext.push_back("ARB_multi_bind"); + if (!GLAD_GL_ARB_clip_control) + unsupported_ext.push_back("ARB_clip_control"); // Extensions required to support some texture formats. if (!GLAD_GL_EXT_texture_compression_s3tc) @@ -71,8 +73,8 @@ bool EmuWindow_SDL2_GL::SupportsRequiredGLExtensions() { if (!GLAD_GL_ARB_depth_buffer_float) unsupported_ext.push_back("ARB_depth_buffer_float"); - for (const std::string& ext : unsupported_ext) - LOG_CRITICAL(Frontend, "Unsupported GL extension: {}", ext); + for (const auto& extension : unsupported_ext) + LOG_CRITICAL(Frontend, "Unsupported GL extension: {}", extension); return unsupported_ext.empty(); } |