author | Lioncash <mathew1800@gmail.com> | 2020-07-28 10:32:39 +0200
committer | Lioncash <mathew1800@gmail.com> | 2020-07-28 10:36:53 +0200
commit | e3f0c9323039bd1148a8e36a31b468a871971fbb (patch)
tree | 67f4d007c3d6c887f9f8439879e9904a890d12e9
parent | Merge pull request #4437 from lioncash/ptr (diff)
-rw-r--r-- | src/common/atomic_ops.cpp | 37
-rw-r--r-- | src/common/atomic_ops.h | 10
-rw-r--r-- | src/core/memory.cpp | 10
3 files changed, 30 insertions, 27 deletions
diff --git a/src/common/atomic_ops.cpp b/src/common/atomic_ops.cpp
index 1098e21ff..1612d0e67 100644
--- a/src/common/atomic_ops.cpp
+++ b/src/common/atomic_ops.cpp
@@ -14,50 +14,55 @@ namespace Common {
 
 #if _MSC_VER
 
-bool AtomicCompareAndSwap(u8 volatile* pointer, u8 value, u8 expected) {
-    u8 result = _InterlockedCompareExchange8((char*)pointer, value, expected);
+bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
+    const u8 result =
+        _InterlockedCompareExchange8(reinterpret_cast<volatile char*>(pointer), value, expected);
     return result == expected;
 }
 
-bool AtomicCompareAndSwap(u16 volatile* pointer, u16 value, u16 expected) {
-    u16 result = _InterlockedCompareExchange16((short*)pointer, value, expected);
+bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
+    const u16 result =
+        _InterlockedCompareExchange16(reinterpret_cast<volatile short*>(pointer), value, expected);
     return result == expected;
 }
 
-bool AtomicCompareAndSwap(u32 volatile* pointer, u32 value, u32 expected) {
-    u32 result = _InterlockedCompareExchange((long*)pointer, value, expected);
+bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
+    const u32 result =
+        _InterlockedCompareExchange(reinterpret_cast<volatile long*>(pointer), value, expected);
     return result == expected;
 }
 
-bool AtomicCompareAndSwap(u64 volatile* pointer, u64 value, u64 expected) {
-    u64 result = _InterlockedCompareExchange64((__int64*)pointer, value, expected);
+bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
+    const u64 result = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64*>(pointer),
+                                                     value, expected);
     return result == expected;
 }
 
-bool AtomicCompareAndSwap(u64 volatile* pointer, u128 value, u128 expected) {
-    return _InterlockedCompareExchange128((__int64*)pointer, value[1], value[0],
-                                          (__int64*)expected.data()) != 0;
+bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
+    return _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), value[1],
+                                          value[0],
+                                          reinterpret_cast<__int64*>(expected.data())) != 0;
 }
 
 #else
 
-bool AtomicCompareAndSwap(u8 volatile* pointer, u8 value, u8 expected) {
+bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
     return __sync_bool_compare_and_swap(pointer, expected, value);
 }
 
-bool AtomicCompareAndSwap(u16 volatile* pointer, u16 value, u16 expected) {
+bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
     return __sync_bool_compare_and_swap(pointer, expected, value);
 }
 
-bool AtomicCompareAndSwap(u32 volatile* pointer, u32 value, u32 expected) {
+bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
     return __sync_bool_compare_and_swap(pointer, expected, value);
 }
 
-bool AtomicCompareAndSwap(u64 volatile* pointer, u64 value, u64 expected) {
+bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
     return __sync_bool_compare_and_swap(pointer, expected, value);
 }
 
-bool AtomicCompareAndSwap(u64 volatile* pointer, u128 value, u128 expected) {
+bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
     unsigned __int128 value_a;
     unsigned __int128 expected_a;
     std::memcpy(&value_a, value.data(), sizeof(u128));
diff --git a/src/common/atomic_ops.h b/src/common/atomic_ops.h
index e6181d521..8d6b73c00 100644
--- a/src/common/atomic_ops.h
+++ b/src/common/atomic_ops.h
@@ -8,10 +8,10 @@
 
 namespace Common {
 
-bool AtomicCompareAndSwap(u8 volatile* pointer, u8 value, u8 expected);
-bool AtomicCompareAndSwap(u16 volatile* pointer, u16 value, u16 expected);
-bool AtomicCompareAndSwap(u32 volatile* pointer, u32 value, u32 expected);
-bool AtomicCompareAndSwap(u64 volatile* pointer, u64 value, u64 expected);
-bool AtomicCompareAndSwap(u64 volatile* pointer, u128 value, u128 expected);
+bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected);
+bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected);
+bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected);
+bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected);
+bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected);
 
 } // namespace Common
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 2c5588933..86d17c6cb 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -704,7 +704,7 @@ struct Memory::Impl {
         u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
         if (page_pointer != nullptr) {
             // NOTE: Avoid adding any extra logic to this fast-path block
-            T volatile* pointer = reinterpret_cast<T volatile*>(&page_pointer[vaddr]);
+            auto* pointer = reinterpret_cast<volatile T*>(&page_pointer[vaddr]);
             return Common::AtomicCompareAndSwap(pointer, data, expected);
         }
 
@@ -720,9 +720,8 @@ struct Memory::Impl {
         case Common::PageType::RasterizerCachedMemory: {
             u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
             system.GPU().InvalidateRegion(vaddr, sizeof(T));
-            T volatile* pointer = reinterpret_cast<T volatile*>(&host_ptr);
+            auto* pointer = reinterpret_cast<volatile T*>(&host_ptr);
             return Common::AtomicCompareAndSwap(pointer, data, expected);
-            break;
         }
         default:
             UNREACHABLE();
@@ -734,7 +733,7 @@ struct Memory::Impl {
         u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
         if (page_pointer != nullptr) {
             // NOTE: Avoid adding any extra logic to this fast-path block
-            u64 volatile* pointer = reinterpret_cast<u64 volatile*>(&page_pointer[vaddr]);
+            auto* pointer = reinterpret_cast<volatile u64*>(&page_pointer[vaddr]);
             return Common::AtomicCompareAndSwap(pointer, data, expected);
         }
 
@@ -750,9 +749,8 @@ struct Memory::Impl {
        case Common::PageType::RasterizerCachedMemory: {
             u8* host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)};
             system.GPU().InvalidateRegion(vaddr, sizeof(u128));
-            u64 volatile* pointer = reinterpret_cast<u64 volatile*>(&host_ptr);
+            auto* pointer = reinterpret_cast<volatile u64*>(&host_ptr);
             return Common::AtomicCompareAndSwap(pointer, data, expected);
-            break;
         }
         default:
             UNREACHABLE();
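The change keeps the volatile qualifier on the pointer type all the way through (volatile-qualified parameters plus reinterpret_cast in the MSVC intrinsic path) instead of stripping it with C-style casts. As a minimal sketch of what the non-MSVC overloads reduce to and how a caller retries a failed swap, assuming GCC/Clang's __sync_bool_compare_and_swap builtin; the IncrementShared helper and the main driver below are illustrative only and not part of this commit:

#include <cstdint>

using u32 = std::uint32_t; // assumed alias, mirroring yuzu's common_types.h

// Stand-in for the non-MSVC path above: returns true when *pointer still held
// `expected` and was atomically replaced with `value`.
bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
    return __sync_bool_compare_and_swap(pointer, expected, value);
}

// Hypothetical caller: re-read and retry until the increment is published.
void IncrementShared(volatile u32* counter) {
    u32 observed = *counter;
    while (!AtomicCompareAndSwap(counter, observed + 1, observed)) {
        observed = *counter;
    }
}

int main() {
    volatile u32 counter = 0;
    IncrementShared(&counter);
    return counter == 1 ? 0 : 1;
}

Keeping the parameter volatile-qualified means callers such as Memory::Impl above no longer need a cast that discards the qualifier, and MSVC's _Interlocked* intrinsics already accept volatile pointers directly.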